/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
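
/*
 * Example (added for illustration): memparse() accepts the usual
 * K/M/G suffixes, so a command line such as
 *
 *	initrd=0x60800000,8M
 *
 * records an 8MiB image starting at physical address 0x60800000.
 */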

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
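
/*
 * Note (added for clarity): when booting with a flattened device tree,
 * the generic FDT code reads the linux,initrd-start and linux,initrd-end
 * properties from the /chosen node and hands them to the hook above.
 */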

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo *mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        /* This assumes the meminfo array is properly sorted */
        *min = bank_pfn_start(&mi->bank[0]);
        for_each_bank (i, mi)
                if (mi->bank[i].highmem)
                        break;
        *max_low = bank_pfn_end(&mi->bank[i - 1]);
        *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
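
/*
 * Worked example (added for illustration): with one 512MiB lowmem bank
 * at physical 0x60000000 and one highmem bank above it, bank 0 spans
 * pfns 0x60000-0x80000, so *min = 0x60000 and *max_low = 0x80000, while
 * *max_high is the end pfn of the last (highmem) bank.
 */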

static void __init arm_bootmem_init(unsigned long start_pfn,
        unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page.  This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}
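
/*
 * Sizing example (added for illustration): the bootmem bitmap needs one
 * bit per page frame, so 512MiB of lowmem (0x20000 pfns) needs a 16KiB
 * bitmap, for which bootmem_bootmap_pages() returns 4 (4KiB) pages.
 */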

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif
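
/*
 * Note (added, hedged): in kernels of this vintage, arm_dma_zone_size
 * is normally set from the machine_desc .dma_zone_size field during
 * setup_arch(); e.g. a platform whose devices can only DMA into the
 * first 64MiB of RAM would use .dma_zone_size = SZ_64M.
 */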

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * Initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size) {
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
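
/*
 * Note (added for clarity): the generic pfn_valid() assumes any pfn
 * covered by the mem_map has a valid struct page; ARM RAM banks can
 * have holes finer than that, so validity is checked against the
 * actual memblock memory map instead.
 */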

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_alloc(size, align);
        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}
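
/*
 * Usage sketch (hypothetical, added for illustration): memory must be
 * stolen from a machine_desc .reserve callback, which arm_memblock_init()
 * below invokes before it clears arm_memblock_steal_permitted.  The
 * names here are invented for the example.
 */
#if 0
static phys_addr_t example_fw_base;

static void __init example_mach_reserve(void)
{
        /* Take 1MiB, 1MiB-aligned; it vanishes from the kernel's RAM */
        example_fw_base = arm_memblock_steal(SZ_1M, SZ_1M);
}
#endif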

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        arm_memblock_steal_permitted = false;
        memblock_allow_resize();
        memblock_dump_all();
}

void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to
 * an undefined instruction (Thumb).  The poison is written a word at a
 * time, so count must be a multiple of 4.
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
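
/*
 * Worked example (added for illustration, non-SPARSEMEM case): with
 * banks covering pfns 0x60000-0x70000 and 0x80000-0x90000, both gap
 * edges are already MAX_ORDER_NR_PAGES aligned, so free_memmap()
 * releases the struct page entries for the 0x10000 page frames in the
 * hole back to bootmem.
 */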

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem pages free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}
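
/*
 * Worked example (added for illustration): with lowmem ending at pfn
 * 0x80000, a memory region reaching pfn 0xa0000 and a single reserved
 * region at pfns 0x82000-0x84000, the loop above frees pfns
 * 0x80000-0x82000, skips the reservation, then frees the remainder
 * from 0x84000 to 0xa0000.
 */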

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;
        int i;
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap(&meminfo);

        totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

        free_highpages();

        reserved_pages = free_pages = 0;

        for_each_bank(i, &meminfo) {
                struct membank *bank = &meminfo.bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                free_pages << (PAGE_SHIFT-10),
                reserved_pages << (PAGE_SHIFT-10),
                totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_TOP),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE                          > MODULES_VADDR);
        BUG_ON(TASK_SIZE                                > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE      > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
756 #endif