/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
        .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                                 \
        if (unlikely(bootmem_debug))                            \
                printk(KERN_INFO                                \
                        "bootmem::%s " fmt,                     \
                        __func__, ## args);                     \
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

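/*
 * Worked example (illustration only, assuming 4K pages and 64-bit longs):
 * a node with 128MB of RAM spans 32768 pages, so bootmap_bytes() returns
 * DIV_ROUND_UP(32768, 8) = 4096 bytes (already long-aligned), and
 * bootmem_bootmap_pages(32768) therefore reports a single page for the
 * bitmap.
 */
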
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }

        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

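/*
 * Typical use (hypothetical arch setup code, not part of this file): mark
 * everything reserved, open up the real RAM ranges, then re-reserve the
 * bitmap itself:
 *
 *      map_size = init_bootmem(first_free_pfn, max_low_pfn);
 *      free_bootmem(ram_start, ram_size);
 *      reserve_bootmem(PFN_PHYS(first_free_pfn), map_size, BOOTMEM_DEFAULT);
 *
 * first_free_pfn, ram_start and ram_size are placeholders for whatever the
 * architecture derived from its memory map.
 */
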
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(physaddr), size);

        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}

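/*
 * Sketch of a late free (hypothetical caller): if a region reserved during
 * early boot is only released after free_all_bootmem() has run, the bitmap
 * no longer exists and the pages go straight to the buddy allocator:
 *
 *      free_bootmem_late(image_start_phys, image_size);
 *
 * Both names are placeholders.  Partial pages at either end are skipped
 * (PFN_UP/PFN_DOWN), just as in free_bootmem().
 */
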
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data, start, end);

        while (start < end) {
                unsigned long *map, idx, vec;
                unsigned shift;

                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                shift = idx & (BITS_PER_LONG - 1);
                /*
                 * vec holds at most BITS_PER_LONG map bits,
                 * bit 0 corresponds to start.
                 */
                vec = ~map[idx / BITS_PER_LONG];

                if (shift) {
                        vec >>= shift;
                        if (end - start >= BITS_PER_LONG)
                                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                        (BITS_PER_LONG - shift);
                }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
                 * it in one go.
                 */
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        unsigned long cur = start;

                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                ++cur;
                        }
                }
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

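/*
 * Worked example for the scan above (BITS_PER_LONG == 64): with a node
 * starting at pfn 0 and start == 100, idx is 100 and shift is 36, so vec
 * is assembled from the tail of ~map[1] and the head of ~map[2].  When
 * start is 64-aligned and the whole inverted word is ~0UL, all 64 pages
 * are handed over at once as a single order-6 (ilog2(64)) block.
 */
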
static int reset_managed_pages_done __initdata;

static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        if (reset_managed_pages_done)
                return;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
        reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        unsigned long pages;

        register_page_bootmem_info_node(pgdat);
        reset_node_managed_pages(pgdat);
        pages = free_all_bootmem_core(pgdat->bdata);
        totalram_pages += pages;

        return pages;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;

        reset_all_zones_managed_pages();

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        totalram_pages += total_pages;

        return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }

                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}

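/*
 * Example (hypothetical): keeping a firmware table at a known physical
 * address out of the allocator's hands, then releasing it once it has
 * been copied elsewhere:
 *
 *      if (reserve_bootmem(table_phys, table_len, BOOTMEM_EXCLUSIVE))
 *              return -EBUSY;  // someone already claimed the range
 *      ...
 *      free_bootmem(table_phys, table_len);
 *
 * table_phys and table_len are placeholders.  Note the asymmetric
 * rounding: reserving widens to whole pages (PFN_DOWN/PFN_UP) while
 * freeing narrows (PFN_UP/PFN_DOWN), so partial pages stay reserved.
 */
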
static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}

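/*
 * Worked example: for a node with node_min_pfn = 3, aligning the index of
 * pfn 7 (idx 4) to step 4 gives ALIGN(3 + 4, 4) - 3 = 8 - 3 = 5, i.e.
 * pfn 8.  The *absolute* pfn ends up aligned even though the node itself
 * does not start on an aligned boundary.
 */
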
static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

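/*
 * Example of the merge path above (illustration only): two back-to-back
 * 512-byte allocations share one page.  The first leaves last_end_off at
 * a mid-page offset; the second starts at align_off(bdata, last_end_off,
 * align), and because PFN_DOWN(start_off) < sidx, merge == 1 keeps the
 * already-reserved shared page out of the __reserve() range.
 */
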
static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

restart:
        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem(size, align, goal, limit);
}

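/*
 * Usage sketch (hypothetical early-boot caller):
 *
 *      table = __alloc_bootmem(size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * i.e. cacheline-aligned memory, preferably above the DMA zone.  The goal
 * is only a preference, and the call panics rather than return NULL, so
 * the result needs no error check.
 */
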
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);
again:

        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)
                limit = 0;

        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update goal according to MAX_DMA32_PFN */
        end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
                ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
}