#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/* Check for already reserved areas */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		/* Reserved region clips the tail of the candidate range */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* Reserved region clips the head: skip past it */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/* Candidate range lies fully inside a reserved region */
		if (last <= (r->base + r->size) && addr >= r->base) {
			*sizep = 0;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

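/*
 * Search one memory region [ei_start, ei_last) for a free block at or above
 * start: align the candidate, then let bad_addr_size() trim it around the
 * reserved areas. Returns the address and stores the usable size in *sizep,
 * or MEMBLOCK_ERROR if nothing fits.
 */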
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
			 u64 *sizep, u64 align)
{
	u64 addr, last;

	addr = round_up(ei_start, align);
	if (addr < start)
		addr = round_up(start, align);
	if (addr >= ei_last)
		goto out;
	*sizep = ei_last - addr;
	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
		;
	last = addr + *sizep;
	if (last > ei_last)
		goto out;

	return addr;

out:
	return MEMBLOCK_ERROR;
}

/*
 * Find the next free range at or above start; its size is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
					 sizep, align);
		if (addr != MEMBLOCK_ERROR)
			return addr;
	}

	return MEMBLOCK_ERROR;
}

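/*
 * Grab a zeroed scratch array of 'count' struct range entries from below
 * memblock.current_limit. The buffer is deliberately left unreserved; see
 * the comment in the function body.
 */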
static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (mem == MEMBLOCK_ERROR)
		panic("cannot find more space for range array");

	/*
	 * This range is temporary, so don't reserve it: it will not be
	 * overlapped, because we will not allocate a new buffer before
	 * we discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}

#ifdef CONFIG_NO_BOOTMEM
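/*
 * Drop every reserved region from the free-range array 'range' (of size
 * 'az'). The reserved-region array itself is un-reserved around the walk so
 * that its memory is handed over as free, then re-reserved while it is
 * still in use.
 */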
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself first */
	memblock_free_reserved_regions();

	pr_info("Subtract (%lu early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}

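/*
 * Small helpers to count how many early_node_map[] entries one node has:
 * count_work_fn() is the per-region callback, count_early_node_map() runs it
 * over the node's active regions.
 */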
struct count_data {
	int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
				unsigned long end_pfn, void *datax)
{
	struct count_data *data = datax;

	data->nr++;

	return 0;
}

static int __init count_early_node_map(int nodeid)
{
	struct count_data data;

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	return data.nr;
}

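/*
 * Build the array of free-memory ranges for one node and return the number
 * of entries; the array itself is handed back through *rangep.
 */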
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	int count;
	struct range *range;
	int nr_range;

	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to get the range
	 * array for free memory.
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
#ifdef CONFIG_X86_32
	subtract_range(range, count, max_low_pfn, -1ULL);
#endif
	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}
#else
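/*
 * With bootmem enabled, replay every memblock reservation that intersects
 * [start, end) into the bootmem allocator.
 */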
void __init memblock_x86_to_bootmem(u64 start, u64 end)
{
	int count;
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself */
	memblock_free_reserved_regions();

	count = memblock.reserved.cnt;
	pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n", count, start, end - 1);
	for_each_memblock(reserved, r) {
		pr_info(" [%010llx-%010llx] ", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = max(start, r->base);
		final_end = min(end, r->base + r->size);
		if (final_start >= final_end) {
			pr_cont("\n");
			continue;
		}
		pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1);
		reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}
#endif

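/*
 * Count the bytes of memblock memory inside [addr, limit), rounded to whole
 * pages; when get_free is set, the reserved regions are subtracted first so
 * only free memory is counted.
 */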
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range, final_start, final_end);
	}
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract the memblock reserved regions in the range, if asked for free memory */
	if (!get_free)
		goto sort_and_count_them;
	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

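/* Convenience wrappers for free-only vs. total memory in [addr, limit) */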
u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}

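/* Reserve [start, end); empty ranges are ignored, inverted ranges warn */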
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end))
		return;

	memblock_reserve(start, end - start);
}

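/* Free a [start, end) reservation, with the same sanity checks */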
void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end))
		return;

	memblock_free(start, end - start);
}

/*
 * This must be called after memblock_x86_register_active_regions(), so that
 * early_node_map[] is already filled.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;

	addr = find_memory_core_early(nid, size, align, start, end);
	if (addr != MEMBLOCK_ERROR)
		return addr;

	/* Fall back; start/end should already be within the node's range */
	return memblock_find_in_range(start, end, size, align);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Clip the region to the node boundaries */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}