1 #include <linux/kernel.h>
2 #include <linux/types.h>
3 #include <linux/init.h>
4 #include <linux/bitops.h>
5 #include <linux/memblock.h>
6 #include <linux/bootmem.h>
8 #include <linux/range.h>
10 /* Check for already reserved areas */
/*
 * bad_addr_size - clip the candidate range [*addrp, *addrp + *sizep) so that
 * it does not overlap any region already on the "reserved" memblock list.
 *
 * @addrp: in/out candidate start address; may be advanced past a reservation
 * @sizep: in/out candidate size; may be shrunk to stop before a reservation
 * @align: alignment required for any adjusted start address
 *
 * NOTE(review): only part of the body is visible in this chunk; presumably it
 * returns true when the range was modified so the caller loops and re-checks —
 * confirm against the full file.
 */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
	struct memblock_region *r;
	u64 addr = *addrp, last;	/* last: exclusive end of candidate range */

	/* Walk every already-reserved region and adjust the candidate range. */
	for_each_memblock(reserved, r) {
		/* Reservation starts inside the range: truncate just before it. */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
		/* Reservation ends inside the range: skip past it, re-aligned. */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
		/* Range lies entirely within the reservation: nothing usable. */
		if (last <= (r->base + r->size) && addr >= r->base) {
/*
 * __memblock_x86_find_in_range_size - search one memory region for a free
 * (i.e. not memblock-reserved) sub-range at or above @start.
 *
 * @ei_start: base of the memory region being searched
 * @ei_last:  exclusive end of the memory region being searched
 * @start:    lowest acceptable address for the returned range
 * @sizep:    out: size of the free range found
 * @align:    alignment required for the returned start address
 *
 * Returns the aligned start of the free range, or MEMBLOCK_ERROR when the
 * region contains no usable space (some interior lines of the body are not
 * visible in this chunk).
 */
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
			 u64 *sizep, u64 align)
	/* Begin at the aligned base of the region... */
	addr = round_up(ei_start, align);
		/* ...but never below the caller's requested start. */
		addr = round_up(start, align);
	/* Tentatively claim everything up to the end of the region. */
	*sizep = ei_last - addr;
	/*
	 * Repeatedly clip/advance the range around reserved areas; the empty
	 * loop body is intentional — bad_addr_size() mutates addr/*sizep.
	 */
	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
	/* No free range fits within this region. */
	return MEMBLOCK_ERROR;
68 * Find next free range after start, and size is returned in *sizep
/*
 * memblock_x86_find_in_range_size - find the next free range at or above
 * @start across all "memory" memblock regions; its size is returned in @sizep.
 *
 * @start: lowest acceptable address
 * @sizep: out: size of the free range found
 * @align: alignment required for the returned start address
 *
 * Returns the aligned start address of the first free range found, or
 * MEMBLOCK_ERROR when no region contains usable space.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
	struct memblock_region *r;

	/* Try each RAM region in turn until one yields a free range. */
	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;	/* exclusive end */

		/* Delegate the per-region search to the helper above. */
		addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
		/* First successful region wins (presumably returns addr here). */
		if (addr != MEMBLOCK_ERROR)
	/* Every memory region was exhausted or fully reserved. */
	return MEMBLOCK_ERROR;