// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node().
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
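
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * early-setup sequence driving the API described above. The physical
 * addresses and sizes below are hypothetical.
 */
static void __init __maybe_unused memblock_overview_sketch(void)
{
        void *ptr;

        /* describe the physical memory layout (no NUMA node, i.e. UMA) */
        memblock_add(0x80000000ULL, 0x40000000ULL);     /* 1G of RAM at 2G */

        /* protect a firmware-reserved range from later allocations */
        memblock_reserve(0x80000000ULL, 0x00100000ULL);

        /* allocate zeroed boot memory; returns a virtual address */
        ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!ptr)
                pr_warn("boot allocation failed\n");
}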

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,
        .memory.name            = "memory",

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_RESERVED_REGIONS,
        .reserved.name          = "reserved",

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
        .regions                = memblock_physmem_init_regions,
        .cnt                    = 1,    /* empty dummy entry */
        .max                    = INIT_PHYSMEM_REGIONS,
        .name                   = "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)                   \
        for (i = 0, rgn = &memblock_type->regions[0];                   \
             i < memblock_type->cnt;                                    \
             i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)                                          \
        do {                                                            \
                if (memblock_debug)                                     \
                        pr_info(fmt, ##__VA_ARGS__);                    \
        } while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        memblock_cap_size(base, &size);

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}
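
/*
 * Worked example (illustrative): memblock_addrs_overlap() treats regions
 * as half-open intervals, so [0x1000, 0x2000) and [0x2000, 0x3000) do
 * NOT overlap (base1 + size1 == base2 fails the strict '<' test), while
 * [0x1000, 0x2001) and [0x2000, 0x3000) do.
 */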

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                enum memblock_flags flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               enum memblock_flags flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid,
                                        enum memblock_flags flags)
{
        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
            end == MEMBLOCK_ALLOC_NOLEAKTRACE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);

        if (memblock_bottom_up())
                return __memblock_find_range_bottom_up(start, end, size, align,
                                                       nid, flags);
        else
                return __memblock_find_range_top_down(start, end, size, align,
                                                      nid, flags);
}
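
/*
 * Worked example (illustrative): with free ranges [0x1000, 0x4000) and
 * [0x8000, 0x9000), a top-down search for size 0x1000 with align 0x1000
 * returns 0x8000 (the highest fit), while a bottom-up search returns
 * 0x1000.
 */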

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        enum memblock_flags flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                          NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
        phys_addr_t addr, size;

        if (memblock.reserved.regions != memblock_reserved_init_regions) {
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                if (memblock_reserved_in_slab)
                        kfree(memblock.reserved.regions);
                else
                        memblock_free_late(addr, size);
        }

        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                if (memblock_memory_in_slab)
                        kfree(memblock.memory.regions);
                else
                        memblock_free_late(addr, size);
        }

        memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr, new_end;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new one aligned to PAGE_SIZE,
         * so that we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       type->name, type->max, type->max * 2);
                return -1;
        }

        new_end = addr + new_size - 1;
        memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
                        type->name, type->max * 2, &addr, &new_end);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(old_array, old_alloc_size);

        /*
         * Reserve the new array if that comes from the memblock.  Otherwise,
         * we needn't do it.
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid,
                                                   enum memblock_flags flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, enum memblock_flags flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_NUMA
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
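
/*
 * Worked example (illustrative): adding [0x1000, 0x5000) on top of an
 * existing [0x2000, 0x3000) needs two new regions, [0x1000, 0x2000) and
 * [0x3000, 0x5000). The first (counting) pass computes nr_new = 2 so the
 * array can be resized if needed; the second pass performs the inserts
 * and the final merge yields a single region [0x1000, 0x5000).
 */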

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                      int nid, enum memblock_flags flags)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
                     &base, &end, nid, flags, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
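
/*
 * Worked example (illustrative): isolating [0x2000, 0x3000) from a single
 * region [0x1000, 0x4000) splits it into [0x1000, 0x2000), [0x2000, 0x3000)
 * and [0x3000, 0x4000); *start_rgn/*end_rgn then delimit the middle region
 * so callers can remove it or retag its flags or node id.
 */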

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
        if (ptr)
                memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets or clears
 * @flag on it.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++) {
                struct memblock_region *r = &type->regions[i];

                if (set)
                        r->flags |= flag;
                else
                        r->flags &= ~flag;
        }

        memblock_merge_regions(type);
        return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
                               struct memblock_region *m,
                               int nid, int flags)
{
        int m_nid = memblock_get_region_node(m);

        /* we never skip regions when iterating memblock.reserved or physmem */
        if (type != memblock_memory)
                return false;

        /* only memory regions are associated with nodes, check it */
        if (nid != NUMA_NO_NODE && nid != m_nid)
                return true;

        /* skip hotpluggable memory regions if needed */
        if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
            !(flags & MEMBLOCK_HOTPLUG))
                return true;

        /* if we want mirror memory skip non-mirror memory regions */
        if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                return true;

        /* skip nomap memory unless we were asked for it explicitly */
        if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                return true;

        /* skip driver-managed memory unless we were asked for it explicitly */
        if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
                return true;

        return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
            "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                if (should_skip_region(type_a, m, nid, flags))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
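
/*
 * Worked example (illustrative): the iterator packs both cursors into one
 * u64, so after visiting an intersection while positioned at memory region
 * 1 and reserved-gap 2, *idx == (2ULL << 32) | 1; restarting from that
 * value resumes the walk exactly where it left off.
 */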

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
                                          enum memblock_flags flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                if (should_skip_region(type_a, m, nid, flags))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */

                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                          unsigned long *out_start_pfn,
                                          unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;
        int r_nid;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];
                r_nid = memblock_get_region_node(r);

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r_nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r_nid;
}
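
/*
 * Worked example (illustrative): with 4K pages, a region [0x1234, 0x5678)
 * yields PFN_UP(0x1234) = 2 and PFN_DOWN(0x5678) = 5, i.e. only the fully
 * covered page frames [2, 5) are reported; a region smaller than one page
 * rounds to an empty PFN range and is skipped.
 */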

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
#endif
        return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is a zone/PFN specific wrapper for the for_each_mem_range
 * type iterators. It is used by the deferred memory init routines, which
 * previously duplicated much of this logic; centralizing it in one
 * iterator avoids that duplication.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                             unsigned long *out_spfn, unsigned long *out_epfn)
{
        int zone_nid = zone_to_nid(zone);
        phys_addr_t spa, epa;

        __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
                         &memblock.memory, &memblock.reserved,
                         &spa, &epa, NULL);

        while (*idx != U64_MAX) {
                unsigned long epfn = PFN_DOWN(epa);
                unsigned long spfn = PFN_UP(spa);

                /*
                 * Verify the end is at least past the start of the zone and
                 * that we have at least one PFN to initialize.
                 */
                if (zone->zone_start_pfn < epfn && spfn < epfn) {
                        /* if we went too far just stop searching */
                        if (zone_end_pfn(zone) <= spfn) {
                                *idx = U64_MAX;
                                break;
                        }

                        if (out_spfn)
                                *out_spfn = max(zone->zone_start_pfn, spfn);
                        if (out_epfn)
                                *out_epfn = min(zone_end_pfn(zone), epfn);

                        return;
                }

                __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
                                 &memblock.memory, &memblock.reserved,
                                 &spa, &epa, NULL);
        }

        /* signal end of iteration */
        if (out_spfn)
                *out_spfn = ULONG_MAX;
        if (out_epfn)
                *out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc_phys()
 * for the allocated boot memory block, so that it is never reported as a
 * leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid,
                                        bool exact_nid)
{
        enum memblock_flags flags = choose_memblock_flags();
        phys_addr_t found;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (!align) {
                /* Can't use WARNs this early in boot on powerpc */
                dump_stack();
                align = SMP_CACHE_BYTES;
        }

again:
        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size))
                goto done;

        if (nid != NUMA_NO_NODE && !exact_nid) {
                found = memblock_find_in_range_node(size, align, start,
                                                    end, NUMA_NO_NODE,
                                                    flags);
                if (found && !memblock_reserve(found, size))
                        goto done;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return 0;

done:
        /*
         * Skip kmemleak for those places like kasan_init() and
         * early_pgtable_alloc() due to high volume.
         */
        if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
                /*
                 * The min_count is set to 0 so that memblock allocated
                 * blocks are never reported as leaks. This is because many
                 * of these blocks are only referred via the physical
                 * address which is not looked up by kmemleak.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);

        return found;
}
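
/*
 * Note (illustrative summary of the fallback chain above): a request first
 * tries the exact @nid, then any node unless @exact_nid, and finally drops
 * the MEMBLOCK_MIRROR flag with a warning, so mirrored systems degrade to
 * unmirrored memory rather than failing the boot allocation outright.
 */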

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
                                             phys_addr_t align,
                                             phys_addr_t start,
                                             phys_addr_t end)
{
        memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
                     __func__, (u64)size, (u64)align, &start, &end,
                     (void *)_RET_IP_);
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        return memblock_alloc_range_nid(size, align, 0,
                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

1454 : /**
1455 : * memblock_alloc_internal - allocate boot memory block
1456 : * @size: size of memory block to be allocated in bytes
1457 : * @align: alignment of the region and block's size
1458 : * @min_addr: the lower bound of the memory region to allocate (phys address)
1459 : * @max_addr: the upper bound of the memory region to allocate (phys address)
1460 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1461 : * @exact_nid: control the allocation fall back to other nodes
1462 : *
1463 : * Allocates memory block using memblock_alloc_range_nid() and
1464 : * converts the returned physical address to virtual.
1465 : *
1466 : * The @min_addr limit is dropped if it can not be satisfied and the allocation
1467 : * will fall back to memory below @min_addr. Other constraints, such
1468 : * as node and mirrored memory will be handled again in
1469 : * memblock_alloc_range_nid().
1470 : *
1471 : * Return:
1472 : * Virtual address of allocated memory block on success, NULL on failure.
1473 : */
1474 21 : static void * __init memblock_alloc_internal(
1475 : phys_addr_t size, phys_addr_t align,
1476 : phys_addr_t min_addr, phys_addr_t max_addr,
1477 : int nid, bool exact_nid)
1478 : {
1479 : phys_addr_t alloc;
1480 :
1481 : /*
1482 : * Detect any accidental use of these APIs after slab is ready, as at
1483 : * this moment memblock may be deinitialized already and its
1484 : * internal data may be destroyed (after execution of memblock_free_all)
1485 : */
1486 21 : if (WARN_ON_ONCE(slab_is_available()))
1487 0 : return kzalloc_node(size, GFP_NOWAIT, nid);
1488 :
1489 21 : if (max_addr > memblock.current_limit)
1490 0 : max_addr = memblock.current_limit;
1491 :
1492 21 : alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1493 : exact_nid);
1494 :
1495 : /* retry allocation without lower limit */
1496 21 : if (!alloc && min_addr)
1497 0 : alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1498 : exact_nid);
1499 :
1500 21 : if (!alloc)
1501 : return NULL;
1502 :
1503 21 : return phys_to_virt(alloc);
1504 : }
1505 :
1506 : /**
1507 : * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1508 : * without zeroing memory
1509 : * @size: size of memory block to be allocated in bytes
1510 : * @align: alignment of the region and block's size
1511 : * @min_addr: the lower bound of the memory region from where the allocation
1512 : * is preferred (phys address)
1513 : * @max_addr: the upper bound of the memory region from where the allocation
1514 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1515 : * allocate only from memory limited by memblock.current_limit value
1516 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1517 : *
1518 : * Public function, provides additional debug information (including caller
1519 : * info), if enabled. Does not zero allocated memory.
1520 : *
1521 : * Return:
1522 : * Virtual address of allocated memory block on success, NULL on failure.
1523 : */
1524 0 : void * __init memblock_alloc_exact_nid_raw(
1525 : phys_addr_t size, phys_addr_t align,
1526 : phys_addr_t min_addr, phys_addr_t max_addr,
1527 : int nid)
1528 : {
1529 0 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1530 : __func__, (u64)size, (u64)align, nid, &min_addr,
1531 : &max_addr, (void *)_RET_IP_);
1532 :
1533 0 : return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1534 : true);
1535 : }
1536 :
1537 : /**
1538 : * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1539 : * memory and without panicking
1540 : * @size: size of memory block to be allocated in bytes
1541 : * @align: alignment of the region and block's size
1542 : * @min_addr: the lower bound of the memory region from where the allocation
1543 : * is preferred (phys address)
1544 : * @max_addr: the upper bound of the memory region from where the allocation
1545 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1546 : * allocate only from memory limited by memblock.current_limit value
1547 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1548 : *
1549 : * Public function, provides additional debug information (including caller
1550 : * info), if enabled. Does not zero allocated memory, does not panic if request
1551 : * cannot be satisfied.
1552 : *
1553 : * Return:
1554 : * Virtual address of allocated memory block on success, NULL on failure.
1555 : */
1556 1 : void * __init memblock_alloc_try_nid_raw(
1557 : phys_addr_t size, phys_addr_t align,
1558 : phys_addr_t min_addr, phys_addr_t max_addr,
1559 : int nid)
1560 : {
1561 1 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1562 : __func__, (u64)size, (u64)align, nid, &min_addr,
1563 : &max_addr, (void *)_RET_IP_);
1564 :
1565 1 : return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1566 : false);
1567 : }
1568 :
1569 : /**
1570 : * memblock_alloc_try_nid - allocate boot memory block
1571 : * @size: size of memory block to be allocated in bytes
1572 : * @align: alignment of the region and block's size
1573 : * @min_addr: the lower bound of the memory region from where the allocation
1574 : * is preferred (phys address)
1575 : * @max_addr: the upper bound of the memory region from where the allocation
1576 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1577 : * allocate only from memory limited by memblock.current_limit value
1578 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1579 : *
1580 : * Public function, provides additional debug information (including caller
1581 : * info), if enabled. This function zeroes the allocated memory.
1582 : *
1583 : * Return:
1584 : * Virtual address of allocated memory block on success, NULL on failure.
1585 : */
1586 20 : void * __init memblock_alloc_try_nid(
1587 : phys_addr_t size, phys_addr_t align,
1588 : phys_addr_t min_addr, phys_addr_t max_addr,
1589 : int nid)
1590 : {
1591 : void *ptr;
1592 :
1593 20 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1594 : __func__, (u64)size, (u64)align, nid, &min_addr,
1595 : &max_addr, (void *)_RET_IP_);
1596 20 : ptr = memblock_alloc_internal(size, align,
1597 : min_addr, max_addr, nid, false);
1598 20 : if (ptr)
1599 20 : memset(ptr, 0, size);
1600 :
1601 20 : return ptr;
1602 : }
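     :
     : /*
     :  * For reference, the plain memblock_alloc() used all over early boot
     :  * code is a thin inline wrapper around this function (defined in
     :  * memblock.h):
     :  *
     :  *	static inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
     :  *	{
     :  *		return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
     :  *					      MEMBLOCK_ALLOC_ACCESSIBLE,
     :  *					      NUMA_NO_NODE);
     :  *	}
     :  */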
1603 :
1604 : /**
1605 : * memblock_free_late - free pages directly to buddy allocator
1606 : * @base: phys starting address of the boot memory block
1607 : * @size: size of the boot memory block in bytes
1608 : *
1609 : * This is only useful when the memblock allocator has already been torn
1610 : * down, but we are still initializing the system. Pages are released directly
1611 : * to the buddy allocator.
1612 : */
1613 0 : void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
1614 : {
1615 : phys_addr_t cursor, end;
1616 :
1617 0 : end = base + size - 1;
1618 0 : memblock_dbg("%s: [%pa-%pa] %pS\n",
1619 : __func__, &base, &end, (void *)_RET_IP_);
1620 0 : kmemleak_free_part_phys(base, size);
1621 0 : cursor = PFN_UP(base);
1622 0 : end = PFN_DOWN(base + size);
1623 :
1624 0 : for (; cursor < end; cursor++) {
1625 0 : memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1626 : totalram_pages_inc();
1627 : }
1628 0 : }
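     :
     : /*
     :  * Only whole pages inside [@base, @base + @size) are returned:
     :  * PFN_UP()/PFN_DOWN() above round the edges inward, so partial head
     :  * and tail pages are silently skipped. Usage sketch (hypothetical
     :  * variables):
     :  *
     :  *	memblock_free_late(early_table_phys, early_table_size);
     :  */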
1629 :
1630 : /*
1631 : * Remaining API functions
1632 : */
1633 :
1634 0 : phys_addr_t __init_memblock memblock_phys_mem_size(void)
1635 : {
1636 0 : return memblock.memory.total_size;
1637 : }
1638 :
1639 0 : phys_addr_t __init_memblock memblock_reserved_size(void)
1640 : {
1641 0 : return memblock.reserved.total_size;
1642 : }
1643 :
1644 : /* lowest address */
1645 1 : phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1646 : {
1647 1 : return memblock.memory.regions[0].base;
1648 : }
1649 :
1650 0 : phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1651 : {
1652 0 : int idx = memblock.memory.cnt - 1;
1653 :
1654 0 : return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1655 : }
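     :
     : /*
     :  * Together with memblock_start_of_DRAM() this brackets all registered
     :  * memory. The span may still contain holes, so it is a bound rather
     :  * than a size; for the total size use memblock_phys_mem_size() above.
     :  */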
1656 :
1657 0 : static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1658 : {
1659 0 : phys_addr_t max_addr = PHYS_ADDR_MAX;
1660 : struct memblock_region *r;
1661 :
1662 : /*
1663 : * Translate the memory @limit size into the max address within one of
1664 : * the memory memblock regions. If @limit exceeds the total size of
1665 : * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
1666 : */
1667 0 : for_each_mem_region(r) {
1668 0 : if (limit <= r->size) {
1669 0 : max_addr = r->base + limit;
1670 0 : break;
1671 : }
1672 0 : limit -= r->size;
1673 : }
1674 :
1675 0 : return max_addr;
1676 : }
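     :
     : /*
     :  * Worked example (assumed layout): with memory regions
     :  * [0x00000000-0x20000000) and [0x40000000-0x80000000), a limit of
     :  * 0x30000000 consumes the whole first region plus 0x10000000 bytes
     :  * of the second, so __find_max_addr() returns 0x50000000.
     :  */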
1677 :
1678 0 : void __init memblock_enforce_memory_limit(phys_addr_t limit)
1679 : {
1680 : phys_addr_t max_addr;
1681 :
1682 0 : if (!limit)
1683 : return;
1684 :
1685 0 : max_addr = __find_max_addr(limit);
1686 :
1687 : /* @limit exceeds the total size of the memory, do nothing */
1688 0 : if (max_addr == PHYS_ADDR_MAX)
1689 : return;
1690 :
1691 : /* truncate both memory and reserved regions */
1692 0 : memblock_remove_range(&memblock.memory, max_addr,
1693 : PHYS_ADDR_MAX);
1694 0 : memblock_remove_range(&memblock.reserved, max_addr,
1695 : PHYS_ADDR_MAX);
1696 : }
1697 :
1698 0 : void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1699 : {
1700 : int start_rgn, end_rgn;
1701 : int i, ret;
1702 :
1703 0 : if (!size)
1704 0 : return;
1705 :
1706 0 : if (!memblock_memory->total_size) {
1707 0 : pr_warn("%s: No memory registered yet\n", __func__);
1708 0 : return;
1709 : }
1710 :
1711 0 : ret = memblock_isolate_range(&memblock.memory, base, size,
1712 : &start_rgn, &end_rgn);
1713 0 : if (ret)
1714 : return;
1715 :
1716 : /* remove all the mapped (i.e. !NOMAP) regions outside the isolated range */
1717 0 : for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1718 0 : if (!memblock_is_nomap(&memblock.memory.regions[i]))
1719 0 : memblock_remove_region(&memblock.memory, i);
1720 :
1721 0 : for (i = start_rgn - 1; i >= 0; i--)
1722 0 : if (!memblock_is_nomap(&memblock.memory.regions[i]))
1723 0 : memblock_remove_region(&memblock.memory, i);
1724 :
1725 : /* truncate the reserved regions */
1726 0 : memblock_remove_range(&memblock.reserved, 0, base);
1727 0 : memblock_remove_range(&memblock.reserved,
1728 : base + size, PHYS_ADDR_MAX);
1729 : }
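     :
     : /*
     :  * Contrast with memblock_enforce_memory_limit() above: that helper
     :  * truncates everything past the computed address, whereas this one
     :  * keeps NOMAP regions (e.g. ranges the firmware requires to stay in
     :  * the memory map) and removes only the mapped regions outside
     :  * [@base, @base + @size).
     :  */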
1730 :
1731 0 : void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1732 : {
1733 : phys_addr_t max_addr;
1734 :
1735 0 : if (!limit)
1736 : return;
1737 :
1738 0 : max_addr = __find_max_addr(limit);
1739 :
1740 : /* @limit exceeds the total size of the memory, do nothing */
1741 0 : if (max_addr == PHYS_ADDR_MAX)
1742 : return;
1743 :
1744 0 : memblock_cap_memory_range(0, max_addr);
1745 : }
1746 :
1747 0 : static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1748 : {
1749 0 : unsigned int left = 0, right = type->cnt;
1750 :
1751 : do {
1752 0 : unsigned int mid = (right + left) / 2;
1753 :
1754 0 : if (addr < type->regions[mid].base)
1755 : right = mid;
1756 0 : else if (addr >= (type->regions[mid].base +
1757 0 : type->regions[mid].size))
1758 0 : left = mid + 1;
1759 : else
1760 0 : return mid;
1761 0 : } while (left < right);
1762 : return -1;
1763 : }
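     :
     : /*
     :  * The bisection relies on the region array being kept sorted and
     :  * non-overlapping by the insertion paths. Worked example (assumed
     :  * layout): searching for 0x5000 in [0x0000-0x1000), [0x4000-0x6000),
     :  * [0x9000-0xa000) probes mid = 1 first; 0x5000 lies inside
     :  * regions[1], so index 1 is returned after a single probe.
     :  */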
1764 :
1765 0 : bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1766 : {
1767 0 : return memblock_search(&memblock.reserved, addr) != -1;
1768 : }
1769 :
1770 0 : bool __init_memblock memblock_is_memory(phys_addr_t addr)
1771 : {
1772 0 : return memblock_search(&memblock.memory, addr) != -1;
1773 : }
1774 :
1775 0 : bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1776 : {
1777 0 : int i = memblock_search(&memblock.memory, addr);
1778 :
1779 0 : if (i == -1)
1780 : return false;
1781 0 : return !memblock_is_nomap(&memblock.memory.regions[i]);
1782 : }
1783 :
1784 0 : int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1785 : unsigned long *start_pfn, unsigned long *end_pfn)
1786 : {
1787 0 : struct memblock_type *type = &memblock.memory;
1788 0 : int mid = memblock_search(type, PFN_PHYS(pfn));
1789 :
1790 0 : if (mid == -1)
1791 : return -1;
1792 :
1793 0 : *start_pfn = PFN_DOWN(type->regions[mid].base);
1794 0 : *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1795 :
1796 0 : return memblock_get_region_node(&type->regions[mid]);
1797 : }
1798 :
1799 : /**
1800 : * memblock_is_region_memory - check if a region is a subset of memory
1801 : * @base: base of region to check
1802 : * @size: size of region to check
1803 : *
1804 : * Check if the region [@base, @base + @size) is a subset of a memory block.
1805 : *
1806 : * Return:
1807 : * true if the region is a subset of a memory block, false otherwise.
1808 : */
1809 0 : bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1810 : {
1811 0 : int idx = memblock_search(&memblock.memory, base);
1812 0 : phys_addr_t end = base + memblock_cap_size(base, &size);
1813 :
1814 0 : if (idx == -1)
1815 : return false;
1816 0 : return (memblock.memory.regions[idx].base +
1817 0 : memblock.memory.regions[idx].size) >= end;
1818 : }
1819 :
1820 : /**
1821 : * memblock_is_region_reserved - check if a region intersects reserved memory
1822 : * @base: base of region to check
1823 : * @size: size of region to check
1824 : *
1825 : * Check if the region [@base, @base + @size) intersects a reserved
1826 : * memory block.
1827 : *
1828 : * Return:
1829 : * True if they intersect, false if not.
1830 : */
1831 0 : bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1832 : {
1833 0 : return memblock_overlaps_region(&memblock.reserved, base, size);
1834 : }
1835 :
1836 0 : void __init_memblock memblock_trim_memory(phys_addr_t align)
1837 : {
1838 : phys_addr_t start, end, orig_start, orig_end;
1839 : struct memblock_region *r;
1840 :
1841 0 : for_each_mem_region(r) {
1842 0 : orig_start = r->base;
1843 0 : orig_end = r->base + r->size;
1844 0 : start = round_up(orig_start, align);
1845 0 : end = round_down(orig_end, align);
1846 :
1847 0 : if (start == orig_start && end == orig_end)
1848 0 : continue;
1849 :
1850 0 : if (start < end) {
1851 0 : r->base = start;
1852 0 : r->size = end - start;
1853 : } else {
1854 0 : memblock_remove_region(&memblock.memory,
1855 0 : r - memblock.memory.regions);
1856 0 : r--;
1857 : }
1858 : }
1859 0 : }
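     :
     : /*
     :  * Worked example: with align = 0x10000, a region
     :  * [0x12345000-0x23456000) shrinks to [0x12350000-0x23450000), while
     :  * a region too small to contain an aligned block (start >= end after
     :  * rounding) is removed from the memory type entirely.
     :  */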
1860 :
1861 0 : void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1862 : {
1863 0 : memblock.current_limit = limit;
1864 0 : }
1865 :
1866 0 : phys_addr_t __init_memblock memblock_get_current_limit(void)
1867 : {
1868 0 : return memblock.current_limit;
1869 : }
1870 :
1871 0 : static void __init_memblock memblock_dump(struct memblock_type *type)
1872 : {
1873 : phys_addr_t base, end, size;
1874 : enum memblock_flags flags;
1875 : int idx;
1876 : struct memblock_region *rgn;
1877 :
1878 0 : pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1879 :
1880 0 : for_each_memblock_type(idx, type, rgn) {
1881 0 : char nid_buf[32] = "";
1882 :
1883 0 : base = rgn->base;
1884 0 : size = rgn->size;
1885 0 : end = base + size - 1;
1886 0 : flags = rgn->flags;
1887 : #ifdef CONFIG_NUMA
1888 : if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1889 : snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1890 : memblock_get_region_node(rgn));
1891 : #endif
1892 0 : pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1893 : type->name, idx, &base, &end, &size, nid_buf, flags);
1894 : }
1895 0 : }
1896 :
1897 0 : static void __init_memblock __memblock_dump_all(void)
1898 : {
1899 0 : pr_info("MEMBLOCK configuration:\n");
1900 0 : pr_info(" memory size = %pa reserved size = %pa\n",
1901 : &memblock.memory.total_size,
1902 : &memblock.reserved.total_size);
1903 :
1904 0 : memblock_dump(&memblock.memory);
1905 0 : memblock_dump(&memblock.reserved);
1906 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1907 : memblock_dump(&physmem);
1908 : #endif
1909 0 : }
1910 :
1911 0 : void __init_memblock memblock_dump_all(void)
1912 : {
1913 0 : if (memblock_debug)
1914 0 : __memblock_dump_all();
1915 0 : }
1916 :
1917 0 : void __init memblock_allow_resize(void)
1918 : {
1919 0 : memblock_can_resize = 1;
1920 0 : }
1921 :
1922 0 : static int __init early_memblock(char *p)
1923 : {
1924 0 : if (p && strstr(p, "debug"))
1925 0 : memblock_debug = 1;
1926 0 : return 0;
1927 : }
1928 : early_param("memblock", early_memblock);
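     :
     : /*
     :  * Example: booting with "memblock=debug" on the kernel command line
     :  * sets memblock_debug, which enables both the memblock_dbg() trace
     :  * of every add/reserve/alloc/free and the memblock_dump_all() report
     :  * above.
     :  */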
1929 :
1930 : static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1931 : {
1932 : struct page *start_pg, *end_pg;
1933 : phys_addr_t pg, pgend;
1934 :
1935 : /*
1936 : * Convert start_pfn/end_pfn to a struct page pointer.
1937 : */
1938 : start_pg = pfn_to_page(start_pfn - 1) + 1;
1939 : end_pg = pfn_to_page(end_pfn - 1) + 1;
1940 :
1941 : /*
1942 : * Convert to physical addresses, and round start upwards and end
1943 : * downwards.
1944 : */
1945 : pg = PAGE_ALIGN(__pa(start_pg));
1946 : pgend = __pa(end_pg) & PAGE_MASK;
1947 :
1948 : /*
1949 : * If there are free pages between these, free the section of the
1950 : * memmap array.
1951 : */
1952 : if (pg < pgend)
1953 : memblock_phys_free(pg, pgend - pg);
1954 : }
1955 :
1956 : /*
1957 : * The mem_map array can get very big. Free the unused area of the memory map.
1958 : */
1959 : static void __init free_unused_memmap(void)
1960 : {
1961 1 : unsigned long start, end, prev_end = 0;
1962 : int i;
1963 :
1964 : if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1965 : IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1966 : return;
1967 :
1968 : /*
1969 : * This relies on each bank being in address order.
1970 : * The banks are sorted previously in bootmem_init().
1971 : */
1972 : for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1973 : #ifdef CONFIG_SPARSEMEM
1974 : /*
1975 : * Take care not to free memmap entries that don't exist
1976 : * due to SPARSEMEM sections which aren't present.
1977 : */
1978 : start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1979 : #endif
1980 : /*
1981 : * Align down here since many operations in VM subsystem
1982 : * presume that there are no holes in the memory map inside
1983 : * a pageblock
1984 : */
1985 : start = round_down(start, pageblock_nr_pages);
1986 :
1987 : /*
1988 : * If we had a previous bank, and there is a space
1989 : * between the current bank and the previous, free it.
1990 : */
1991 : if (prev_end && prev_end < start)
1992 : free_memmap(prev_end, start);
1993 :
1994 : /*
1995 : * Align up here since many operations in VM subsystem
1996 : * presume that there are no holes in the memory map inside
1997 : * a pageblock
1998 : */
1999 : prev_end = ALIGN(end, pageblock_nr_pages);
2000 : }
2001 :
2002 : #ifdef CONFIG_SPARSEMEM
2003 : if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
2004 : prev_end = ALIGN(end, pageblock_nr_pages);
2005 : free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
2006 : }
2007 : #endif
2008 : }
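     :
     : /*
     :  * Worked example (assumed layout, ignoring the SPARSEMEM adjustment):
     :  * with pageblock_nr_pages = 0x200 and banks covering pfns
     :  * [0x0000-0x4000) and [0x8000-0xc000), the first iteration leaves
     :  * prev_end = 0x4000 and the second frees the struct pages backing
     :  * the hole via free_memmap(0x4000, 0x8000).
     :  */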
2009 :
2010 2 : static void __init __free_pages_memory(unsigned long start, unsigned long end)
2011 : {
2012 : int order;
2013 :
2014 259 : while (start < end) {
2015 510 : order = min(MAX_ORDER - 1UL, __ffs(start));
2016 :
2017 521 : while (start + (1UL << order) > end)
2018 11 : order--;
2019 :
2020 255 : memblock_free_pages(pfn_to_page(start), start, order);
2021 :
2022 255 : start += (1UL << order);
2023 : }
2024 2 : }
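     :
     : /*
     :  * Worked example: for start = 0x404 and end = 0x600, __ffs() yields
     :  * order 2 first, so the range is released as blocks of 4, 8, 16, 32,
     :  * 64, 128 and 256 pages - each step picks the largest power-of-two
     :  * block (capped at MAX_ORDER - 1) that keeps start aligned and does
     :  * not run past end.
     :  */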
2025 :
2026 12 : static unsigned long __init __free_memory_core(phys_addr_t start,
2027 : phys_addr_t end)
2028 : {
2029 12 : unsigned long start_pfn = PFN_UP(start);
2030 12 : unsigned long end_pfn = min_t(unsigned long,
2031 : PFN_DOWN(end), max_low_pfn);
2032 :
2033 12 : if (start_pfn >= end_pfn)
2034 : return 0;
2035 :
2036 2 : __free_pages_memory(start_pfn, end_pfn);
2037 :
2038 2 : return end_pfn - start_pfn;
2039 : }
2040 :
2041 1 : static void __init memmap_init_reserved_pages(void)
2042 : {
2043 : struct memblock_region *region;
2044 : phys_addr_t start, end;
2045 : u64 i;
2046 :
2047 : /* initialize struct pages for the reserved regions */
2048 14 : for_each_reserved_mem_range(i, &start, &end)
2049 13 : reserve_bootmem_region(start, end);
2050 :
2051 : /* and also treat struct pages for the NOMAP regions as PageReserved */
2052 2 : for_each_mem_region(region) {
2053 2 : if (memblock_is_nomap(region)) {
2054 0 : start = region->base;
2055 0 : end = start + region->size;
2056 0 : reserve_bootmem_region(start, end);
2057 : }
2058 : }
2059 1 : }
2060 :
2061 1 : static unsigned long __init free_low_memory_core_early(void)
2062 : {
2063 1 : unsigned long count = 0;
2064 : phys_addr_t start, end;
2065 : u64 i;
2066 :
2067 1 : memblock_clear_hotplug(0, -1);
2068 :
2069 1 : memmap_init_reserved_pages();
2070 :
2071 : /*
2072 : * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2073 : * because in some cases, e.g. when Node 0 has no RAM installed,
2074 : * low memory will be on Node 1.
2075 : */
2076 13 : for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2077 : NULL)
2078 12 : count += __free_memory_core(start, end);
2079 :
2080 1 : return count;
2081 : }
2082 :
2083 : static int reset_managed_pages_done __initdata;
2084 :
2085 0 : void reset_node_managed_pages(pg_data_t *pgdat)
2086 : {
2087 : struct zone *z;
2088 :
2089 3 : for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2090 4 : atomic_long_set(&z->managed_pages, 0);
2091 0 : }
2092 :
2093 1 : void __init reset_all_zones_managed_pages(void)
2094 : {
2095 : struct pglist_data *pgdat;
2096 :
2097 1 : if (reset_managed_pages_done)
2098 : return;
2099 :
2100 2 : for_each_online_pgdat(pgdat)
2101 1 : reset_node_managed_pages(pgdat);
2102 :
2103 1 : reset_managed_pages_done = 1;
2104 : }
2105 :
2106 : /**
2107 : * memblock_free_all - release free pages to the buddy allocator
2108 : */
2109 1 : void __init memblock_free_all(void)
2110 : {
2111 : unsigned long pages;
2112 :
2113 : free_unused_memmap();
2114 1 : reset_all_zones_managed_pages();
2115 :
2116 1 : pages = free_low_memory_core_early();
2117 2 : totalram_pages_add(pages);
2118 1 : }
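     :
     : /*
     :  * This runs once per boot, typically from the architecture's
     :  * mem_init(), and marks the handover point after which the buddy
     :  * allocator rather than memblock satisfies page allocations.
     :  */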
2119 :
2120 : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2121 :
2122 : static int memblock_debug_show(struct seq_file *m, void *private)
2123 : {
2124 : struct memblock_type *type = m->private;
2125 : struct memblock_region *reg;
2126 : int i;
2127 : phys_addr_t end;
2128 :
2129 : for (i = 0; i < type->cnt; i++) {
2130 : reg = &type->regions[i];
2131 : end = reg->base + reg->size - 1;
2132 :
2133 : seq_printf(m, "%4d: ", i);
2134 : seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2135 : }
2136 : return 0;
2137 : }
2138 : DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2139 :
2140 : static int __init memblock_init_debugfs(void)
2141 : {
2142 : struct dentry *root = debugfs_create_dir("memblock", NULL);
2143 :
2144 : debugfs_create_file("memory", 0444, root,
2145 : &memblock.memory, &memblock_debug_fops);
2146 : debugfs_create_file("reserved", 0444, root,
2147 : &memblock.reserved, &memblock_debug_fops);
2148 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2149 : debugfs_create_file("physmem", 0444, root, &physmem,
2150 : &memblock_debug_fops);
2151 : #endif
2152 :
2153 : return 0;
2154 : }
2155 : __initcall(memblock_init_debugfs);
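     :
     : /*
     :  * Example session (hypothetical layout), with debugfs mounted at the
     :  * usual /sys/kernel/debug:
     :  *
     :  *	# cat /sys/kernel/debug/memblock/memory
     :  *	   0: 0x0000000080000000..0x00000000bfffffff
     :  *	   1: 0x0000000100000000..0x000000013fffffff
     :  */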
2156 :
2157 : #endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */