early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
max_pfn = max_low_pfn = max;
arm64_numa_init();

/*
 * Sparsemem tries to allocate bootmem in memory_present(), so must be
 * done after the fixed reservations.
 */
arm64_memory_present();
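For context, arm64_memory_present() (arch/arm64/mm/init.c in kernels of this era) simply walks every memblock region and registers it with sparsemem, node by node. Roughly, from memory of that source:

#ifdef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	/* Register each memblock region, with its node id, as present. */
	for_each_memblock(memory, reg)
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#else
static void __init arm64_memory_present(void)
{
}
#endif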
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};
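The WARNING above exists because section lookup masks the section number with SECTION_ROOT_MASK: SECTIONS_PER_ROOT is a power of two (and the mask valid) only if sizeof(struct mem_section) is. A rough sketch of the helpers from include/linux/mmzone.h of this era:

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	/* Power-of-2 sizing makes this mask equivalent to "% SECTIONS_PER_ROOT". */
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}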
/* Record a memory area against a node. */
void __init memory_present(int nid,
			   unsigned long start,	// starting PFN
			   unsigned long end)	// ending PFN
{
	unsigned long pfn;
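	/*
	 * NOTE: the rest of this body is a sketch reconstructed from
	 * mm/sparse.c of this era (~v4.19); helpers such as
	 * sparse_index_init(), sparse_encode_early_nid() and the
	 * memblock_virt_alloc() allocator name are from that source,
	 * not from the excerpt above.
	 */
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_virt_alloc(size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* Allocate the root/section entry if it is not there yet. */
		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			/* Stash the node id until the real mem_map exists. */
			ms->section_mem_map = sparse_encode_early_nid(nid) |
						SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}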
/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * And number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	unsigned long pnum, usemap_longs, *usemap;
	struct page *map;
	usemap_longs = BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS);
	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
							  usemap_size() *
							  map_count);
	if (!usemap) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	// ------------------------------------------------------------- (1)
	// section_map_size() = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION)
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		if (pnum >= pnum_end)
			break;
		// --------------------------------------------------------- (2)
		map = sparse_mem_map_populate(pnum, nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usemap);
		// --------------------------------------------------------- (3)
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usemap);
		usemap += usemap_longs;
	}
	// ------------------------------------------------------------- (4)
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}
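sparse_init_nid() is driven by sparse_init(), which groups the present sections of each node into a single batched call so that the usemaps and the mem_map buffer can be allocated once per node. Roughly, from mm/sparse.c of the same era:

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		/* Keep counting while the sections stay on the same node. */
		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}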
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
// mem_map - section_nr_to_pfn(pnum) is what gets embedded in section_mem_map;
// both terms are guaranteed not to clobber the low flag bits. See
// include/linux/mmzone.h:
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information. The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum). The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits. PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 *      To sum it up, at least 6 bits are available.
 */
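The encode/decode pair and sparse_init_one_section() itself make the scheme concrete. A sketch reconstructed from mm/sparse.c of the same era (names as in that source):

static unsigned long sparse_encode_mem_map(struct page *mem_map,
					   unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	/* The flag bits must fit below the guaranteed alignment. */
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL << PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
				   unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
						SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;
}

So pfn_to_page() for a sparsemem section reduces to reading section_mem_map, masking off the flag bits, and adding the pfn: the encoded value was chosen precisely so that the section's base pfn cancels out.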