/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process.
 *
 * These 'compile-time allocated' memory buffers are
 * page-sized. Use set_fixmap(idx,phys) to associate
 * physical memory with fixmap indices.
 */
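The 'index to address' translation the comment refers to is a plain downward shift from FIXADDR_TOP; quoted here from include/asm-generic/fixmap.h for reference (check your tree for the exact form):

#define __fix_to_virt(x)        (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)        ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
        BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
        return __fix_to_virt(idx);
}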
/*
 * Reserve a virtual window for the FDT that is 2 MB larger than the
 * maximum supported size, and put it at the top of the fixmap region.
 * The additional space ensures that any FDT that does not exceed
 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
 * 2 MB alignment boundaries.
 *
 * Keep this at the top so it remains 2 MB aligned.
 */
#define FIX_FDT_SIZE            (MAX_FDT_SIZE + SZ_2M)

        FIX_FDT_END,
        FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
        // (see the worked example after this enum excerpt)
        FIX_EARLYCON_MEM_BASE,
        FIX_TEXT_POKE0,
#ifdef CONFIG_ACPI_APEI_GHES
        /* Used for GHES mapping from assorted contexts */
        FIX_APEI_GHES_IRQ,
        FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */
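A worked example of the FDT window arithmetic above, assuming the mainline value MAX_FDT_SIZE = SZ_2M from arch/arm64/include/asm/boot.h:

// Assuming MAX_FDT_SIZE = SZ_2M, as in mainline arm64:
//
//   FIX_FDT_SIZE = SZ_2M + SZ_2M = 4 MB   -> 1024 fixmap slots with 4K pages
//
// The FDT is mapped from round_down(dt_phys, SZ_2M), so a blob of up to
// 2 MB may start anywhere inside a 2 MB block and still ends before
// round_down(dt_phys, SZ_2M) + 4 MB: it always fits in the window, even
// when it straddles a 2 MB alignment boundary.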
// ./arch/arm64/mm/mmu.c
/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
        pgd_t *pgdp, pgd;
        pud_t *pudp;
        pmd_t *pmdp;

        // ----------------------------------------------------------------------------------------------- (1)
        // #define VMEMMAP_START  (PAGE_OFFSET - VMEMMAP_SIZE)
        // #define PCI_IO_END     (VMEMMAP_START - SZ_2M)
        // #define PCI_IO_START   (PCI_IO_END - PCI_IO_SIZE)
        // #define FIXADDR_TOP    (PCI_IO_START - SZ_2M)
        //
        // hi_addr
        //  |        - PCI_IO_END
        //  |        |
        //  |        |- PCI_IO_START
        //  |        |
        //  |  SZ_2M |
        //  |        |
        //  |        |- FIXADDR_TOP    -|
        //  |        |                  |
        //  |        |                  | 4MB < FIXADDR_SIZE < 6MB
        //  |        |                  |
        // \|/       - FIXADDR_START   -|
        // lo_addr   |
        //           | ...
        //           - FIX_PTE
        //           |
        //           - FIX_PMD
        //           |
        //           - FIX_PUD
        //           |
        //           - FIX_PGD
        //           | ...
        //
        // #define FIXADDR_SIZE   (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
        // #define FIXADDR_START  (FIXADDR_TOP - FIXADDR_SIZE)
        //
        // FIXADDR_START and FIX_PGD must fall in the same PMD; otherwise one
        // PTE table (bm_pte) would not be enough to map them both
        unsigned long addr = FIXADDR_START;     // 0xFFFF7DFFFE7F9000

        // ----------------------------------------------------------------------------------------------- (2)
        // get the virtual address of the PGD entry
        pgdp = pgd_offset_k(addr);              // 0xFFFF00001223C7D8
        pgd = READ_ONCE(*pgdp);                 // pgd.pgd = 0x421AA003

        // ----------------------------------------------------------------------------------------------- (3)
        // if this entry is already populated (and not by bm_pud), reuse it;
        // with the values traced here the else branch below is taken
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pudp = pud_offset_kimg(pgdp, addr);
        } else {
                // --------------------------------------------------------------------------------------- (4)
                // if this entry is empty, populate it first
                if (pgd_none(pgd))
                        __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);

                // --------------------------------------------------------------------------------------- (5)
                // fixmap_pxd() 1) uses the PGD/PUD/PMD entry to get the physical base
                //                 address of the next-level table
                //              2) uses addr to compute the table index and from it the
                //                 entry's physical address
                //              3) adds the linear-map offset to get the entry's virtual
                //                 address
                pudp = fixmap_pud(addr);        // 0xFFFF0000121AAFF8
                                                // pudp->pud = 0x421A9003
        }

        // ----------------------------------------------------------------------------------------------- (6)
        // populate the PUD and PMD entries
        if (pud_none(READ_ONCE(*pudp)))
                __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);

        pmdp = fixmap_pmd(addr);                // 0xFFFF0000121A9F98
                                                // pmdp->pmd = 0x421A8003
        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
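The bm_* tables that early_fixmap_init() wires up are statically allocated in arch/arm64/mm/mmu.c (quoted from mainline; the exact attributes can differ between versions):

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;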
        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
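FIX_BTMAP_BEGIN/FIX_BTMAP_END bound the early-ioremap slots this assertion is about; their definitions (arch/arm64/include/asm/fixmap.h, quoted from memory) show the size of that range:

#define NR_FIX_BTMAPS           (SZ_256K / PAGE_SIZE)
#define FIX_BTMAPS_SLOTS        7
#define TOTAL_FIX_BTMAPS        (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)

        FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,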
        if (!dt_virt || !early_init_dt_scan(dt_virt)) {
                pr_crit("\n"
                        "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
                        "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
                        "\nPlease check your bootloader.",
                        &dt_phys, dt_virt);
                while (true)
                        cpu_relax();
        }
        name = of_flat_dt_get_machine_name();
        if (!name)
                return;
        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the magic and size
         * fields of the FDT header after mapping the first chunk, double check
         * here if that is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)        // dt_phys = 0x48000000
                return NULL;
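Both limits come from arch/arm64/include/asm/boot.h (values as in mainline):

#define MIN_FDT_ALIGN           8
#define MAX_FDT_SIZE            SZ_2M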
        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);
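SWAPPER_BLOCK_SIZE is what makes the 64k/4k distinction above concrete; quoted from arch/arm64/include/asm/kernel-pgtable.h of this kernel era (newer trees restructure this, so check yours):

#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SIZE      SECTION_SIZE    /* 2 MB sections with 4K pages */
#else
#define SWAPPER_BLOCK_SIZE      PAGE_SIZE       /* plain pages with 64K pages */
#endif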
        /* map the first chunk so we can read the size from the header */
        // ----------------------------------------------------------------------------------------------- (1)
        // map one SWAPPER_BLOCK_SIZE chunk (2 MB with 4K pages) first; if that
        // turns out not to be enough, the region is mapped again further down
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                               dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;
        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;
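For context, mainline continues fixmap_remap_fdt() by remapping the region with the full length when the blob spills past the first chunk; offset was computed earlier in the function as dt_phys % SWAPPER_BLOCK_SIZE (quoted from memory, so the rounding details may differ slightly in your tree):

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                                       dt_virt_base,
                                       round_up(offset + *size, SWAPPER_BLOCK_SIZE),
                                       prot);

        return dt_virt;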
/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                          phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
                             NO_CONT_MAPPINGS);
}
        /*
         * If the virtual and physical address don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;
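A quick illustration, with made-up values, of what this check rejects:

// Hypothetical values, for illustration only:
//   phys = 0x48000400, virt = 0xFFFF7DFFFE7F9000
//   (phys ^ virt) & ~PAGE_MASK == 0x400  -> the offsets within the page
//   disagree, so no page-granular mapping can alias virt to phys as asked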
        end = addr + length;
        do {
                // --------------------------------------------------------------------------------------- (1)
                // next is the smaller of end and the virtual address where the next
                // PGD entry's range begins; for this early mapping, end is far smaller
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
                               flags);
                phys += next - addr;
        } while (pgdp++, addr = next, addr != end);
}
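pgd_addr_end() is the generic clamping helper; quoted for reference from include/asm-generic/pgtable.h (newer trees keep it in include/linux/pgtable.h):

#define pgd_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;  \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
})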
        if (pgd_none(pgd)) {
                phys_addr_t pud_phys;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc();
                __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
                pgd = READ_ONCE(*pgdp);
        }
        BUG_ON(pgd_bad(pgd));

        // ----------------------------------------------------------------------------------------------- (1)
        // pxd_set_fixmap_offset() does one thing: it maps FIX_PXD to the physical page
        // that holds the next-level table entry (see the macro sketch after this snippet).
        // For example:
        //   physical address of the pud entry: pgd_page_paddr(*pgdp) + pud_index(addr) * sizeof(pud_t)
        //   mapped virtual address           : FIX_PUD, plus the entry's offset within the page
        //
        // FIX_PUD and FIXADDR_START share the same PMD entry in bm_pmd (otherwise we would
        // need to allocate one more entry in bm_pmd), so the PTE that maps FIX_PUD lives in
        // bm_pte. After this mapping we can access the PUD table through FIX_PUD instead of
        // walking pgd -> pa of pud entry -> va of pud entry.
        pudp = pud_set_fixmap_offset(pgdp, addr);
        do {
                pud_t old_pud = READ_ONCE(*pudp);
                next = pud_addr_end(addr, end);
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys) &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pud_set_huge(pudp, phys, prot);
                        /*
                         * After the PUD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
                                                      READ_ONCE(pud_val(*pudp))));
                } else {
                        alloc_init_cont_pmd(pudp, addr, next, phys, prot,
                                            pgtable_alloc, flags);
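For reference, the pud_set_fixmap_offset() machinery described earlier and the use_1G_block() helper used above expand roughly as follows in kernels of this era (quoted from memory; names and details may differ slightly in your tree):

// arch/arm64/include/asm/pgtable.h
#define pud_offset_phys(dir, addr)      (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_set_fixmap(addr)            ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgdp, addr)       pud_set_fixmap(pud_offset_phys(pgdp, addr))
#define pud_clear_fixmap()              clear_fixmap(FIX_PUD)

// arch/arm64/mm/mmu.c
static bool use_1G_block(unsigned long addr, unsigned long next,
                         unsigned long phys)
{
        if (PAGE_SHIFT != 12)           /* 1GB blocks only exist with 4K granule */
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)    /* need 1GB alignment */
                return false;

        return true;
}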
/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;
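The excerpt above stops at the declarations; the remainder of the function in mainline installs or clears the PTE through bm_pte:

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}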
        /*
         * Check for initial section mappings in the pgd/pud.
         */
        BUG_ON(pud_sect(pud));
        if (pud_none(pud)) {
                phys_addr_t pmd_phys;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc();
                __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
                pud = READ_ONCE(*pudp);
        }
        BUG_ON(pud_bad(pud));
        do {
                pgprot_t __prot = prot;
                next = pmd_cont_addr_end(addr, end);
                /* use a contiguous mapping if the range is suitably aligned */
                if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
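For scale, with the usual arm64 contiguous-range geometry (assuming mainline values), one PTE_CONT run at the PMD level covers:

// Assuming mainline values for the contiguous-hint geometry:
//   4K granule : CONT_PMDS = 16, PMD_SIZE = 2 MB   -> 32 MB per contiguous run
//   64K granule: CONT_PMDS = 32, PMD_SIZE = 512 MB -> 16 GB per contiguous run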
                        /*
                         * After the PMD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
                                                      READ_ONCE(pmd_val(*pmdp))));
                } else {
                        alloc_init_cont_pte(pmdp, addr, next, phys, prot,
                                            pgtable_alloc, flags);