// ./arch/arm64/include/asm/pgtable-hwdef.h
/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
 *
 * which gets simplified as :
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
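To see that the simplified form really is the rounded-up division, here is a small worked check (ordinary userspace C, not kernel code) for a few common granule/VA configurations:

/*
 * Minimal sketch: evaluate the same arithmetic as ARM64_HW_PGTABLE_LEVELS
 * for typical configurations and confirm the expected number of levels.
 */
#include <stdio.h>

static int pgtable_levels(int va_bits, int page_shift)
{
	/* (va_bits - 4) / (page_shift - 3), the simplified form above */
	return (va_bits - 4) / (page_shift - 3);
}

int main(void)
{
	/* 4K pages (PAGE_SHIFT = 12): 39-bit VA -> 3 levels, 48-bit VA -> 4 levels */
	printf("%d\n", pgtable_levels(39, 12));	/* prints 3 */
	printf("%d\n", pgtable_levels(48, 12));	/* prints 4 */
	/* 64K pages (PAGE_SHIFT = 16): 42-bit VA -> 2 levels, 48-bit VA -> 3 levels */
	printf("%d\n", pgtable_levels(42, 16));	/* prints 2 */
	printf("%d\n", pgtable_levels(48, 16));	/* prints 3 */
	return 0;
}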
// ./arch/arm64/include/asm/kernel-pgtable.h
/*
 * The linear mapping and the start of memory are both 2M aligned (per
 * the arm64 booting.txt requirements). Hence we can use section mapping
 * with 4K (section size = 2M) but not with 16K (section size = 32M) or
 * 64K (section size = 512M).
 */
// I have commented out the branch that does not apply here:
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_SWAPPER_USES_SECTION_MAPS 1
#else
// #define ARM64_SWAPPER_USES_SECTION_MAPS 0
#endif
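The section sizes quoted in that comment follow directly from the granule: one section (block) entry covers PAGE_SHIFT + (PAGE_SHIFT - 3) bits of address space. A quick check in plain C, for illustration only:

#include <stdio.h>

int main(void)
{
	int page_shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K pages */

	for (int i = 0; i < 3; i++) {
		int shift = page_shifts[i];
		unsigned long long section = 1ULL << (2 * shift - 3);

		/* prints 2 MB, 32 MB, 512 MB respectively */
		printf("PAGE_SHIFT=%d -> section size = %llu MB\n",
		       shift, section >> 20);
	}
	return 0;
}

Only the 2MB section fits the 2MB alignment guarantee from booting.txt, which is why section mapping is enabled for 4K pages only.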
// ./arch/arm64/kernel/head.S
/*
 * The following callee saved general purpose registers are used on the
 * primary lowlevel boot path:
 *
 *  Register   Scope                       Purpose
 *  x21        stext() .. start_kernel()   FDT pointer passed at boot in x0
 *  x23        stext() .. start_kernel()   physical misalignment/KASLR offset
 *  x28        __create_page_tables()      callee preserved temp register
 *  x19/x20    __primary_switch()          callee preserved temp registers
 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// ------------------------------ (1)
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch		// ------------------------------ (2)
ENDPROC(stext)
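The x23 value computed above is simply the kernel image's misalignment within its 2MB-aligned window. A small model in C (illustrative only; the 2MB MIN_KIMG_ALIGN value and the sample addresses are assumptions, not taken from this head.S excerpt):

#include <stdio.h>

#define MIN_KIMG_ALIGN_SHIFT	21				/* assume 2MB alignment */
#define MIN_KIMG_ALIGN		(1UL << MIN_KIMG_ALIGN_SHIFT)

static unsigned long kaslr_offset(unsigned long kernel_phys)
{
	/* mirrors: and x23, x23, MIN_KIMG_ALIGN - 1 */
	return kernel_phys & (MIN_KIMG_ALIGN - 1);
}

int main(void)
{
	printf("%#lx\n", kaslr_offset(0x40200000UL));	/* 2MB-aligned load -> 0 */
	printf("%#lx\n", kaslr_offset(0x40280000UL));	/* misaligned -> 0x80000 */
	return 0;
}

Without KASLR and with a 2MB-aligned load address, the offset is 0, as the inline comment says.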
/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr
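"Identity mapping" here means that the virtual address of each mapped block equals its physical address, so the PC remains valid at the instant the MMU is switched on. A conceptual sketch in C (hypothetical helpers, not head.S's mapping macros; single-level table and a made-up address, 4K granule assumed):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SECTION_SHIFT	21				/* 2MB blocks, 4K granule assumption */
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define PTRS_PER_TABLE	512

/* hypothetical one-level table, just for the illustration */
static uint64_t idmap_table[PTRS_PER_TABLE];

/* install a block entry whose output address equals its input address */
static void identity_map_section(uint64_t phys)
{
	size_t idx = (phys >> SECTION_SHIFT) % PTRS_PER_TABLE;

	idmap_table[idx] = (phys & ~(uint64_t)(SECTION_SIZE - 1)) | 0x1;	/* valid bit, simplified */
}

int main(void)
{
	/* cover the 2MB block that would hold the MMU-enable code */
	identity_map_section(0x40080000ULL);
	printf("%#llx\n", (unsigned long long)
	       idmap_table[(0x40080000ULL >> SECTION_SHIFT) % PTRS_PER_TABLE]);	/* 0x40000001 */
	return 0;
}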
	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir		// ------------------------------ (1)
	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bl	__inval_dcache_area		// ------------------------------ (2)
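For a sense of scale, the region being invalidated is only a handful of pages: each directory reserves one page per page-table level. A rough calculation in C, assuming a 4K-page, 48-bit VA kernel with section maps (so 3 levels each for the idmap and swapper directories; these level counts are an assumption for the example):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define SWAPPER_PGTABLE_LEVELS	3	/* assumed: CONFIG_PGTABLE_LEVELS - 1 with section maps */
#define IDMAP_PGTABLE_LEVELS	3	/* assumed for the idmap under the same config */

int main(void)
{
	unsigned long swapper_dir_size = SWAPPER_PGTABLE_LEVELS * PAGE_SIZE;
	unsigned long idmap_dir_size   = IDMAP_PGTABLE_LEVELS * PAGE_SIZE;

	/* prints 24576, i.e. 24KB, before any RESERVED_TTBR0_SIZE is added */
	printf("%lu\n", idmap_dir_size + swapper_dir_size);
	return 0;
}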
	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	dmb	sy
	bl	__inval_dcache_area
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	// Load the two page table bases into TTBRx_EL1:
	// 1. idmap_pg_dir goes into TTBR0_EL1 (the kernel's physical address lies in
	//    that low range), while swapper_pg_dir goes into TTBR1_EL1
	// 2. note that what is written into TTBRx is a physical address
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	msr	ttbr0_el1, x1			// load TTBR0
	msr	ttbr1_el1, x2			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)
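The ubfx/cmp pair at the top simply extracts a 4-bit granule-support field from ID_AA64MMFR0_EL1 and compares it against the "supported" encoding. A minimal sketch in plain C of the same check (the bit position assumes the 4K-granule TGRAN4 field at bit 28; other granules use different fields, and the sample register values are made up):

#include <stdio.h>
#include <stdint.h>

#define ID_AA64MMFR0_TGRAN4_SHIFT	28	/* assumption: 4K granule field */
#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0

static int granule_supported(uint64_t id_aa64mmfr0)
{
	/* equivalent of: ubfx x2, x1, #SHIFT, 4 followed by the cmp */
	uint64_t tgran = (id_aa64mmfr0 >> ID_AA64MMFR0_TGRAN4_SHIFT) & 0xf;

	return tgran == ID_AA64MMFR0_TGRAN4_SUPPORTED;
}

int main(void)
{
	/* a register value whose TGRAN4 field is 0 reports 4K granule support */
	printf("%d\n", granule_supported(0x0000000000001122ULL));	/* prints 1 */
	/* 0xf in that field means the 4K granule is not implemented */
	printf("%d\n", granule_supported(0x0000000f00001122ULL));	/* prints 0 */
	return 0;
}

If the field does not report support, the assembly branches to __no_granule_support and the CPU is parked instead of enabling the MMU.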