commit 8ffd7c5cadb62caf16f6e2eab1909cea3daf3490 Author: Brandon Bergren Date: Fri Sep 27 23:44:25 2019 -0500 Book-E clang fixes: locore and SPR_DEAR booke local crap booke boot page stuff booke clang stuff. More booke clang work more booke booke tidying booke whitespace More booke cleanup booke tweaks More booke diff --git a/sys/powerpc/booke/locore.S b/sys/powerpc/booke/locore.S index cc77b23885a..c630793bf36 100644 --- a/sys/powerpc/booke/locore.S +++ b/sys/powerpc/booke/locore.S @@ -39,7 +39,11 @@ #include #include -#define TMPSTACKSZ 16384 +#ifdef _CALL_ELF +.abiversion _CALL_ELF +#endif + +#define TMPSTACKSZ 32768 #ifdef __powerpc64__ #define GET_TOCBASE(r) \ @@ -76,6 +80,12 @@ #define WORD_SIZE 4 #endif +#ifdef __powerpc64__ + /* Placate lld by creating a kboot stub. */ + .section ".text.kboot", "x", @progbits + b __start +#endif + .text .globl btext btext: @@ -310,6 +320,9 @@ done_mapping: 1: mflr %r3 ld %r1,0(%r3) add %r1,%r1,%r3 +/* + * Relocate kernel + */ bl 1f .llong _DYNAMIC-. 1: mflr %r3 @@ -380,15 +393,62 @@ done_mapping: .globl __boot_page .align 12 __boot_page: - bl 1f + /* + * The boot page is a special page of memory used during AP bringup. + * Before the AP comes out of reset, the physical 4K page holding this + * code is arranged to be mapped at 0xfffff000 by use of + * platform-dependent registers. + * + * Alternatively, this page may be executed using an ePAPR-standardized + * method -- writing to the address specified in "cpu-release-addr". + * + * In either case, execution begins at the last instruction of the + * page, which is a branch back to the start of the page. + * + * The code in the page must do initial MMU setup and normalize the + * TLBs for regular operation in the correct address space before + * reading outside the page. + * + * This implementation accomplishes this by: + * 1) Wiping TLB0 and all TLB1 entries but the one currently in use. 
+ * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching + * to it with rfi. This entry must NOT be in TLB1 slot 0. + * (This is needed to give the code freedom to clean up AS=0.) + * 3) Removing the initial TLB1 entry, leaving us with a single valid + * TLB1 entry, NOT in slot 0. + * 4) Installing an AS0 entry in TLB1 slot 0 mapping the 64MB kernel + * segment at its final virtual address. A second rfi is done to + * switch to the final address space. At this point we can finally + * access the rest of the kernel segment safely. + * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in + * a consistent (but minimal) state. + * 6) Set up TOC, stack, and pcpu registers. + * 7) Now that we can finally call C code, call pmap_bootstrap_ap(), + * which finishes copying in the shared TLB1 entries. + * + * At this point, the MMU is fully set up, and we can proceed with + * running the actual AP bootstrap code. + * + * Pieces of this code are also used for UP kernel, but in this case + * the sections specific to boot page functionality are dropped by + * the preprocessor. + */ +#ifdef __powerpc64__ + nop /* PPC64 alignment word. 64-bit target. */ +#endif + bl 1f /* 32-bit target. */ .globl bp_trace bp_trace: - .long 0 + ADDR(0) /* Trace pointer (%r31). */ .globl bp_kernload bp_kernload: - .long 0 + ADDR(0) /* Kern phys. load address. */ + + .globl bp_virtaddr +bp_virtaddr: + ADDR(0) /* Virt. address of __boot_page. 
*/ /* * Initial configuration @@ -445,7 +505,7 @@ bp_kernload: mfmsr %r3 ori %r3, %r3, (PSL_IS | PSL_DS) #ifdef __powerpc64__ - oris %r3, %r3, PSL_CM@h + oris %r3, %r3, PSL_CM@h /* Ensure we're in 64-bit after RFI */ #endif bl 3f 3: mflr %r4 @@ -462,7 +522,7 @@ bp_kernload: bl tlb1_inval_entry /* - * Setup final mapping in TLB1[1] and switch to it + * Setup final mapping in TLB1[0] and switch to it */ /* Final kernel mapping, map in 64 MB of RAM */ lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */ @@ -482,31 +542,19 @@ bp_kernload: isync /* Retrieve kernel load [physical] address from bp_kernload */ +5: + mflr %r3 #ifdef __powerpc64__ - b 0f - .align 3 -0: - nop -#endif - bl 5f - ADDR(bp_kernload) - ADDR(__boot_page) -5: mflr %r3 -#ifdef __powerpc64__ - ld %r4, 0(%r3) - ld %r5, 8(%r3) - clrrdi %r3, %r3, 12 + clrrdi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */ #else - lwz %r4, 0(%r3) - lwz %r5, 4(%r3) - rlwinm %r3, %r3, 0, 0, 19 + clrrwi %r3, %r3, PAGE_SHIFT /* trunc_page(%r3) */ #endif - sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */ - lwzx %r3, %r4, %r3 + LOAD %r4, (bp_kernload - __boot_page)(%r3) + LOAD %r5, (bp_virtaddr - __boot_page)(%r3) /* Set RPN and protection */ - ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l - mtspr SPR_MAS3, %r3 + ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l + mtspr SPR_MAS3, %r4 isync li %r4, 0 mtspr SPR_MAS7, %r4 @@ -520,8 +568,8 @@ bp_kernload: bl 6f 6: mflr %r3 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */ - add %r3, %r3, %r5 /* Make this virtual address */ - addi %r3, %r3, (7f - 6b) + add %r3, %r3, %r5 /* Make this a virtual address */ + addi %r3, %r3, (7f - 6b) /* And figure out return address. */ #ifdef __powerpc64__ lis %r4, PSL_CM@h /* Note AS=0 */ #else @@ -779,6 +827,11 @@ __boot_page_padding: */ .space 4092 - (__boot_page_padding - __boot_page) b __boot_page + /* + * This is the end of the boot page. + * During AP startup, the previous instruction is at 0xfffffffc + * virtual (i.e. the reset vector.) 
+ */ #endif /* SMP */ /************************************************************************/ @@ -902,6 +955,7 @@ ENTRY(bpred_enable) * created. */ ENTRY(get_spr) + /* Note: The spr number is patched at runtime */ mfspr %r3, 0 blr @@ -921,8 +975,10 @@ tmpstackbound: .space 10240 /* XXX: this really should not be necessary */ #ifdef __powerpc64__ TOC_ENTRY(tmpstack) +#ifdef SMP TOC_ENTRY(bp_kernload) #endif +#endif /* * Compiled KERNBASE locations diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c index ef0fd8a9af5..6c1cba6021b 100644 --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -1675,7 +1675,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) /* Calculate corresponding physical addresses for the kernel region. */ phys_kernelend = kernload + kernsize; debugf("kernel image and allocated data:\n"); - debugf(" kernload = 0x%09llx\n", (uint64_t)kernload); + debugf(" kernload = 0x%09"PRIx64"\n", (uint64_t)kernload); debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart); debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize); @@ -1856,8 +1856,8 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) thread0.td_kstack = kstack0; thread0.td_kstack_pages = kstack_pages; - debugf("kstack_sz = 0x%08x\n", kstack0_sz); - debugf("kstack0_phys at 0x%09llx - 0x%09llx\n", + debugf("kstack_sz = 0x%08"PRIxPTR"\n", kstack0_sz); + debugf("kstack0_phys at 0x%09"PRIx64" - 0x%09"PRIx64"\n", kstack0_phys, kstack0_phys + kstack0_sz); debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n", kstack0, kstack0 + kstack0_sz); @@ -3997,7 +3997,7 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) for (idx = 0; idx < nents; idx++) { pgsz = pgs[idx]; - debugf("%u: %llx -> %jx, size=%jx\n", idx, pa, + debugf("%u: %"PRIx64" -> %jx, size=%jx\n", idx, pa, (uintmax_t)va, (uintmax_t)pgsz); tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM); @@ -4306,7 +4306,7 @@ tlb_print_entry(int i, uint32_t mas1, uint32_t 
mas2, uint32_t mas3, size = tsize2size(tsize); printf("%3d: (%s) [AS=%d] " - "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " + "sz = 0x%08"PRIxPTR" tsz = %d tid = %d mas1 = 0x%08x " "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n", i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); } diff --git a/sys/powerpc/booke/trap_subr.S b/sys/powerpc/booke/trap_subr.S index 1b91503b5a3..5235fb688c5 100644 --- a/sys/powerpc/booke/trap_subr.S +++ b/sys/powerpc/booke/trap_subr.S @@ -120,7 +120,7 @@ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ - mfdear %r30; \ + mfspr %r30, SPR_DEAR; \ mfesr %r31; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ @@ -143,7 +143,7 @@ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ - mfdear %r30; \ + mfspr %r30, SPR_DEAR; \ mfesr %r31; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ @@ -375,9 +375,9 @@ rlwinm outr, inr, 6, 23, 25; /* 4 x TLBSAVE_LEN */ #endif #define TLB_PROLOG \ - mtsprg4 %r1; /* Save SP */ \ - mtsprg5 %r28; \ - mtsprg6 %r29; \ + mtspr SPR_SPRG4, %r1; /* Save SP */ \ + mtspr SPR_SPRG5, %r28; \ + mtspr SPR_SPRG6, %r29; \ /* calculate TLB nesting level and TLBSAVE instance address */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ @@ -388,8 +388,8 @@ add %r1, %r1, %r29; /* current TLBSAVE ptr */ \ \ /* save R20-31 */ \ - mfsprg5 %r28; \ - mfsprg6 %r29; \ + mfspr %r28, SPR_SPRG5; \ + mfspr %r29, SPR_SPRG6; \ TLB_SAVE_REGS(%r1); \ /* save LR, CR */ \ mflr %r30; \ @@ -402,7 +402,7 @@ STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \ STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \ isync; \ - mfsprg4 %r1 + mfspr %r1, SPR_SPRG4 /* * restores LR, CR, SRR0-1, R20-31 from the TLBSAVE area @@ -410,7 +410,7 @@ 
* same notes as for the TLB_PROLOG */ #define TLB_RESTORE \ - mtsprg4 %r1; /* Save SP */ \ + mtspr SPR_SPRG4, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ /* calculate TLB nesting level and TLBSAVE instance addr */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ @@ -432,7 +432,7 @@ mtsrr1 %r31; \ /* restore R20-31 */ \ TLB_RESTORE_REGS(%r1); \ - mfsprg4 %r1 + mfspr %r1, SPR_SPRG4 #ifdef SMP #define TLB_LOCK \ @@ -693,7 +693,7 @@ INTERRUPT(int_data_tlb_error) TLB_PROLOG TLB_LOCK - mfdear %r31 + mfspr %r31, SPR_DEAR /* * Save MAS0-MAS2 registers. There might be another tlb miss during @@ -1057,7 +1057,7 @@ ASENTRY_NOPROF(breakpoint) mflr %r31 mtsrr0 %r31 - mfdear %r30 + mfspr %r30, SPR_DEAR mfesr %r31 STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3) diff --git a/sys/powerpc/conf/dpaa/config.dpaa b/sys/powerpc/conf/dpaa/config.dpaa index 64f3968a355..b42619713c6 100644 --- a/sys/powerpc/conf/dpaa/config.dpaa +++ b/sys/powerpc/conf/dpaa/config.dpaa @@ -6,6 +6,11 @@ makeoptions DPAA_COMPILE_CMD="${LINUXKPI_C} \ -Wno-cast-qual -Wno-unused-function -Wno-init-self -fms-extensions \ -include $S/contrib/ncsw/build/dflags.h \ -Wno-error=missing-prototypes \ + -Wno-error=parentheses-equality \ + -Wno-error=self-assign \ + -Wno-error=incompatible-pointer-types-discards-qualifiers \ + -Wno-error=non-literal-null-conversion \ + -Wno-error=enum-conversion \ -I$S/contrib/ncsw/build/ \ -I$S/contrib/ncsw/inc \ -I$S/contrib/ncsw/inc/cores \ diff --git a/sys/powerpc/include/spr.h b/sys/powerpc/include/spr.h index 25613c50298..1b0561c29e4 100644 --- a/sys/powerpc/include/spr.h +++ b/sys/powerpc/include/spr.h @@ -504,7 +504,11 @@ #define SPR_HASH2 0x3d3 /* .68 Secondary Hash Address Register */ #define SPR_IMISS 0x3d4 /* .68 Instruction TLB Miss Address Register */ #define SPR_TLBMISS 0x3d4 /* .6. TLB Miss Address Register */ +#if defined(BOOKE_PPC4XX) #define SPR_DEAR 0x3d5 /* 4.. 
Data Error Address Register */ +#else +#define SPR_DEAR 0x03d /* ..8 Data Exception Address Register */ +#endif #define SPR_ICMP 0x3d5 /* .68 Instruction TLB Compare Register */ #define SPR_PTEHI 0x3d5 /* .6. Instruction TLB Compare Register */ #define SPR_EVPR 0x3d6 /* 4.. Exception Vector Prefix Register */ diff --git a/sys/powerpc/mpc85xx/platform_mpc85xx.c b/sys/powerpc/mpc85xx/platform_mpc85xx.c index c1181e87ed8..a19a177f209 100644 --- a/sys/powerpc/mpc85xx/platform_mpc85xx.c +++ b/sys/powerpc/mpc85xx/platform_mpc85xx.c @@ -68,7 +68,8 @@ __FBSDID("$FreeBSD$"); extern void *ap_pcpu; extern vm_paddr_t kernload; /* Kernel physical load address */ extern uint8_t __boot_page[]; /* Boot page body */ -extern uint32_t bp_kernload; +extern vm_paddr_t bp_kernload; /* Boot page copy of kernload */ +extern vm_offset_t bp_virtaddr; /* Virtual address of boot page */ extern vm_offset_t __startkernel; struct cpu_release { @@ -354,10 +355,12 @@ mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc) pmap_kenter(rel_page, rel_pa & ~PAGE_MASK); rel = (struct cpu_release *)rel_va; bptr = pmap_kextract((uintptr_t)__boot_page); + + cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel)); + rel->pir = pc->pc_cpuid; __asm __volatile("sync" ::: "memory"); + rel->entry_h = (bptr >> 32); __asm __volatile("sync" ::: "memory"); cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel)); - rel->pir = pc->pc_cpuid; __asm __volatile("sync"); - rel->entry_h = (bptr >> 32); - rel->entry_l = bptr; __asm __volatile("sync"); + rel->entry_l = bptr & 0xffffffff; __asm __volatile("sync" ::: "memory"); cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel)); if (bootverbose) printf("Waking up CPU %d via CPU release page %p\n", @@ -397,11 +400,13 @@ mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc) cpuid = pc->pc_cpuid + 24; } bp_kernload = kernload; + bp_virtaddr = (vm_offset_t)&__boot_page; /* - * bp_kernload is in the boot page. 
Sync the cache because ePAPR - * booting has the other core(s) already running. + * bp_kernload and bp_virtaddr are in the boot page. Sync the cache + * because ePAPR booting has the other core(s) already running. */ cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload)); + cpu_flush_dcache(&bp_virtaddr, sizeof(bp_virtaddr)); ap_pcpu = pc; __asm __volatile("msync; isync"); diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index 56f1609f8d4..4a6125a7333 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -144,7 +144,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot, u_int flags, int8_t psind) { - CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va, + CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %#x, %d)", pmap, va, p, prot, flags, psind); return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind)); }