diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index bd46ea59693..57933ba464c 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -135,9 +135,9 @@ libkern/umoddi3.c		optional	powerpc | powerpcspe
 powerpc/aim/locore.S		optional	aim no-obj
 powerpc/aim/aim_machdep.c	optional	aim
 powerpc/aim/mmu_oea.c		optional	aim powerpc
-powerpc/aim/mmu_oea64.c		optional	aim
+powerpc/aim/mmu_oea64.c		optional	aim powerpc64
 powerpc/aim/mmu_radix.c		optional	aim powerpc64
-powerpc/aim/moea64_native.c	optional	aim
+powerpc/aim/moea64_native.c	optional	aim powerpc64
 powerpc/aim/mp_cpudep.c		optional	aim
 powerpc/aim/slb.c		optional	aim powerpc64
 powerpc/amigaone/platform_amigaone.c	optional	amigaone
@@ -152,7 +152,7 @@ powerpc/booke/pmap.c		optional	booke
 powerpc/booke/spe.c		optional	powerpcspe
 powerpc/cpufreq/dfs.c		optional	cpufreq
 powerpc/cpufreq/mpc85xx_jog.c	optional	cpufreq mpc85xx
-powerpc/cpufreq/pcr.c		optional	cpufreq aim
+powerpc/cpufreq/pcr.c		optional	cpufreq aim powerpc64
 powerpc/cpufreq/pmcr.c		optional	cpufreq aim powerpc64
 powerpc/cpufreq/pmufreq.c	optional	cpufreq aim pmu
 powerpc/fpu/fpu_add.c		optional	fpu_emu | powerpcspe
diff --git a/sys/powerpc/aim/aim_machdep.c b/sys/powerpc/aim/aim_machdep.c
index 0c476227fd7..a8faead1abd 100644
--- a/sys/powerpc/aim/aim_machdep.c
+++ b/sys/powerpc/aim/aim_machdep.c
@@ -139,19 +139,13 @@ struct bat battable[16];
 int radix_mmu = 0;
 
 #ifndef __powerpc64__
-/* Bits for running on 64-bit systems in 32-bit mode. */
-extern void *testppc64, *testppc64size;
-extern void *restorebridge, *restorebridgesize;
-extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
-extern void *trapcode64;
-
 extern Elf_Addr _GLOBAL_OFFSET_TABLE_[];
 #endif
 
 extern void *rstcode, *rstcodeend;
 extern void *trapcode, *trapcodeend;
 extern void *hypertrapcode, *hypertrapcodeend;
-extern void *generictrap, *generictrap64;
+extern void *generictrap;
 extern void *alitrap, *aliend;
 extern void *dsitrap, *dsiend;
 extern void *decrint, *decrsize;
@@ -242,10 +236,6 @@ aim_cpu_init(vm_offset_t toc)
 	register_t msr;
 	uint8_t *cache_check;
 	int cacheline_warn;
-#ifndef __powerpc64__
-	register_t scratch;
-	int ppc64;
-#endif
 
 	trap_offset = 0;
 	cacheline_warn = 0;
@@ -290,7 +280,7 @@ aim_cpu_init(vm_offset_t toc)
 
 	/*
 	 * Initialize the interrupt tables and figure out our cache line
-	 * size and whether or not we need the 64-bit bridge code.
+	 * size.
 	 */
 
 	/*
@@ -326,43 +316,7 @@ aim_cpu_init(vm_offset_t toc)
 		cacheline_size = 32;
 	}
 
-	#ifndef __powerpc64__
-	/*
-	 * Figure out whether we need to use the 64 bit PMAP. This works by
-	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
-	 * and setting ppc64 = 0 if that causes a trap.
-	 */
-
-	ppc64 = 1;
-
-	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
-	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);
-
-	__asm __volatile("\
-		mfmsr %0;	\
-		mtsprg2 %1;	\
-				\
-		mtmsrd %0;	\
-		mfsprg2 %1;"
-		: "=r"(scratch), "=r"(ppc64));
-
-	if (ppc64)
-		cpu_features |= PPC_FEATURE_64;
-
-	/*
-	 * Now copy restorebridge into all the handlers, if necessary,
-	 * and set up the trap tables.
-	 */
-
-	if (cpu_features & PPC_FEATURE_64) {
-		/* Patch the two instances of rfi -> rfid */
-		bcopy(&rfid_patch,&rfi_patch1,4);
-	#ifdef KDB
-		/* rfi_patch2 is at the end of dbleave */
-		bcopy(&rfid_patch,&rfi_patch2,4);
-	#endif
-	}
-	#else /* powerpc64 */
+	#ifdef __powerpc64__
 	cpu_features |= PPC_FEATURE_64;
 	#endif
 
@@ -375,23 +329,7 @@ aim_cpu_init(vm_offset_t toc)
 	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
 		bcopy(&trapcode, (void *)trap, trapsize);
 
-	#ifndef __powerpc64__
-	if (cpu_features & PPC_FEATURE_64) {
-		/*
-		 * Copy a code snippet to restore 32-bit bridge mode
-		 * to the top of every non-generic trap handler
-		 */
-
-		trap_offset += (size_t)&restorebridgesize;
-		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
-		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
-	}
-	#else
+	#ifdef __powerpc64__
 	trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
 	bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
 	bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
@@ -425,10 +363,7 @@ aim_cpu_init(vm_offset_t toc)
 	*((register_t *)TRAP_TOCBASE) = toc;
 #else
 	/* Set branch address for trap code */
-	if (cpu_features & PPC_FEATURE_64)
-		*((void **)TRAP_ENTRY) = &generictrap64;
-	else
-		*((void **)TRAP_ENTRY) = &generictrap;
+	*((void **)TRAP_ENTRY) = &generictrap;
 	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;
 
 	/* G2-specific TLB miss helper handlers */
@@ -453,6 +388,7 @@ aim_cpu_init(vm_offset_t toc)
 	 * in case the platform module had a better idea of what we
 	 * should do.
	 */
+#ifdef __powerpc64__
 	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
 		radix_mmu = 0;
 		TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
@@ -460,10 +396,11 @@ aim_cpu_init(vm_offset_t toc)
 			pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
 		else
 			pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
-	} else if (cpu_features & PPC_FEATURE_64)
+	} else
 		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
-	else
-		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
+#else
+	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
+#endif
 }
 
 /*
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index d3703496d02..c6cbec73d01 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -127,11 +127,7 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
  * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
  * index at (N << 45).
  */
-#ifdef __powerpc64__
 #define PV_LOCK_IDX(pa)	((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT)
-#else
-#define PV_LOCK_IDX(pa)	(pa_index(pa) % PV_LOCK_COUNT)
-#endif
 #define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
 #define PV_LOCK(pa)	mtx_lock(PV_LOCKPTR(pa))
 #define PV_UNLOCK(pa)	mtx_unlock(PV_LOCKPTR(pa))
@@ -189,13 +185,8 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
 #define BPVO_POOL_SIZE	327680 /* Sensible historical default value */
 #define BPVO_POOL_EXPANSION_FACTOR	3
 #define VSID_NBPW	(sizeof(u_int32_t) * 8)
-#ifdef __powerpc64__
 #define NVSIDS		(NPMAPS * 16)
 #define VSID_HASHMASK	0xffffffffUL
-#else
-#define NVSIDS		NPMAPS
-#define VSID_HASHMASK	0xfffffUL
-#endif
 
 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
 static boolean_t moea64_initialized = FALSE;
@@ -309,10 +300,7 @@ static int moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user,
 		    vm_offset_t *decoded_addr);
 static size_t moea64_scan_pmap(void);
 static void *moea64_dump_pmap_init(unsigned blkpgs);
-#ifdef __powerpc64__
 static void moea64_page_array_startup(long);
-#endif
-
 
 static struct pmap_funcs moea64_methods = {
 	.clear_modify = moea64_clear_modify,
@@ -352,9 +340,7 @@ static struct pmap_funcs moea64_methods = {
 	.quick_enter_page = moea64_quick_enter_page,
 	.quick_remove_page = moea64_quick_remove_page,
 	.page_is_mapped = moea64_page_is_mapped,
-#ifdef __powerpc64__
 	.page_array_startup = moea64_page_array_startup,
-#endif
 
 	/* Internal interfaces */
 	.mapdev = moea64_mapdev,
@@ -556,10 +542,6 @@ moea64_add_ofw_mappings(phandle_t mmu, size_t sz)
 
 	for (i = 0; i < sz; i++) {
 		pa_base = translations[i].om_pa;
-	#ifndef __powerpc64__
-		if ((translations[i].om_pa >> 32) != 0)
-			panic("OFW translations above 32-bit boundary!");
-	#endif
 
 		if (pa_base % PAGE_SIZE)
 			panic("OFW translation not page-aligned (phys)!");
@@ -594,7 +576,6 @@ moea64_add_ofw_mappings(phandle_t mmu, size_t sz)
 	}
 }
 
-#ifdef __powerpc64__
 static void
 moea64_probe_large_page(void)
 {
@@ -643,7 +624,6 @@ moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
 
 	slb_insert_kernel(entry.slbe, entry.slbv);
 }
-#endif
 
 static int
 moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
@@ -767,16 +747,6 @@ moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 	vm_paddr_t kernelphysstart, kernelphysend;
 	int rm_pavail;
 
-#ifndef __powerpc64__
-	/* We don't have a direct map since there is no BAT */
-	hw_direct_map = 0;
-
-	/* Make sure battable is zero, since we have no BAT */
-	for (i = 0; i < 16; i++) {
-		battable[i].batu = 0;
-		battable[i].batl = 0;
-	}
-#else
 	moea64_probe_large_page();
 
 	/* Use a direct map if we have large page support */
@@ -790,7 +760,6 @@ moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 	bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
 	__syncicache((void *)EXC_DSE, 0x80);
 	__syncicache((void *)EXC_ISE, 0x80);
-#endif
 
 	kernelphysstart = kernelstart & ~DMAP_BASE_ADDRESS;
 	kernelphysend = kernelend & ~DMAP_BASE_ADDRESS;
@@ -939,27 +908,13 @@ moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 	moea64_bpvo_pool = (struct pvo_entry *)
 	    PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
 
-	/*
-	 * Make sure kernel vsid is allocated as well as VSID 0.
-	 */
-	#ifndef __powerpc64__
-	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
-	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
-	moea64_vsid_bitmap[0] |= 1;
-	#endif
-
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
-	#ifdef __powerpc64__
 	for (i = 0; i < 64; i++) {
 		pcpup->pc_aim.slb[i].slbv = 0;
 		pcpup->pc_aim.slb[i].slbe = 0;
 	}
-	#else
-	for (i = 0; i < 16; i++)
-		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
-	#endif
 
 	kernel_pmap->pmap_phys = kernel_pmap;
 	CPU_FILL(&kernel_pmap->pm_active);
@@ -1026,32 +981,14 @@ moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 	/*
 	 * Map the entire KVA range into the SLB. We must not fault there.
 	 */
-	#ifdef __powerpc64__
 	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
 		moea64_bootstrap_slb_prefault(va, 0);
-	#endif
 
 	/*
 	 * Remap any early IO mappings (console framebuffer, etc.)
 	 */
 	bs_remap_earlyboot();
 
-	/*
-	 * Figure out how far we can extend virtual_end into segment 16
-	 * without running into existing mappings. Segment 16 is guaranteed
-	 * to contain neither RAM nor devices (at least on Apple hardware),
-	 * but will generally contain some OFW mappings we should not
-	 * step on.
-	 */
-
-	#ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
-	PMAP_LOCK(kernel_pmap);
-	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
-	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
-		virtual_end += PAGE_SIZE;
-	PMAP_UNLOCK(kernel_pmap);
-	#endif
-
 	/*
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
@@ -1160,14 +1097,9 @@ moea64_activate(struct thread *td)
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 
-	#ifdef __powerpc64__
 	PCPU_SET(aim.userslb, pm->pm_slb);
 	__asm __volatile("slbmte %0, %1; isync" ::
 	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
-	#else
-	PCPU_SET(curpmap, pm->pmap_phys);
-	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
-	#endif
 }
 
 void
@@ -1179,11 +1111,7 @@ moea64_deactivate(struct thread *td)
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 
-	#ifdef __powerpc64__
 	PCPU_SET(aim.userslb, NULL);
-	#else
-	PCPU_SET(curpmap, NULL);
-	#endif
 }
 
 void
@@ -1967,9 +1895,7 @@ moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen)
 {
 	size_t l;
-#ifdef __powerpc64__
 	struct slb *slb;
-#endif
 	register_t slbv;
 
 	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
@@ -1981,7 +1907,6 @@ moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 	else if (l != ulen)
 		return (EFAULT);
 
-#ifdef __powerpc64__
 	/* Try lockless look-up first */
 	slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
 
@@ -1996,12 +1921,6 @@ moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 
 	/* Mark segment no-execute */
 	slbv |= SLBV_N;
-#else
-	slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
-
-	/* Mark segment no-execute */
-	slbv |= SR_N;
-#endif
 
 	/* If we have already set this VSID, we can just return */
 	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
@@ -2011,12 +1930,8 @@ moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 	curthread->td_pcb->pcb_cpu.aim.usr_segm =
 	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
-#ifdef __powerpc64__
 	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
 	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
-#else
-	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
-#endif
 
 	return (0);
 }
@@ -2199,7 +2114,6 @@ moea64_get_unique_vsid(void) {
 	panic("%s: out of segments",__func__);
 }
 
-#ifdef __powerpc64__
 int
 moea64_pinit(pmap_t pmap)
 {
@@ -2212,33 +2126,6 @@ moea64_pinit(pmap_t pmap)
 
 	return (1);
 }
-#else
-int
-moea64_pinit(pmap_t pmap)
-{
-	int i;
-	uint32_t hash;
-
-	RB_INIT(&pmap->pmap_pvo);
-
-	if (pmap_bootstrapped)
-		pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap);
-	else
-		pmap->pmap_phys = pmap;
-
-	/*
-	 * Allocate some segment registers for this pmap.
-	 */
-	hash = moea64_get_unique_vsid();
-
-	for (i = 0; i < 16; i++)
-		pmap->pm_sr[i] = VSID_MAKE(i, hash);
-
-	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
-
-	return (1);
-}
-#endif
 
 /*
  * Initialize the pmap associated with process 0.
@@ -2379,14 +2266,8 @@ moea64_release(pmap_t pmap)
 	/*
 	 * Free segment registers' VSIDs
 	 */
-	#ifdef __powerpc64__
 	slb_free_tree(pmap);
 	slb_free_user_cache(pmap->pm_slb);
-	#else
-	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
-
-	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
-	#endif
 }
 
 /*
@@ -2590,7 +2471,6 @@ moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head,
 	if (pvo->pvo_pmap == kernel_pmap)
 		isync();
 
-#ifdef __powerpc64__
 	/*
 	 * Make sure all our bootstrap mappings are in the SLB as soon
 	 * as virtual memory is switched on.
@@ -2598,7 +2478,6 @@ moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head,
 	if (!pmap_bootstrapped)
 		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
 		    pvo->pvo_vaddr & PVO_LARGE);
-#endif
 
 	return (0);
 }
@@ -2965,8 +2844,6 @@ moea64_scan_init()
 	}
 }
 
-#ifdef __powerpc64__
-
 static size_t
 moea64_scan_pmap()
 {
@@ -3034,23 +2911,6 @@ moea64_dump_pmap_init(unsigned blkpgs)
 
 	return (&dump_ctx);
 }
-#else
-
-static size_t
-moea64_scan_pmap()
-{
-	return (0);
-}
-
-static void *
-moea64_dump_pmap_init(unsigned blkpgs)
-{
-	return (NULL);
-}
-
-#endif
-
-#ifdef __powerpc64__
 static void
 moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
 {
@@ -3144,7 +3004,6 @@ moea64_page_array_startup(long pages)
 	vm_page_array = (vm_page_t)vm_page_base;
 	vm_page_array_size = pages;
 }
-#endif
 
 static int64_t
 moea64_null_method(void)
diff --git a/sys/powerpc/aim/trap_subr32.S b/sys/powerpc/aim/trap_subr32.S
index 710a222fd84..05181d8df81 100644
--- a/sys/powerpc/aim/trap_subr32.S
+++ b/sys/powerpc/aim/trap_subr32.S
@@ -254,45 +254,6 @@ dtrace_invop_calltrap_addr:
 	.text
 #endif
 
-/*
- * The next two routines are 64-bit glue code. The first is used to test if
- * we are on a 64-bit system. By copying it to the illegal instruction
- * handler, we can test for 64-bit mode by trying to execute a 64-bit
- * instruction and seeing what happens. The second gets copied in front
- * of all the other handlers to restore 32-bit bridge mode when traps
- * are taken.
- */
-
-/* 64-bit test code. Sets SPRG2 to 0 if an illegal instruction is executed */
-
-	.globl	CNAME(testppc64),CNAME(testppc64size)
-CNAME(testppc64):
-	mtsprg1	%r31
-	mfsrr0	%r31
-	addi	%r31, %r31, 4
-	mtsrr0	%r31
-
-	li	%r31, 0
-	mtsprg2	%r31
-	mfsprg1	%r31
-
-	rfi
-CNAME(testppc64size) = .-CNAME(testppc64)
-
-
-/* 64-bit bridge mode restore snippet. Gets copied in front of everything else
- * on 64-bit systems. */
-
-	.globl	CNAME(restorebridge),CNAME(restorebridgesize)
-CNAME(restorebridge):
-	mtsprg1	%r31
-	mfmsr	%r31
-	clrldi	%r31,%r31,1
-	mtmsrd	%r31
-	mfsprg1	%r31
-	isync
-CNAME(restorebridgesize) = .-CNAME(restorebridge)
-
 /*
  * Processor reset exception handler. These are typically
  * the first instructions the processor executes after a
@@ -730,15 +691,6 @@ realtrap:
  * SPRG2 - Original LR
  */
 
-	.globl	CNAME(generictrap64)
-generictrap64:
-	mtsprg3	%r31
-	mfmsr	%r31
-	clrldi	%r31,%r31,1
-	mtmsrd	%r31
-	mfsprg3	%r31
-	isync
-
 	.globl	CNAME(generictrap)
generictrap:
 	/* Save R1 for computing the exception vector */
@@ -818,13 +770,8 @@ CNAME(asttrapexit):
 
 1:
 	FRAME_LEAVE(PC_TEMPSAVE)
-	.globl	CNAME(rfi_patch1)	/* replace rfi with rfid on ppc64 */
-CNAME(rfi_patch1):
 	rfi
 
-	.globl	CNAME(rfid_patch)
-CNAME(rfid_patch):
-	rfid
 
 #if defined(KDB)
 /*
@@ -887,8 +834,6 @@ dbtrap:
 	b	realtrap
 dbleave:
 	FRAME_LEAVE(PC_DBSAVE)
-	.globl	CNAME(rfi_patch2)	/* replace rfi with rfid on ppc64 */
-CNAME(rfi_patch2):
 	rfi
 
 /*
diff --git a/sys/powerpc/cpufreq/pcr.c b/sys/powerpc/cpufreq/pcr.c
index 15f37bd46d8..d4dbfdee37b 100644
--- a/sys/powerpc/cpufreq/pcr.c
+++ b/sys/powerpc/cpufreq/pcr.c
@@ -110,20 +110,11 @@ static void
 write_scom(register_t address, uint64_t value)
 {
 	register_t msr;
-	#ifndef __powerpc64__
-	register_t hi, lo, scratch;
-	#endif
 
 	msr = mfmsr();
 	mtmsr(msr & ~PSL_EE);
 	isync();
-	#ifdef __powerpc64__
 	mtspr(SPR_SCOMD, value);
-	#else
-	hi = (value >> 32) & 0xffffffff;
-	lo = value & 0xffffffff;
-	mtspr64(SPR_SCOMD, hi, lo, scratch);
-	#endif
 	isync();
 	mtspr(SPR_SCOMC, address | SCOMC_WRITE);
 	isync();
diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h
index 7569d4ccccf..b335adb97c1 100644
--- a/sys/powerpc/include/mmuvar.h
+++ b/sys/powerpc/include/mmuvar.h
@@ -220,7 +220,7 @@ DATA_SET(mmu_set, name)
  */
 #define MMU_TYPE_BOOKE	"mmu_booke"	/* Book-E MMU specification */
 #define MMU_TYPE_OEA	"mmu_oea"	/* 32-bit OEA */
-#define MMU_TYPE_G5	"mmu_g5"	/* 64-bit bridge (ibm 970) */
+#define MMU_TYPE_G5	"mmu_g5"	/* 64-bit OEA (ibm 970 / POWER HPT) */
 #define MMU_TYPE_RADIX	"mmu_radix"	/* 64-bit native ISA 3.0 (POWER9) radix */
 #define MMU_TYPE_8xx	"mmu_8xx"	/* 8xx quicc TLB */
 
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index edde0d73d04..fd5ad44393a 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -123,11 +123,12 @@ struct pvo_entry {
 #ifndef __powerpc64__
 		/* 32-bit fields */
 		pte_t pte;
-#endif
+#else
 		/* 64-bit fields */
 		uintptr_t slot;
 		vm_paddr_t pa;
 		vm_prot_t prot;
+#endif
 	} pvo_pte;
 	pmap_t pvo_pmap;	/* Owning pmap */
 	vm_offset_t pvo_vaddr;	/* VA of entry */
diff --git a/sys/powerpc/include/spr.h b/sys/powerpc/include/spr.h
index 6da97e74333..a87a43d9c59 100644
--- a/sys/powerpc/include/spr.h
+++ b/sys/powerpc/include/spr.h
@@ -38,49 +38,6 @@
 	( { register_t val; \
 	  __asm __volatile("mfspr %0,%1" : "=r"(val) : "K"(reg)); \
 	  val; } )
-
-
-#ifndef __powerpc64__
-
-/* The following routines allow manipulation of the full 64-bit width
- * of SPRs on 64 bit CPUs in bridge mode */
-
-#define mtspr64(reg,valhi,vallo,scratch) \
-	__asm __volatile(" \
-		mfmsr %0; \
-		insrdi %0,%5,1,0; \
-		mtmsrd %0; \
-		isync; \
-		 \
-		sld %1,%1,%4; \
-		or %1,%1,%2; \
-		mtspr %3,%1; \
-		srd %1,%1,%4; \
-		 \
-		clrldi %0,%0,1; \
-		mtmsrd %0; \
-		isync;" \
-	    : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32), "r"(1))
-
-#define mfspr64upper(reg,scratch) \
-	( { register_t val; \
-	    __asm __volatile(" \
-		mfmsr %0; \
-		insrdi %0,%4,1,0; \
-		mtmsrd %0; \
-		isync; \
-		 \
-		mfspr %1,%2; \
-		srd %1,%1,%3; \
-		 \
-		clrldi %0,%0,1; \
-		mtmsrd %0; \
-		isync;" \
-	    : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1)); \
-	    val; } )
-
-#endif
-
 #endif /* _LOCORE */
 
 /*