diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index a2bc695a0a1..32d298ddae1 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -678,22 +678,31 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
 		  }
 		}
 		PMAP_UNLOCK(kernel_pmap);
 	}
 
 	/*
 	 * Make sure the kernel and BPVO pool stay mapped on systems either
 	 * without a direct map or on which the kernel is not already executing
 	 * out of the direct-mapped region.
	 */
-
-	if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
+	if (hw_direct_map && kernelstart < DMAP_BASE_ADDRESS) {
+		/*
+		 * For pre-dmap execution, we need to use identity mapping
+		 * because we will be operating with the mmu on but in the
+		 * wrong address configuration until we __restartkernel().
+		 */
+		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
+		    pa += PAGE_SIZE)
+			moea64_kenter(mmup, pa, pa);
+	}
+	else if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
 		pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
 		pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
 		for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
 		    pa += PAGE_SIZE)
 			moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
 	}
 
 	if (!hw_direct_map) {
 		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
 		off = (vm_offset_t)(moea64_bpvo_pool);
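
For illustration, the identity-mapping loop added by the first new branch can be modeled as a standalone userland program: until __restartkernel() switches the address configuration, the CPU runs with the MMU on but fetches from the kernel's pre-DMAP addresses, so a VA == PA translation must exist for every page of the kernel image. In the sketch below, kenter() is a hypothetical print-only stand-in for moea64_kenter(), and kernelstart/kernelend are assumed example values, not taken from the commit.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE	((uint64_t)4096)
#define PAGE_MASK	(PAGE_SIZE - 1)

/* Hypothetical stand-in for moea64_kenter(): just report the mapping. */
static void
kenter(uint64_t va, uint64_t pa)
{

	printf("map VA 0x%016" PRIx64 " -> PA 0x%016" PRIx64 "\n", va, pa);
}

int
main(void)
{
	/* Assumed example extents of a pre-DMAP kernel image. */
	uint64_t kernelstart = 0x00100123;	/* deliberately unaligned */
	uint64_t kernelend = 0x00104000;
	uint64_t pa;

	/*
	 * Mirror the loop added by the patch: round kernelstart down to
	 * a page boundary, then enter a VA == PA mapping for each page
	 * until kernelend is reached.
	 */
	for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE)
		kenter(pa, pa);

	return (0);
}

With these example values the loop identity-maps four pages (0x100000 through 0x103000), showing how rounding kernelstart down with ~PAGE_MASK keeps the unaligned start of the image covered.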