Index: sys/powerpc/aim/aim_machdep.c
===================================================================
--- sys/powerpc/aim/aim_machdep.c
+++ sys/powerpc/aim/aim_machdep.c
@@ -455,13 +455,35 @@
 #endif
 
+/*
+ * These functions need to provide addresses that both (a) work in real mode
+ * (or whatever mode/circumstances the kernel is in during early boot, which
+ * is when they run) and (b) can still, in principle, work once the kernel
+ * is fully up. Because these rely on existing mappings/real mode, unmap is
+ * a no-op.
+ */
 vm_offset_t
 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
 {
+        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
 
-        return (pa);
+        /*
+         * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
+         * try to get the address in a memory region compatible with the
+         * direct map for efficiency later.
+         */
+        if (mfmsr() & PSL_DR)
+                return (pa);
+        else
+                return (DMAP_BASE_ADDRESS + pa);
 }
 
+void
+pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
+{
+
+        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
+}
+
 /* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
 void
 flush_disable_caches(void)
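
For context, a minimal sketch of how an early-boot consumer might use this pair. early_fb_probe() and FB_PHYS are hypothetical names invented for this illustration, and the sketch assumes the usual kernel headers are in scope:

    /* Hypothetical early-boot caller of pmap_early_io_map()/unmap(). */
    #define FB_PHYS 0xf4000000UL    /* made-up framebuffer physical address */

    static uint32_t
    early_fb_probe(void)
    {
            volatile uint32_t *fb;
            vm_offset_t va;
            uint32_t pixel0;

            /* Usable in real mode now and via the direct map later. */
            va = pmap_early_io_map(FB_PHYS, PAGE_SIZE);
            fb = (volatile uint32_t *)va;
            pixel0 = fb[0];

            /* A no-op today, but keeps the map/unmap pairing explicit. */
            pmap_early_io_unmap(va, PAGE_SIZE);
            return (pixel0);
    }
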
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -551,7 +551,8 @@
 
                /* If this address is direct-mapped, skip remapping */
                if (hw_direct_map &&
                    translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
-                   moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M)
+                   moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
+                   == LPTE_M)
                        continue;
 
                PMAP_LOCK(kernel_pmap);
@@ -664,25 +665,27 @@
                        }
                }
                PMAP_UNLOCK(kernel_pmap);
-       } else {
-               size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
-               off = (vm_offset_t)(moea64_bpvo_pool);
-               for (pa = off; pa < off + size; pa += PAGE_SIZE)
-                       moea64_kenter(mmup, pa, pa);
+       }
 
-               /*
-                * Map certain important things, like ourselves.
-                *
-                * NOTE: We do not map the exception vector space. That code is
-                * used only in real mode, and leaving it unmapped allows us to
-                * catch NULL pointer deferences, instead of making NULL a valid
-                * address.
-                */
+       /*
+        * Make sure the kernel and BPVO pool stay mapped on systems either
+        * without a direct map or on which the kernel is not already
+        * executing out of the direct-mapped region.
+        */
+       if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
                for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
-                   pa += PAGE_SIZE)
+                   pa += PAGE_SIZE)
                        moea64_kenter(mmup, pa, pa);
        }
+
+       if (!hw_direct_map || (vm_offset_t)(moea64_bpvo_pool) <
+           DMAP_BASE_ADDRESS) {
+               size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
+               off = (vm_offset_t)(moea64_bpvo_pool);
+               for (pa = off; pa < off + size; pa += PAGE_SIZE)
+                       moea64_kenter(mmup, pa, pa);
+       }
        ENABLE_TRANS(msr);
 
        /*
@@ -826,6 +829,11 @@
            moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
        moea64_bpvo_pool_index = 0;
 
+       /* If in real mode, place at address usable through the direct map */
+       if (hw_direct_map && !(mfmsr() & PSL_DR))
+               moea64_bpvo_pool = (struct pvo_entry *)
+                   PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
+
        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
@@ -898,12 +906,11 @@
                Maxmem = max(Maxmem, powerpc_btop(phys_avail[i + 1]));
 
        /*
-        * Initialize MMU and remap early physical mappings
+        * Initialize MMU.
         */
        MMU_CPU_BOOTSTRAP(mmup,0);
        mtmsr(mfmsr() | PSL_DR | PSL_IR);
        pmap_bootstrapped++;
-       bs_remap_earlyboot();
 
        /*
         * Set the start and end of kva.
@@ -918,6 +925,11 @@
        for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
                moea64_bootstrap_slb_prefault(va, 0);
 #endif
+
+       /*
+        * Remap any early IO mappings (console framebuffer, etc.)
+        */
+       bs_remap_earlyboot();
 
        /*
         * Figure out how far we can extend virtual_end into segment 16
@@ -1826,10 +1838,11 @@
 
        /*
         * Shortcut the direct-mapped case when applicable.  We never put
-        * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
+        * anything but 1:1 (or 62-bit aliased) mappings below
+        * VM_MIN_KERNEL_ADDRESS.
         */
        if (va < VM_MIN_KERNEL_ADDRESS)
-               return (va);
+               return (va & ~DMAP_BASE_ADDRESS);
 
        PMAP_LOCK(kernel_pmap);
        pvo = moea64_pvo_find_va(kernel_pmap, va);
@@ -2565,12 +2578,15 @@
         * Update vm about page writeability/executability if managed
         */
        PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
-       pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+       if (pvo->pvo_vaddr & PVO_MANAGED) {
+               pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
-       if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
-               LIST_REMOVE(pvo, pvo_vlink);
-               if (LIST_EMPTY(vm_page_to_pvoh(pg)))
-                       vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
+               if (pg != NULL) {
+                       LIST_REMOVE(pvo, pvo_vlink);
+                       if (LIST_EMPTY(vm_page_to_pvoh(pg)))
+                               vm_page_aflag_clear(pg,
+                                   PGA_WRITEABLE | PGA_EXECUTABLE);
+               }
        }
 
        moea64_pvo_entries--;
@@ -2677,8 +2693,11 @@
        vm_offset_t ppa;
        int error = 0;
 
+       if (hw_direct_map && mem_valid(pa, size) == 0)
+               return (0);
+
        PMAP_LOCK(kernel_pmap);
-       key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
+       key.pvo_vaddr = ppa = (DMAP_BASE_ADDRESS + pa) & ~ADDR_POFF;
        for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
            ppa < pa + size; ppa += PAGE_SIZE,
            pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
Index: sys/powerpc/aim/moea64_native.c
===================================================================
--- sys/powerpc/aim/moea64_native.c
+++ sys/powerpc/aim/moea64_native.c
@@ -444,9 +444,12 @@
        moea64_mid_bootstrap(mmup, kernelstart, kernelend);
 
        /*
-        * Add a mapping for the page table itself if there is no direct map.
+        * Add a mapping for the page table itself if there is no direct map
+        * or the kernel is executing in a way that does not let us make use
+        * of real-aliased addresses.
         */
-       if (!hw_direct_map) {
+       if (!hw_direct_map ||
+           (vm_offset_t)moea64_pteg_table < DMAP_BASE_ADDRESS) {
                size = moea64_pteg_count * sizeof(struct lpteg);
                off = (vm_offset_t)(moea64_pteg_table);
                DISABLE_TRANS(msr);
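
The moea64_kextract() shortcut above only works because the direct map is a plain offset alias of physical memory. A standalone sketch of that arithmetic, using the DMAP_BASE_ADDRESS value this patch introduces in vmparam.h:

    #include <assert.h>
    #include <stdint.h>

    #define DMAP_BASE_ADDRESS 0xc000000000000000UL

    int
    main(void)
    {
            uint64_t pa = 0x2000000UL;                 /* physical address */
            uint64_t dmap_va = DMAP_BASE_ADDRESS + pa; /* direct-map alias */

            /* Clearing the base bits recovers the PA from a DMAP VA... */
            assert((dmap_va & ~DMAP_BASE_ADDRESS) == pa);
            /* ...and leaves a 1:1 early-boot VA (bits already clear) intact. */
            assert((pa & ~DMAP_BASE_ADDRESS) == pa);
            return (0);
    }
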
Index: sys/powerpc/aim/slb.c
===================================================================
--- sys/powerpc/aim/slb.c
+++ sys/powerpc/aim/slb.c
@@ -207,13 +207,16 @@
        /* Set kernel VSID to deterministic value */
        slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;
 
-       /* Figure out if this is a large-page mapping */
-       if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
+       /*
+        * Figure out if this is a large-page mapping.
+        */
+       if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
                /*
                 * XXX: If we have set up a direct map, assumes
                 * all physical memory is mapped with large pages.
                 */
-               if (mem_valid(va, 0) == 0)
+
+               if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
                        slbv |= SLBV_L;
        }
 
Index: sys/powerpc/aim/trap_subr64.S
===================================================================
--- sys/powerpc/aim/trap_subr64.S
+++ sys/powerpc/aim/trap_subr64.S
@@ -43,7 +43,9 @@
 #define GET_CPUINFO(r)  \
        mfsprg0 r
 #define GET_TOCBASE(r)  \
-       li      r,TRAP_TOCBASE;         /* Magic address for TOC */ \
+       lis     r,DMAP_BASE_ADDRESS@highesta;   /* To real-mode alias/dmap */ \
+       sldi    r,r,32; \
+       ori     r,r,TRAP_TOCBASE;       /* Magic address for TOC */ \
        ld      r,0(r)
 
 /*
Index: sys/powerpc/include/sr.h
===================================================================
--- sys/powerpc/include/sr.h
+++ sys/powerpc/include/sr.h
@@ -53,7 +53,7 @@
 #define KERNEL2_SEGMENT (0xfffff0 + KERNEL2_SR)
 #define EMPTY_SEGMENT   0xfffff0
 #ifdef __powerpc64__
-#define USER_ADDR       0xcffffffff0000000UL
+#define USER_ADDR       0xeffffffff0000000UL
 #else
 #define USER_ADDR       ((uintptr_t)USER_SR << ADDR_SR_SHFT)
 #endif
Index: sys/powerpc/include/vmparam.h
===================================================================
--- sys/powerpc/include/vmparam.h
+++ sys/powerpc/include/vmparam.h
@@ -83,11 +83,7 @@
 #if !defined(LOCORE)
 #ifdef __powerpc64__
 #define VM_MIN_ADDRESS          (0x0000000000000000UL)
-#ifdef AIM
-#define VM_MAXUSER_ADDRESS      (0xfffffffffffff000UL)
-#else
-#define VM_MAXUSER_ADDRESS      (0x7ffffffffffff000UL)
-#endif
+#define VM_MAXUSER_ADDRESS      (0x3ffffffffffff000UL)
 #define VM_MAX_ADDRESS          (0xffffffffffffffffUL)
 #else
 #define VM_MIN_ADDRESS          ((vm_offset_t)0)
@@ -99,7 +95,7 @@
 #ifdef BOOKE
 #define VM_MIN_ADDRESS          0
 #ifdef __powerpc64__
-#define VM_MAXUSER_ADDRESS      0x7ffffffffffff000
+#define VM_MAXUSER_ADDRESS      0x3ffffffffffff000
 #else
 #define VM_MAXUSER_ADDRESS      0x7ffff000
 #endif
@@ -110,8 +106,13 @@
 #define FREEBSD32_USRSTACK      FREEBSD32_SHAREDPAGE
 
 #ifdef __powerpc64__
+#ifdef AIM
+#define VM_MIN_KERNEL_ADDRESS   0xe000000000000000UL
+#define VM_MAX_KERNEL_ADDRESS   0xe0000001c7ffffffUL
+#else
 #define VM_MIN_KERNEL_ADDRESS   0xc000000000000000UL
 #define VM_MAX_KERNEL_ADDRESS   0xc0000001c7ffffffUL
+#endif
 #define VM_MAX_SAFE_KERNEL_ADDRESS      VM_MAX_KERNEL_ADDRESS
 #endif
@@ -243,14 +244,15 @@
 /*
  * We (usually) have a direct map of all physical memory, so provide
- * a macro to use to get the kernel VA address for a given PA. Returns
- * 0 if the direct map is unavailable. The location of the direct map
- * may not be 1:1 in future, so use of the macro is recommended.
+ * a macro to use to get the kernel VA address for a given PA. Check the
+ * value of PMAP_HAS_DMAP before using.
  */
 #ifdef __powerpc64__
-#define DMAP_BASE_ADDRESS       0x0000000000000000UL
+#define DMAP_BASE_ADDRESS       0xc000000000000000UL
+#define DMAP_MAX_ADDRESS        0xcfffffffffffffffUL
 #else
 #define DMAP_BASE_ADDRESS       0x00000000UL
+#define DMAP_MAX_ADDRESS        0xbfffffffUL
 #endif
 #define PMAP_HAS_DMAP   (hw_direct_map)
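
In C terms, the new GET_TOCBASE sequence materializes the direct-map alias of the TOC magic address. The sketch below models the three instructions; the TRAP_TOCBASE value is an assumption for illustration only (the real constant just has to fit in ori's 16-bit immediate):

    #include <assert.h>
    #include <stdint.h>

    #define TRAP_TOCBASE 0x80UL     /* assumed value, illustration only */

    int
    main(void)
    {
            uint64_t r;

            r = 0xc000UL << 16;     /* lis r,DMAP_BASE_ADDRESS@highesta */
            r <<= 32;               /* sldi r,r,32 -> 0xc000000000000000 */
            r |= TRAP_TOCBASE;      /* ori r,r,TRAP_TOCBASE */
            assert(r == (0xc000000000000000UL | TRAP_TOCBASE));
            /* the final "ld r,0(r)" fetches the TOC pointer via that alias */
            return (0);
    }

On real hardware lis sign-extends its result, but the sldi shifts those upper bits away, so the final register value matches this C model.
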
Index: sys/powerpc/ofw/ofw_machdep.c
===================================================================
--- sys/powerpc/ofw/ofw_machdep.c
+++ sys/powerpc/ofw/ofw_machdep.c
@@ -81,22 +81,25 @@
 int             ofwcall(void *);
 static int      openfirmware(void *args);
 
+void *first_trap;
+
 __inline void
 ofw_save_trap_vec(char *save_trap_vec)
 {
-       if (!ofw_real_mode)
+       if (!ofw_real_mode || !hw_direct_map)
                return;
 
-       bcopy((void *)EXC_RST, save_trap_vec, EXC_LAST - EXC_RST);
+       bcopy((void *)PHYS_TO_DMAP(EXC_RST), save_trap_vec, EXC_LAST - EXC_RST);
 }
 
 static __inline void
 ofw_restore_trap_vec(char *restore_trap_vec)
 {
-       if (!ofw_real_mode)
+       if (!ofw_real_mode || !hw_direct_map)
                return;
 
-       bcopy(restore_trap_vec, (void *)EXC_RST, EXC_LAST - EXC_RST);
+       bcopy(restore_trap_vec, (void *)PHYS_TO_DMAP(EXC_RST),
+           EXC_LAST - EXC_RST);
        __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
 }
 
@@ -381,12 +384,6 @@
 #endif
 
        fdt = fdt_ptr;
-
-       #ifdef FDT_DTB_STATIC
-       /* Check for a statically included blob */
-       if (fdt == NULL)
-               fdt = &fdt_static_dtb;
-       #endif
 }
 
 boolean_t
@@ -414,14 +411,52 @@
        } else
 #endif
        if (fdt != NULL) {
+               bus_space_tag_t fdt_bt;
+               vm_offset_t tmp_fdt_ptr;
+               vm_size_t fdt_size;
+               uintptr_t fdt_va;
+
                status = OF_install(OFW_FDT, 0);
-
                if (status != TRUE)
                        return status;
 
-               err = OF_init(fdt);
+               /* Get the FDT size for mapping if we can */
+               tmp_fdt_ptr = pmap_early_io_map((vm_paddr_t)fdt, PAGE_SIZE);
+               if (fdt_check_header((void *)tmp_fdt_ptr) != 0) {
+                       pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE);
+                       return FALSE;
+               }
+               fdt_size = fdt_totalsize((void *)tmp_fdt_ptr);
+               pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE);
+
+               /*
+                * Map this for real. Use bus_space_map() to take advantage
+                * of its auto-remapping function once the kernel is loaded.
+                * This is a dirty hack, but it is what we have.
+                */
+#ifdef _LITTLE_ENDIAN
+               fdt_bt = &bs_le_tag;
+#else
+               fdt_bt = &bs_be_tag;
+#endif
+               bus_space_map(fdt_bt, (vm_paddr_t)fdt, fdt_size, 0, &fdt_va);
+
+               err = OF_init((void *)fdt_va);
        }
+       #ifdef FDT_DTB_STATIC
+       /*
+        * Check for a statically included blob already in the kernel and
+        * needing no mapping.
+        */
+       else {
+               status = OF_install(OFW_FDT, 0);
+               if (status != TRUE)
+                       return status;
+               err = OF_init(&fdt_static_dtb);
+       }
+       #endif
+
        if (err != 0) {
                OF_install(NULL, 0);
                status = FALSE;
Index: sys/powerpc/ofw/ofw_real.c
===================================================================
--- sys/powerpc/ofw/ofw_real.c
+++ sys/powerpc/ofw/ofw_real.c
@@ -223,7 +223,7 @@
         * we have a 32-bit virtual address to give OF.
         */
 
-       if (!ofw_real_mode && !hw_direct_map)
+       if (!ofw_real_mode && (!hw_direct_map || DMAP_BASE_ADDRESS != 0))
                pmap_kenter(of_bounce_phys, of_bounce_phys);
 
        mtx_unlock(&of_bounce_mtx);
@@ -244,7 +244,7 @@
         * can use right now is memory mapped by firmware.
         */
        if (!pmap_bootstrapped)
-               return (cell_t)(uintptr_t)buf;
+               return (cell_t)((uintptr_t)buf & ~DMAP_BASE_ADDRESS);
 
        /*
         * XXX: It is possible for us to get called before the VM has
@@ -253,7 +253,8 @@
         * Copy into the emergency buffer, and reset at the end.
         */
                of_bounce_virt = emergency_buffer;
-               of_bounce_phys = (vm_offset_t)of_bounce_virt;
+               of_bounce_phys = (vm_offset_t)of_bounce_virt &
+                   ~DMAP_BASE_ADDRESS;
                of_bounce_size = sizeof(emergency_buffer);
        }
 
@@ -261,7 +262,8 @@
         * Make sure the bounce page offset satisfies any reasonable
         * alignment constraint.
         */
-       of_bounce_offset += sizeof(register_t) - (of_bounce_offset % sizeof(register_t));
+       of_bounce_offset += sizeof(register_t) -
+           (of_bounce_offset % sizeof(register_t));
 
        if (of_bounce_offset + len > of_bounce_size) {
                panic("Oversize Open Firmware call!");
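
Why masking suffices in the ofw_real.c bounce path: Open Firmware runs in real mode and takes 32-bit cells, so a direct-mapped kernel VA has to be folded back to its physical alias before being handed over. A standalone illustration with made-up addresses:

    #include <assert.h>
    #include <stdint.h>

    #define DMAP_BASE_ADDRESS 0xc000000000000000UL

    int
    main(void)
    {
            /* made-up buffer address inside the direct map */
            uint64_t buf = DMAP_BASE_ADDRESS + 0x1234560UL;
            uint32_t cell = (uint32_t)(buf & ~DMAP_BASE_ADDRESS);

            /* the physical alias fits in the 32-bit cell OF expects */
            assert(cell == 0x1234560);
            return (0);
    }
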
Index: sys/powerpc/powerpc/bus_machdep.c
===================================================================
--- sys/powerpc/powerpc/bus_machdep.c
+++ sys/powerpc/powerpc/bus_machdep.c
@@ -115,7 +115,9 @@
 
        for (i = 0; i < earlyboot_map_idx; i++) {
                spa = earlyboot_mappings[i].addr;
-               if (spa == earlyboot_mappings[i].virt &&
+
+               if (hw_direct_map &&
+                   PHYS_TO_DMAP(spa) == earlyboot_mappings[i].virt &&
                    pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size)
                    == 0)
                        continue;
Index: sys/powerpc/powerpc/genassym.c
===================================================================
--- sys/powerpc/powerpc/genassym.c
+++ sys/powerpc/powerpc/genassym.c
@@ -222,6 +222,7 @@
 ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
 
 ASSYM(KERNBASE, KERNBASE);
+ASSYM(DMAP_BASE_ADDRESS, DMAP_BASE_ADDRESS);
 ASSYM(MAXCOMLEN, MAXCOMLEN);
 
 #ifdef __powerpc64__
Index: sys/powerpc/powerpc/mem.c
===================================================================
--- sys/powerpc/powerpc/mem.c
+++ sys/powerpc/powerpc/mem.c
@@ -125,8 +125,9 @@
                        break;
                }
 
-               if (!pmap_dev_direct_mapped(v, cnt)) {
-                       error = uiomove((void *)v, cnt, uio);
+               if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
+                       error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
+                           uio);
                } else {
                        m.phys_addr = trunc_page(v);
                        marr = &m;
Index: sys/powerpc/ps3/platform_ps3.c
===================================================================
--- sys/powerpc/ps3/platform_ps3.c
+++ sys/powerpc/ps3/platform_ps3.c
@@ -128,9 +128,6 @@
        pmap_mmu_install("mmu_ps3", BUS_PROBE_SPECIFIC);
        cpu_idle_hook = ps3_cpu_idle;
 
-       /* Set a breakpoint to make NULL an invalid address */
-       lv1_set_dabr(0x7 /* read and write, MMU on */, 2 /* kernel accesses */);
-
        /* Record our PIR at boot for later */
        ps3_boot_pir = mfspr(SPR_PIR);
 
@@ -226,7 +223,8 @@
 ps3_smp_start_cpu(platform_t plat, struct pcpu *pc)
 {
        /* kernel is spinning on 0x40 == -1 right now */
-       volatile uint32_t *secondary_spin_sem = (uint32_t *)PHYS_TO_DMAP(0x40);
+       volatile uint32_t *secondary_spin_sem =
+           (uint32_t *)PHYS_TO_DMAP((uintptr_t)0x40);
        int remote_pir = pc->pc_hwref;
        int timeout;
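
A restatement of the mem.c fast path above, as a sketch: with a direct map available and pmap_dev_direct_mapped() reporting the range as covered by it (returning 0, per the new check in moea64_dev_direct_mapped()), /dev/mem can copy straight through the PHYS_TO_DMAP() alias instead of building a transient mapping. mem_try_dmap_read() is a hypothetical helper name, assuming kernel context:

    /* Hypothetical wrapper around the test mem.c now performs. */
    static int
    mem_try_dmap_read(vm_paddr_t v, size_t cnt, struct uio *uio)
    {
            if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt))
                    return (uiomove((void *)PHYS_TO_DMAP(v), cnt, uio));
            return (EOPNOTSUPP);    /* caller falls back to a mapped page */
    }
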