Index: head/sys/dev/vt/hw/ofwfb/ofwfb.c
===================================================================
--- head/sys/dev/vt/hw/ofwfb/ofwfb.c
+++ head/sys/dev/vt/hw/ofwfb/ofwfb.c
@@ -489,7 +489,7 @@
 #if defined(__powerpc__)
 	OF_decode_addr(node, fb_phys, &sc->sc_memt, &sc->fb.fb_vbase, NULL);
-	sc->fb.fb_pbase = sc->fb.fb_vbase; /* 1:1 mapped */
+	sc->fb.fb_pbase = sc->fb.fb_vbase & ~DMAP_BASE_ADDRESS;
 #ifdef __powerpc64__
 	/* Real mode under a hypervisor probably doesn't cover FB */
 	if (!(mfmsr() & (PSL_HV | PSL_DR)))
Index: head/sys/powerpc/aim/aim_machdep.c
===================================================================
--- head/sys/powerpc/aim/aim_machdep.c
+++ head/sys/powerpc/aim/aim_machdep.c
@@ -455,11 +455,33 @@
 #endif

+/*
+ * These functions need to provide addresses that both (a) work in real mode
+ * (or whatever mode/circumstances the kernel is in in early boot (now)) and
+ * (b) can still, in principle, work once the kernel is going. Because these
+ * rely on existing mappings/real mode, unmap is a no-op.
+ */
 vm_offset_t
 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
 {
+	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

-	return (pa);
+	/*
+	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
+	 * try to get the address in a memory region compatible with the
+	 * direct map for efficiency later.
+	 */
+	if (mfmsr() & PSL_DR)
+		return (pa);
+	else
+		return (DMAP_BASE_ADDRESS + pa);
+}
+
+void
+pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
+{
+
+	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
 }

 /* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
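The pmap_early_io_map()/pmap_early_io_unmap() pair added above is only valid before the pmap is bootstrapped (both KASSERT on it), and unmap is deliberately a no-op because the returned address relies on real mode or a pre-existing 1:1 MMU mapping. A minimal usage sketch, assuming a hypothetical device physical address (the real consumer is the FDT probing added to ofw_machdep.c later in this patch):

	/* Sketch: peek at a device word before the pmap is bootstrapped. */
	uint32_t hdr;
	vm_offset_t va;

	va = pmap_early_io_map(0xf1000000 /* hypothetical phys addr */, PAGE_SIZE);
	hdr = *(volatile uint32_t *)va;		/* real-mode or DMAP-region alias */
	pmap_early_io_unmap(va, PAGE_SIZE);	/* no-op, kept for symmetry */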
Index: head/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea64.c
+++ head/sys/powerpc/aim/mmu_oea64.c
@@ -551,7 +551,8 @@
 		/* If this address is direct-mapped, skip remapping */
 		if (hw_direct_map &&
 		    translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
-		    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M)
+		    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
+			== LPTE_M)
 			continue;

 		PMAP_LOCK(kernel_pmap);
@@ -664,25 +665,26 @@
 			}
 		}
 		PMAP_UNLOCK(kernel_pmap);
-	} else {
-		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
-		off = (vm_offset_t)(moea64_bpvo_pool);
-		for (pa = off; pa < off + size; pa += PAGE_SIZE)
-			moea64_kenter(mmup, pa, pa);
+	}

-		/*
-		 * Map certain important things, like ourselves.
-		 *
-		 * NOTE: We do not map the exception vector space. That code is
-		 * used only in real mode, and leaving it unmapped allows us to
-		 * catch NULL pointer deferences, instead of making NULL a valid
-		 * address.
-		 */
+	/*
+	 * Make sure the kernel and BPVO pool stay mapped on systems either
+	 * without a direct map or on which the kernel is not already executing
+	 * out of the direct-mapped region.
+	 */
+	if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
 		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
-		    pa += PAGE_SIZE)
+		    pa += PAGE_SIZE)
 			moea64_kenter(mmup, pa, pa);
 	}
+
+	if (!hw_direct_map) {
+		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
+		off = (vm_offset_t)(moea64_bpvo_pool);
+		for (pa = off; pa < off + size; pa += PAGE_SIZE)
+			moea64_kenter(mmup, pa, pa);
+	}
 	ENABLE_TRANS(msr);

 	/*
@@ -826,6 +828,11 @@
 	    moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
 	moea64_bpvo_pool_index = 0;

+	/* Place at address usable through the direct map */
+	if (hw_direct_map)
+		moea64_bpvo_pool = (struct pvo_entry *)
+		    PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
+
 	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
@@ -898,12 +905,11 @@
 		Maxmem = max(Maxmem, powerpc_btop(phys_avail[i + 1]));

 	/*
-	 * Initialize MMU and remap early physical mappings
+	 * Initialize MMU.
 	 */
 	MMU_CPU_BOOTSTRAP(mmup,0);
 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
 	pmap_bootstrapped++;
-	bs_remap_earlyboot();

 	/*
 	 * Set the start and end of kva.
@@ -920,6 +926,11 @@
 #endif

 	/*
+	 * Remap any early IO mappings (console framebuffer, etc.)
+	 */
+	bs_remap_earlyboot();
+
+	/*
 	 * Figure out how far we can extend virtual_end into segment 16
 	 * without running into existing mappings. Segment 16 is guaranteed
 	 * to contain neither RAM nor devices (at least on Apple hardware),
@@ -1826,10 +1837,11 @@
 	/*
 	 * Shortcut the direct-mapped case when applicable.  We never put
-	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
+	 * anything but 1:1 (or 62-bit aliased) mappings below
+	 * VM_MIN_KERNEL_ADDRESS.
 	 */
 	if (va < VM_MIN_KERNEL_ADDRESS)
-		return (va);
+		return (va & ~DMAP_BASE_ADDRESS);

 	PMAP_LOCK(kernel_pmap);
 	pvo = moea64_pvo_find_va(kernel_pmap, va);
@@ -2565,12 +2577,15 @@
 	/*
 	 * Update vm about page writeability/executability if managed
 	 */
 	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
-	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+	if (pvo->pvo_vaddr & PVO_MANAGED) {
+		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

-	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
-		LIST_REMOVE(pvo, pvo_vlink);
-		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
-			vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
+		if (pg != NULL) {
+			LIST_REMOVE(pvo, pvo_vlink);
+			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
+				vm_page_aflag_clear(pg,
+				    PGA_WRITEABLE | PGA_EXECUTABLE);
+		}
 	}

 	moea64_pvo_entries--;
@@ -2677,8 +2692,12 @@
 	vm_offset_t ppa;
 	int error = 0;

+	if (hw_direct_map && mem_valid(pa, size) == 0)
+		return (0);
+
 	PMAP_LOCK(kernel_pmap);
-	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
+	ppa = pa & ~ADDR_POFF;
+	key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
 	    ppa < pa + size; ppa += PAGE_SIZE,
 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
Index: head/sys/powerpc/aim/moea64_native.c
===================================================================
--- head/sys/powerpc/aim/moea64_native.c
+++ head/sys/powerpc/aim/moea64_native.c
@@ -401,7 +401,7 @@
 	 */

 	__asm __volatile ("ptesync; mtsdr1 %0; isync"
-	    :: "r"((uintptr_t)moea64_pteg_table
+	    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
 		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
 	tlbia();
 }
@@ -434,6 +434,9 @@
 	 */
 	moea64_pteg_table =
 	    (struct lpte *)moea64_bootstrap_alloc(size, size);
+	if (hw_direct_map)
+		moea64_pteg_table =
+		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
 	DISABLE_TRANS(msr);
 	bzero(__DEVOLATILE(void *, moea64_pteg_table),
 	    moea64_pteg_count * sizeof(struct lpteg));
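Throughout these pmap hunks the direct map is now a fixed-offset alias rather than an identity map: pmap_early_io_map() builds addresses as DMAP_BASE_ADDRESS + pa, and masking with ~DMAP_BASE_ADDRESS recovers the physical address. That is why the mtsdr1 hunk strips the alias before loading the hardware register: SDR1 takes a physical page-table base. A sketch of the arithmetic, with an illustrative address and a stand-in constant:

	/* Illustrative only: offset direct map on powerpc64 after this change. */
	#define DMAP_BASE	0xc000000000000000UL	/* stand-in for DMAP_BASE_ADDRESS */

	uint64_t  pa   = 0x0000000001000000UL;	/* example physical address */
	uintptr_t va   = DMAP_BASE | pa;	/* what PHYS_TO_DMAP() yields */
	uint64_t  back = va & ~DMAP_BASE;	/* == pa; suitable for SDR1 */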
Index: head/sys/powerpc/aim/slb.c
===================================================================
--- head/sys/powerpc/aim/slb.c
+++ head/sys/powerpc/aim/slb.c
@@ -207,13 +207,16 @@
 	/* Set kernel VSID to deterministic value */
 	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

-	/* Figure out if this is a large-page mapping */
-	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
+	/*
+	 * Figure out if this is a large-page mapping.
+	 */
+	if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
 		/*
 		 * XXX: If we have set up a direct map, assumes
 		 * all physical memory is mapped with large pages.
 		 */
-		if (mem_valid(va, 0) == 0)
+
+		if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
 			slbv |= SLBV_L;
 	}
Index: head/sys/powerpc/aim/trap_subr64.S
===================================================================
--- head/sys/powerpc/aim/trap_subr64.S
+++ head/sys/powerpc/aim/trap_subr64.S
@@ -43,7 +43,9 @@
 #define GET_CPUINFO(r)  \
 	mfsprg0	r
 #define GET_TOCBASE(r)  \
-	li	r,TRAP_TOCBASE;	/* Magic address for TOC */ \
+	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
+	sldi	r,r,32;					\
+	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
 	ld	r,0(r)

 /*
Index: head/sys/powerpc/include/sr.h
===================================================================
--- head/sys/powerpc/include/sr.h
+++ head/sys/powerpc/include/sr.h
@@ -53,7 +53,7 @@
 #define	KERNEL2_SEGMENT	(0xfffff0 + KERNEL2_SR)
 #define	EMPTY_SEGMENT	0xfffff0
 #ifdef __powerpc64__
-#define	USER_ADDR	0xcffffffff0000000UL
+#define	USER_ADDR	0xeffffffff0000000UL
 #else
 #define	USER_ADDR	((uintptr_t)USER_SR << ADDR_SR_SHFT)
 #endif
Index: head/sys/powerpc/include/vmparam.h
===================================================================
--- head/sys/powerpc/include/vmparam.h
+++ head/sys/powerpc/include/vmparam.h
@@ -83,11 +83,7 @@
 #if !defined(LOCORE)
 #ifdef __powerpc64__
 #define	VM_MIN_ADDRESS		(0x0000000000000000UL)
-#ifdef AIM
-#define	VM_MAXUSER_ADDRESS	(0xfffffffffffff000UL)
-#else
-#define	VM_MAXUSER_ADDRESS	(0x7ffffffffffff000UL)
-#endif
+#define	VM_MAXUSER_ADDRESS	(0x3ffffffffffff000UL)
 #define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)
 #else
 #define	VM_MIN_ADDRESS		((vm_offset_t)0)
@@ -99,7 +95,7 @@
 #ifdef BOOKE
 #define	VM_MIN_ADDRESS		0
 #ifdef __powerpc64__
-#define	VM_MAXUSER_ADDRESS	0x7ffffffffffff000
+#define	VM_MAXUSER_ADDRESS	0x3ffffffffffff000
 #else
 #define	VM_MAXUSER_ADDRESS	0x7ffff000
 #endif
@@ -110,8 +106,13 @@
 #define	FREEBSD32_USRSTACK	FREEBSD32_SHAREDPAGE

 #ifdef __powerpc64__
+#ifdef AIM
+#define	VM_MIN_KERNEL_ADDRESS	0xe000000000000000UL
+#define	VM_MAX_KERNEL_ADDRESS	0xe0000001c7ffffffUL
+#else
 #define	VM_MIN_KERNEL_ADDRESS	0xc000000000000000UL
 #define	VM_MAX_KERNEL_ADDRESS	0xc0000001c7ffffffUL
+#endif
 #define	VM_MAX_SAFE_KERNEL_ADDRESS	VM_MAX_KERNEL_ADDRESS
 #endif
@@ -243,14 +244,17 @@
 /*
  * We (usually) have a direct map of all physical memory, so provide
- * a macro to use to get the kernel VA address for a given PA. Returns
- * 0 if the direct map is unavailable. The location of the direct map
- * may not be 1:1 in future, so use of the macro is recommended.
+ * a macro to use to get the kernel VA address for a given PA. Check the
+ * value of PMAP_HAS_DMAP before using.
  */
+#ifndef LOCORE
 #ifdef __powerpc64__
-#define	DMAP_BASE_ADDRESS	0x0000000000000000UL
+#define	DMAP_BASE_ADDRESS	0xc000000000000000UL
+#define	DMAP_MAX_ADDRESS	0xcfffffffffffffffUL
 #else
 #define	DMAP_BASE_ADDRESS	0x00000000UL
+#define	DMAP_MAX_ADDRESS	0xbfffffffUL
+#endif
 #endif

 #define	PMAP_HAS_DMAP	(hw_direct_map)
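Taken together, the sr.h and vmparam.h constants above carve the powerpc64 AIM address space into non-overlapping regions:

	0x0000000000000000-0x3ffffffffffff000	user (VM_MIN_ADDRESS..VM_MAXUSER_ADDRESS)
	0xc000000000000000-0xcfffffffffffffff	direct map (DMAP_BASE_ADDRESS..DMAP_MAX_ADDRESS)
	0xe000000000000000-0xe0000001c7ffffff	KVA (VM_MIN_KERNEL_ADDRESS..VM_MAX_KERNEL_ADDRESS)
	0xeffffffff0000000			USER_ADDR segment window

The GET_TOCBASE change in trap_subr64.S follows the same layout: the magic TOC pointer at TRAP_TOCBASE is now loaded through its direct-map alias, built by shifting DMAP_BASE_ADDRESS@highesta into the upper bits with sldi before ORing in the low magic address.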
Index: head/sys/powerpc/ofw/ofw_machdep.c
===================================================================
--- head/sys/powerpc/ofw/ofw_machdep.c
+++ head/sys/powerpc/ofw/ofw_machdep.c
@@ -84,20 +84,21 @@
 __inline void
 ofw_save_trap_vec(char *save_trap_vec)
 {
-	if (!ofw_real_mode)
+	if (!ofw_real_mode || !hw_direct_map)
 		return;

-	bcopy((void *)EXC_RST, save_trap_vec, EXC_LAST - EXC_RST);
+	bcopy((void *)PHYS_TO_DMAP(EXC_RST), save_trap_vec, EXC_LAST - EXC_RST);
 }

 static __inline void
 ofw_restore_trap_vec(char *restore_trap_vec)
 {
-	if (!ofw_real_mode)
+	if (!ofw_real_mode || !hw_direct_map)
 		return;

-	bcopy(restore_trap_vec, (void *)EXC_RST, EXC_LAST - EXC_RST);
-	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
+	bcopy(restore_trap_vec, (void *)PHYS_TO_DMAP(EXC_RST),
+	    EXC_LAST - EXC_RST);
+	__syncicache((void *)PHYS_TO_DMAP(EXC_RSVD), EXC_LAST - EXC_RSVD);
 }

 /*
@@ -381,12 +382,6 @@
 #endif

 	fdt = fdt_ptr;
-
-	#ifdef FDT_DTB_STATIC
-	/* Check for a statically included blob */
-	if (fdt == NULL)
-		fdt = &fdt_static_dtb;
-	#endif
 }

 boolean_t
@@ -414,13 +409,57 @@
 	} else
 #endif
 	if (fdt != NULL) {
-		status = OF_install(OFW_FDT, 0);
+#ifdef AIM
+		bus_space_tag_t fdt_bt;
+		vm_offset_t tmp_fdt_ptr;
+		vm_size_t fdt_size;
+		uintptr_t fdt_va;
+#endif

+		status = OF_install(OFW_FDT, 0);
 		if (status != TRUE)
 			return status;

+#ifdef AIM /* AIM-only for now -- Book-E does this remapping in early init */
+		/* Get the FDT size for mapping if we can */
+		tmp_fdt_ptr = pmap_early_io_map((vm_paddr_t)fdt, PAGE_SIZE);
+		if (fdt_check_header((void *)tmp_fdt_ptr) != 0) {
+			pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE);
+			return FALSE;
+		}
+		fdt_size = fdt_totalsize((void *)tmp_fdt_ptr);
+		pmap_early_io_unmap(tmp_fdt_ptr, PAGE_SIZE);
+
+		/*
+		 * Map this for real. Use bus_space_map() to take advantage
+		 * of its auto-remapping function once the kernel is loaded.
+		 * This is a dirty hack, but it is what we have.
+		 */
+#ifdef _LITTLE_ENDIAN
+		fdt_bt = &bs_le_tag;
+#else
+		fdt_bt = &bs_be_tag;
+#endif
+		bus_space_map(fdt_bt, (vm_paddr_t)fdt, fdt_size, 0, &fdt_va);
+
+		err = OF_init((void *)fdt_va);
+#else
 		err = OF_init(fdt);
+#endif
 	}
+
+	#ifdef FDT_DTB_STATIC
+	/*
+	 * Check for a statically included blob already in the kernel and
+	 * needing no mapping.
+	 */
+	else {
+		status = OF_install(OFW_FDT, 0);
+		if (status != TRUE)
+			return status;
+		err = OF_init(&fdt_static_dtb);
+	}
+	#endif

 	if (err != 0) {
 		OF_install(NULL, 0);
Index: head/sys/powerpc/ofw/ofw_real.c
===================================================================
--- head/sys/powerpc/ofw/ofw_real.c
+++ head/sys/powerpc/ofw/ofw_real.c
@@ -223,7 +223,7 @@
 	 * we have a 32-bit virtual address to give OF.
 	 */

-	if (!ofw_real_mode && !hw_direct_map)
+	if (!ofw_real_mode && (!hw_direct_map || DMAP_BASE_ADDRESS != 0))
 		pmap_kenter(of_bounce_phys, of_bounce_phys);

 	mtx_unlock(&of_bounce_mtx);
@@ -244,7 +244,7 @@
 	 * can use right now is memory mapped by firmware.
 	 */
 	if (!pmap_bootstrapped)
-		return (cell_t)(uintptr_t)buf;
+		return (cell_t)((uintptr_t)buf & ~DMAP_BASE_ADDRESS);

 	/*
 	 * XXX: It is possible for us to get called before the VM has
@@ -253,7 +253,8 @@
 	 * Copy into the emergency buffer, and reset at the end.
 	 */
 		of_bounce_virt = emergency_buffer;
-		of_bounce_phys = (vm_offset_t)of_bounce_virt;
+		of_bounce_phys = (vm_offset_t)of_bounce_virt &
+		    ~DMAP_BASE_ADDRESS;
 		of_bounce_size = sizeof(emergency_buffer);
 	}
@@ -261,7 +262,8 @@
 	 * Make sure the bounce page offset satisfies any reasonable
 	 * alignment constraint.
 	 */
-	of_bounce_offset += sizeof(register_t) - (of_bounce_offset % sizeof(register_t));
+	of_bounce_offset += sizeof(register_t) -
+	    (of_bounce_offset % sizeof(register_t));

 	if (of_bounce_offset + len > of_bounce_size) {
 		panic("Oversize Open Firmware call!");
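Both OF front ends obey the same rule: anything handed to Open Firmware running in real mode must be a physical address small enough to fit in a 32-bit cell, so kernel pointers, which may now carry the direct-map offset, are masked before crossing the boundary. A condensed sketch mirroring ofw_real_map() above (the helper name is illustrative, not a new API):

	/* Sketch: kernel VA -> OF cell, assuming buf is 1:1 or a DMAP alias. */
	static cell_t
	va_to_ofw_cell(const void *buf)
	{

		return ((cell_t)((uintptr_t)buf & ~DMAP_BASE_ADDRESS));
	}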
Index: head/sys/powerpc/powerpc/bus_machdep.c
===================================================================
--- head/sys/powerpc/powerpc/bus_machdep.c
+++ head/sys/powerpc/powerpc/bus_machdep.c
@@ -115,7 +115,9 @@

 	for (i = 0; i < earlyboot_map_idx; i++) {
 		spa = earlyboot_mappings[i].addr;
-		if (spa == earlyboot_mappings[i].virt &&
+
+		if (hw_direct_map &&
+		    PHYS_TO_DMAP(spa) == earlyboot_mappings[i].virt &&
 		    pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size)
 		    == 0)
 			continue;
Index: head/sys/powerpc/powerpc/genassym.c
===================================================================
--- head/sys/powerpc/powerpc/genassym.c
+++ head/sys/powerpc/powerpc/genassym.c
@@ -222,6 +222,7 @@
 ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));

 ASSYM(KERNBASE, KERNBASE);
+ASSYM(DMAP_BASE_ADDRESS, DMAP_BASE_ADDRESS);
 ASSYM(MAXCOMLEN, MAXCOMLEN);

 #ifdef __powerpc64__
Index: head/sys/powerpc/powerpc/mem.c
===================================================================
--- head/sys/powerpc/powerpc/mem.c
+++ head/sys/powerpc/powerpc/mem.c
@@ -125,8 +125,9 @@
 			break;
 		}

-		if (!pmap_dev_direct_mapped(v, cnt)) {
-			error = uiomove((void *)v, cnt, uio);
+		if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
+			error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
+			    uio);
 		} else {
 			m.phys_addr = trunc_page(v);
 			marr = &m;
Index: head/sys/powerpc/powerpc/uma_machdep.c
===================================================================
--- head/sys/powerpc/powerpc/uma_machdep.c
+++ head/sys/powerpc/powerpc/uma_machdep.c
@@ -94,7 +94,12 @@
 	if (!hw_direct_map)
 		pmap_remove(kernel_pmap,(vm_offset_t)mem,
 		    (vm_offset_t)mem + PAGE_SIZE);
-	m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
+	if (hw_direct_map)
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem));
+	else
+		m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
+	KASSERT(m != NULL,
+	    ("Freeing UMA block at %p with no associated page", mem));
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
 	atomic_subtract_int(&hw_uma_mdpages, 1);
Index: head/sys/powerpc/ps3/platform_ps3.c
===================================================================
--- head/sys/powerpc/ps3/platform_ps3.c
+++ head/sys/powerpc/ps3/platform_ps3.c
@@ -128,9 +128,6 @@
 	pmap_mmu_install("mmu_ps3", BUS_PROBE_SPECIFIC);
 	cpu_idle_hook = ps3_cpu_idle;

-	/* Set a breakpoint to make NULL an invalid address */
-	lv1_set_dabr(0x7 /* read and write, MMU on */, 2 /* kernel accesses */);
-
 	/* Record our PIR at boot for later */
 	ps3_boot_pir = mfspr(SPR_PIR);

@@ -227,7 +224,8 @@
 ps3_smp_start_cpu(platform_t plat, struct pcpu *pc)
 {
 	/* kernel is spinning on 0x40 == -1 right now */
-	volatile uint32_t *secondary_spin_sem = (uint32_t *)PHYS_TO_DMAP(0x40);
+	volatile uint32_t *secondary_spin_sem =
+	    (uint32_t *)PHYS_TO_DMAP((uintptr_t)0x40);
 	int remote_pir = pc->pc_hwref;
 	int timeout;
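The bus_machdep.c, mem.c, and uma_machdep.c hunks all apply one pattern: physical memory may be touched through PHYS_TO_DMAP() only when hw_direct_map is set, and a direct-map VA must go back through DMAP_TO_PHYS() before being treated as a physical address again. A minimal sketch of that guard (hypothetical helper, not part of this patch):

	/* Sketch: resolve a physical address to a kernel VA, if possible. */
	static void *
	pa_to_kva_or_null(vm_paddr_t pa)
	{

		if (hw_direct_map)
			return ((void *)(uintptr_t)PHYS_TO_DMAP(pa));
		return (NULL);	/* caller must create an explicit mapping */
	}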