Index: head/sys/powerpc/aim/mmu_oea.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea.c	(revision 328529)
+++ head/sys/powerpc/aim/mmu_oea.c	(revision 328530)
@@ -1,2752 +1,2780 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
  *
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
  */
 /*-
  * Copyright (C) 2001 Benno Rice.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * Manages physical address maps.
  *
  * Since the information managed by this module is also stored by the
  * logical address mapping module, this module may throw away valid virtual
  * to physical mappings at almost any time.  However, invalidations of
  * mappings must be done as requested.
  *
  * In order to cope with hardware architectures which make virtual to
  * physical map invalidates expensive, this module may delay invalidate
  * or reduced protection operations until such time as they are actually
  * necessary.  This module is given full information as to which processors
  * are currently using which maps, and to when physical maps must be made
  * correct.
  */
 
 #include "opt_kstack_pages.h"
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/conf.h>
 #include <sys/queue.h>
 #include <sys/cpuset.h>
 #include <sys/kerneldump.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/vmmeter.h>
 
 #include <dev/ofw/openfirm.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_pageout.h>
 #include <vm/uma.h>
 
 #include <machine/cpu.h>
 #include <machine/platform.h>
 #include <machine/bat.h>
 #include <machine/frame.h>
 #include <machine/md_var.h>
 #include <machine/psl.h>
 #include <machine/pte.h>
 #include <machine/smp.h>
 #include <machine/sr.h>
 #include <machine/mmuvar.h>
 #include <machine/trap.h>
 
 #include "mmu_if.h"
 
 #define	MOEA_DEBUG
 
 #define TODO	panic("%s: not implemented", __func__);
 
 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
 #define	VSID_TO_SR(vsid)	((vsid) & 0xf)
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 
 struct ofw_map {
 	vm_offset_t	om_va;
 	vm_size_t	om_len;
 	vm_offset_t	om_pa;
 	u_int		om_mode;
 };
 
 extern unsigned char _etext[];
 extern unsigned char _end[];
 
 /*
  * Map of physical memory regions.
  */
 static struct	mem_region *regions;
 static struct	mem_region *pregions;
 static u_int    phys_avail_count;
 static int	regions_sz, pregions_sz;
 static struct	ofw_map *translations;
 
 /*
  * Lock for the pteg and pvo tables.
  */
 struct mtx	moea_table_mutex;
 struct mtx	moea_vsid_mutex;
 
 /* tlbie instruction synchronization */
 static struct mtx tlbie_mtx;
 
 /*
  * PTEG data.
  */
 static struct	pteg *moea_pteg_table;
 u_int		moea_pteg_count;
 u_int		moea_pteg_mask;
 
 /*
  * PVO data.
  */
 struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
 struct	pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
 static struct rwlock_padalign pvh_global_lock;
 
 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
 
 #define	BPVO_POOL_SIZE	32768
 static struct	pvo_entry *moea_bpvo_pool;
 static int	moea_bpvo_pool_index = 0;
 
 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
 static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];
 
 static boolean_t moea_initialized = FALSE;
 
 /*
  * Statistics.
  */
 u_int	moea_pte_valid = 0;
 u_int	moea_pte_overflow = 0;
 u_int	moea_pte_replacements = 0;
 u_int	moea_pvo_entries = 0;
 u_int	moea_pvo_enter_calls = 0;
 u_int	moea_pvo_remove_calls = 0;
 u_int	moea_pte_spills = 0;
 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
     0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
     &moea_pte_overflow, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
     &moea_pte_replacements, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
     0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
     &moea_pvo_enter_calls, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
     &moea_pvo_remove_calls, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
     &moea_pte_spills, 0, "");
 
 /*
  * Allocate physical memory for use in moea_bootstrap.
  */
 static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);
 
 /*
  * PTE calls.
  */
 static int		moea_pte_insert(u_int, struct pte *);
 
 /*
  * PVO calls.
  */
 static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
 		    vm_offset_t, vm_paddr_t, u_int, int);
 static void	moea_pvo_remove(struct pvo_entry *, int);
 static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
 static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);
 
 /*
  * Utility routines.
  */
 static int		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 			    vm_prot_t, u_int, int8_t);
 static void		moea_syncicache(vm_paddr_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int		moea_clear_bit(vm_page_t, int);
 static void		moea_kremove(mmu_t, vm_offset_t);
 int		moea_pte_spill(vm_offset_t);
 
 /*
  * Kernel MMU interface
  */
 void moea_clear_modify(mmu_t, vm_page_t);
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
 int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
     int8_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
 vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
 vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
 void moea_init(mmu_t);
 boolean_t moea_is_modified(mmu_t, vm_page_t);
 boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
 boolean_t moea_is_referenced(mmu_t, vm_page_t);
 int moea_ts_referenced(mmu_t, vm_page_t);
 vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
 boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
 void moea_page_init(mmu_t, vm_page_t);
 int moea_page_wired_mappings(mmu_t, vm_page_t);
 void moea_pinit(mmu_t, pmap_t);
 void moea_pinit0(mmu_t, pmap_t);
 void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
 void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
 void moea_qremove(mmu_t, vm_offset_t, int);
 void moea_release(mmu_t, pmap_t);
 void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_remove_all(mmu_t, vm_page_t);
 void moea_remove_write(mmu_t, vm_page_t);
 void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_zero_page(mmu_t, vm_page_t);
 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
 void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
 void moea_cpu_bootstrap(mmu_t, int);
 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
 void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
 void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
 void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
 boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
 void moea_scan_init(mmu_t mmu);
 vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
 	MMUMETHOD(mmu_copy_page,	moea_copy_page),
 	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
 	MMUMETHOD(mmu_enter,		moea_enter),
 	MMUMETHOD(mmu_enter_object,	moea_enter_object),
 	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
 	MMUMETHOD(mmu_extract,		moea_extract),
 	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
 	MMUMETHOD(mmu_init,		moea_init),
 	MMUMETHOD(mmu_is_modified,	moea_is_modified),
 	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
 	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
 	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
 	MMUMETHOD(mmu_map,     		moea_map),
 	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
 	MMUMETHOD(mmu_page_init,	moea_page_init),
 	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
 	MMUMETHOD(mmu_pinit,		moea_pinit),
 	MMUMETHOD(mmu_pinit0,		moea_pinit0),
 	MMUMETHOD(mmu_protect,		moea_protect),
 	MMUMETHOD(mmu_qenter,		moea_qenter),
 	MMUMETHOD(mmu_qremove,		moea_qremove),
 	MMUMETHOD(mmu_release,		moea_release),
 	MMUMETHOD(mmu_remove,		moea_remove),
 	MMUMETHOD(mmu_remove_all,      	moea_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea_remove_write),
 	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
 	MMUMETHOD(mmu_unwire,		moea_unwire),
 	MMUMETHOD(mmu_zero_page,       	moea_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
 	MMUMETHOD(mmu_activate,		moea_activate),
 	MMUMETHOD(mmu_deactivate,      	moea_deactivate),
 	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),
 	MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
 	MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
 
 	/* Internal interfaces */
 	MMUMETHOD(mmu_bootstrap,       	moea_bootstrap),
 	MMUMETHOD(mmu_cpu_bootstrap,   	moea_cpu_bootstrap),
 	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
 	MMUMETHOD(mmu_mapdev,		moea_mapdev),
 	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
 	MMUMETHOD(mmu_kextract,		moea_kextract),
 	MMUMETHOD(mmu_kenter,		moea_kenter),
 	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
 	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
 	MMUMETHOD(mmu_scan_init,	moea_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
 
 MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
 
 static __inline uint32_t
 moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 {
 	uint32_t pte_lo;
 	int i;
 
 	if (ma != VM_MEMATTR_DEFAULT) {
 		switch (ma) {
 		case VM_MEMATTR_UNCACHEABLE:
 			return (PTE_I | PTE_G);
 		case VM_MEMATTR_CACHEABLE:
 			return (PTE_M);
 		case VM_MEMATTR_WRITE_COMBINING:
 		case VM_MEMATTR_WRITE_BACK:
 		case VM_MEMATTR_PREFETCHABLE:
 			return (PTE_I);
 		case VM_MEMATTR_WRITE_THROUGH:
 			return (PTE_W | PTE_M);
 		}
 	}
 
 	/*
 	 * Assume the page is cache inhibited and access is guarded unless
 	 * it's in our available memory array.
 	 */
 	pte_lo = PTE_I | PTE_G;
 	for (i = 0; i < pregions_sz; i++) {
 		if ((pa >= pregions[i].mr_start) &&
 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
 			pte_lo = PTE_M;
 			break;
 		}
 	}
 
 	return pte_lo;
 }
 
 static void
 tlbie(vm_offset_t va)
 {
 
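 	/* Serialize the tlbie/tlbsync sequence across CPUs. */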
 	mtx_lock_spin(&tlbie_mtx);
 	__asm __volatile("ptesync");
 	__asm __volatile("tlbie %0" :: "r"(va));
 	__asm __volatile("eieio; tlbsync; ptesync");
 	mtx_unlock_spin(&tlbie_mtx);
 }
 
 static void
 tlbia(void)
 {
 	vm_offset_t va;
 
 	for (va = 0; va < 0x00040000; va += 0x00001000) {
 		__asm __volatile("tlbie %0" :: "r"(va));
 		powerpc_sync();
 	}
 	__asm __volatile("tlbsync");
 	powerpc_sync();
 }
 
 static __inline int
 va_to_sr(u_int *sr, vm_offset_t va)
 {
 	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
 }
 
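 /*
  * Compute the primary PTEG index for a virtual address: the VSID from
  * the segment register is XORed with the virtual page index and the
  * result is masked by the PTEG table size.
  */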
 static __inline u_int
 va_to_pteg(u_int sr, vm_offset_t addr)
 {
 	u_int hash;
 
 	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
 	    ADDR_PIDX_SHFT);
 	return (hash & moea_pteg_mask);
 }
 
 static __inline struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
 	return (&m->md.mdpg_pvoh);
 }
 
 static __inline void
 moea_attr_clear(vm_page_t m, int ptebit)
 {
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs &= ~ptebit;
 }
 
 static __inline int
 moea_attr_fetch(vm_page_t m)
 {
 
 	return (m->md.mdpg_attrs);
 }
 
 static __inline void
 moea_attr_save(vm_page_t m, int ptebit)
 {
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs |= ptebit;
 }
 
 static __inline int
 moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
 {
 	if (pt->pte_hi == pvo_pt->pte_hi)
 		return (1);
 
 	return (0);
 }
 
 static __inline int
 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
 {
 	return (pt->pte_hi & ~PTE_VALID) ==
 	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
 	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
 }
 
 static __inline void
 moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
 {
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 
 	/*
 	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
 	 * set when the real pte is set in memory.
 	 *
 	 * Note: Don't set the valid bit for correct operation of tlb update.
 	 */
 	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
 	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
 	pt->pte_lo = pte_lo;
 }
 
 static __inline void
 moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
 {
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
 }
 
 static __inline void
 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
 {
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 
 	/*
 	 * As shown in Section 7.6.3.2.3
 	 */
 	pt->pte_lo &= ~ptebit;
 	tlbie(va);
 }
 
 static __inline void
 moea_pte_set(struct pte *pt, struct pte *pvo_pt)
 {
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 	pvo_pt->pte_hi |= PTE_VALID;
 
 	/*
 	 * Update the PTE as defined in section 7.6.3.1.
 	 * Note that the REF/CHG bits are from pvo_pt and thus should have
 	 * been saved so this routine can restore them (if desired).
 	 */
 	pt->pte_lo = pvo_pt->pte_lo;
 	powerpc_sync();
 	pt->pte_hi = pvo_pt->pte_hi;
 	powerpc_sync();
 	moea_pte_valid++;
 }
 
 static __inline void
 moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
 {
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 	pvo_pt->pte_hi &= ~PTE_VALID;
 
 	/*
 	 * Force the ref & chg bits back into the PTEs.
 	 */
 	powerpc_sync();
 
 	/*
 	 * Invalidate the pte.
 	 */
 	pt->pte_hi &= ~PTE_VALID;
 
 	tlbie(va);
 
 	/*
 	 * Save the ref & chg bits.
 	 */
 	moea_pte_synch(pt, pvo_pt);
 	moea_pte_valid--;
 }
 
 static __inline void
 moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
 {
 
 	/*
 	 * Invalidate the PTE
 	 */
 	moea_pte_unset(pt, pvo_pt, va);
 	moea_pte_set(pt, pvo_pt);
 }
 
 /*
  * Quick sort callout for comparing memory regions.
  */
 static int	om_cmp(const void *a, const void *b);
 
 static int
 om_cmp(const void *a, const void *b)
 {
 	const struct	ofw_map *mapa;
 	const struct	ofw_map *mapb;
 
 	mapa = a;
 	mapb = b;
 	if (mapa->om_pa < mapb->om_pa)
 		return (-1);
 	else if (mapa->om_pa > mapb->om_pa)
 		return (1);
 	else
 		return (0);
 }
 
 void
 moea_cpu_bootstrap(mmu_t mmup, int ap)
 {
 	u_int sdr;
 	int i;
 
 	if (ap) {
 		powerpc_sync();
 		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
 		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
 		isync();
 		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
 		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
 		isync();
 	}
 
 	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
 	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
 	isync();
 
 	__asm __volatile("mtibatu 1,%0" :: "r"(0));
 	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
 	__asm __volatile("mtibatu 2,%0" :: "r"(0));
 	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
 	__asm __volatile("mtibatu 3,%0" :: "r"(0));
 	isync();
 
 	for (i = 0; i < 16; i++)
 		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
 	powerpc_sync();
 
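 	/*
 	 * SDR1 tells the MMU where the page table lives: the upper bits
 	 * are the PTEG table base address and the low bits hold the hash
 	 * mask derived from the table size.
 	 */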
 	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
 	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
 	isync();
 
 	tlbia();
 }
 
 void
 moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	ihandle_t	mmui;
 	phandle_t	chosen, mmu;
 	int		sz;
 	int		i, j;
 	vm_size_t	size, physsz, hwphyssz;
 	vm_offset_t	pa, va, off;
 	void		*dpcpu;
 	register_t	msr;
 
         /*
          * Set up BAT0 to map the lowest 256 MB area
          */
         battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
         battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
 
 	/*
 	 * Map PCI memory space.
 	 */
 	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
 	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
 
 	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
 	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
 
 	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
 	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
 
 	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
 	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
 
 	/*
 	 * Map obio devices.
 	 */
 	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
 	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
 
 	/*
 	 * Use an IBAT and a DBAT to map the bottom segment of memory
 	 * where we are. Turn off instruction relocation temporarily
 	 * to prevent faults while reprogramming the IBAT.
 	 */
 	msr = mfmsr();
 	mtmsr(msr & ~PSL_IR);
 	__asm (".balign 32; \n"
 	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
 	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
 	    :: "r"(battable[0].batu), "r"(battable[0].batl));
 	mtmsr(msr);
 
 	/* map pci space */
 	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
 	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
 	isync();
 
 	/* set global direct map flag */
 	hw_direct_map = 1;
 
 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
 
 	for (i = 0; i < pregions_sz; i++) {
 		vm_offset_t pa;
 		vm_offset_t end;
 
 		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
 			pregions[i].mr_start,
 			pregions[i].mr_start + pregions[i].mr_size,
 			pregions[i].mr_size);
 		/*
 		 * Install entries into the BAT table to allow all
 		 * of physmem to be covered by on-demand BAT entries.
 		 * The loop will sometimes set the same battable element
 		 * twice, but that's fine since they won't be used for
 		 * a while yet.
 		 */
 		pa = pregions[i].mr_start & 0xf0000000;
 		end = pregions[i].mr_start + pregions[i].mr_size;
 		do {
                         u_int n = pa >> ADDR_SR_SHFT;
 
 			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
 			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
 			pa += SEGMENT_LENGTH;
 		} while (pa < end);
 	}
 
 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
 		panic("moea_bootstrap: phys_avail too small");
 
 	phys_avail_count = 0;
 	physsz = 0;
 	hwphyssz = 0;
 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
 		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
 		    regions[i].mr_start + regions[i].mr_size,
 		    regions[i].mr_size);
 		if (hwphyssz != 0 &&
 		    (physsz + regions[i].mr_size) >= hwphyssz) {
 			if (physsz < hwphyssz) {
 				phys_avail[j] = regions[i].mr_start;
 				phys_avail[j + 1] = regions[i].mr_start +
 				    hwphyssz - physsz;
 				physsz = hwphyssz;
 				phys_avail_count++;
 			}
 			break;
 		}
 		phys_avail[j] = regions[i].mr_start;
 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
 		phys_avail_count++;
 		physsz += regions[i].mr_size;
 	}
 
 	/* Check for overlap with the kernel and exception vectors */
 	for (j = 0; j < 2*phys_avail_count; j+=2) {
 		if (phys_avail[j] < EXC_LAST)
 			phys_avail[j] += EXC_LAST;
 
 		if (kernelstart >= phys_avail[j] &&
 		    kernelstart < phys_avail[j+1]) {
 			if (kernelend < phys_avail[j+1]) {
 				phys_avail[2*phys_avail_count] =
 				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
 				phys_avail[2*phys_avail_count + 1] =
 				    phys_avail[j+1];
 				phys_avail_count++;
 			}
 
 			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
 		}
 
 		if (kernelend >= phys_avail[j] &&
 		    kernelend < phys_avail[j+1]) {
 			if (kernelstart > phys_avail[j]) {
 				phys_avail[2*phys_avail_count] = phys_avail[j];
 				phys_avail[2*phys_avail_count + 1] =
 				    kernelstart & ~PAGE_MASK;
 				phys_avail_count++;
 			}
 
 			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
 		}
 	}
 
 	physmem = btoc(physsz);
 
 	/*
 	 * Allocate PTEG table.
 	 */
 #ifdef PTEGCOUNT
 	moea_pteg_count = PTEGCOUNT;
 #else
 	moea_pteg_count = 0x1000;
 
 	while (moea_pteg_count < physmem)
 		moea_pteg_count <<= 1;
 
 	moea_pteg_count >>= 1;
 #endif /* PTEGCOUNT */
 
 	size = moea_pteg_count * sizeof(struct pteg);
 	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
 	    size);
 	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
 	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
 	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
 	moea_pteg_mask = moea_pteg_count - 1;
 
 	/*
 	 * Allocate pv/overflow lists.
 	 */
 	size = sizeof(struct pvo_head) * moea_pteg_count;
 	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
 	    PAGE_SIZE);
 	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
 	for (i = 0; i < moea_pteg_count; i++)
 		LIST_INIT(&moea_pvo_table[i]);
 
 	/*
 	 * Initialize the lock that synchronizes access to the pteg and pvo
 	 * tables.
 	 */
 	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
 	    MTX_RECURSE);
 	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);
 
 	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);
 
 	/*
 	 * Initialise the unmanaged pvo pool.
 	 */
 	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
 		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
 	moea_bpvo_pool_index = 0;
 
 	/*
 	 * Make sure kernel vsid is allocated as well as VSID 0.
 	 */
 	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
 	moea_vsid_bitmap[0] |= 1;
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	PMAP_LOCK_INIT(kernel_pmap);
 	for (i = 0; i < 16; i++)
 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
 	CPU_FILL(&kernel_pmap->pm_active);
 	RB_INIT(&kernel_pmap->pmap_pvo);
 
  	/*
 	 * Initialize the global pv list lock.
 	 */
 	rw_init(&pvh_global_lock, "pmap pv global");
 
 	/*
 	 * Set up the Open Firmware mappings
 	 */
 	chosen = OF_finddevice("/chosen");
 	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
 	    (mmu = OF_instance_to_package(mmui)) != -1 &&
 	    (sz = OF_getproplen(mmu, "translations")) != -1) {
 		translations = NULL;
 		for (i = 0; phys_avail[i] != 0; i += 2) {
 			if (phys_avail[i + 1] >= sz) {
 				translations = (struct ofw_map *)phys_avail[i];
 				break;
 			}
 		}
 		if (translations == NULL)
 			panic("moea_bootstrap: no space to copy translations");
 		bzero(translations, sz);
 		if (OF_getprop(mmu, "translations", translations, sz) == -1)
 			panic("moea_bootstrap: can't get ofw translations");
 		CTR0(KTR_PMAP, "moea_bootstrap: translations");
 		sz /= sizeof(*translations);
 		qsort(translations, sz, sizeof (*translations), om_cmp);
 		for (i = 0; i < sz; i++) {
 			CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
 			    translations[i].om_pa, translations[i].om_va,
 			    translations[i].om_len);
 
 			/*
 			 * If the mapping is 1:1, let the RAM and device
 			 * on-demand BAT tables take care of the translation.
 			 */
 			if (translations[i].om_va == translations[i].om_pa)
 				continue;
 
 			/* Enter the pages */
 			for (off = 0; off < translations[i].om_len;
 			    off += PAGE_SIZE)
 				moea_kenter(mmup, translations[i].om_va + off,
 					    translations[i].om_pa + off);
 		}
 	}
 
 	/*
 	 * Calculate the last available physical address.
 	 */
 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
 		;
 	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
 	moea_cpu_bootstrap(mmup,0);
 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
 	pmap_bootstrapped++;
 
 	/*
 	 * Set the start and end of kva.
 	 */
 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
 	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
 
 	/*
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
 	 */
 	pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
 	virtual_avail = va + kstack_pages * PAGE_SIZE;
 	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
 	thread0.td_kstack = va;
 	thread0.td_kstack_pages = kstack_pages;
 	for (i = 0; i < kstack_pages; i++) {
 		moea_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 
 	/*
 	 * Allocate virtual address space for the message buffer.
 	 */
 	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
 	msgbufp = (struct msgbuf *)virtual_avail;
 	va = virtual_avail;
 	virtual_avail += round_page(msgbufsize);
 	while (va < virtual_avail) {
 		moea_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 
 	/*
 	 * Allocate virtual address space for the dynamic percpu area.
 	 */
 	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
 	dpcpu = (void *)virtual_avail;
 	va = virtual_avail;
 	virtual_avail += DPCPU_SIZE;
 	while (va < virtual_avail) {
 		moea_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 	dpcpu_init(dpcpu, 0);
 }
 
 /*
  * Activate a user pmap.  The pmap must be activated before its address
  * space can be accessed in any way.
  */
 void
 moea_activate(mmu_t mmu, struct thread *td)
 {
 	pmap_t	pm, pmr;
 
 	/*
 	 * Load all the data we need up front to encourage the compiler to
 	 * not issue any loads while we have interrupts disabled below.
 	 */
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	pmr = pm->pmap_phys;
 
 	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 	PCPU_SET(curpmap, pmr);
 
 	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
 }
 
 void
 moea_deactivate(mmu_t mmu, struct thread *td)
 {
 	pmap_t	pm;
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 	PCPU_SET(curpmap, NULL);
 }
 
 void
 moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry key, *pvo;
 
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva;
 	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
 		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
 			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
 		pvo->pvo_vaddr &= ~PVO_WIRED;
 		pm->pm_stats.wired_count--;
 	}
 	PMAP_UNLOCK(pm);
 }
 
 void
 moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t	dst;
 	vm_offset_t	src;
 
 	dst = VM_PAGE_TO_PHYS(mdst);
 	src = VM_PAGE_TO_PHYS(msrc);
 
 	bcopy((void *)src, (void *)dst, PAGE_SIZE);
 }
 
 void
 moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
 	vm_offset_t a_pg_offset, b_pg_offset;
 	int cnt;
 
 	while (xfersize > 0) {
 		a_pg_offset = a_offset & PAGE_MASK;
 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
 		    a_pg_offset;
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
 		    b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
 		a_offset += cnt;
 		b_offset += cnt;
 		xfersize -= cnt;
 	}
 }
 
 /*
  * Zero a page of physical memory by temporarily mapping it into the tlb.
  */
 void
 moea_zero_page(mmu_t mmu, vm_page_t m)
 {
 	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);
 
 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
 		__asm __volatile("dcbz 0,%0" :: "r"(pa + off));
 }
 
 void
 moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {
 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 	void *va = (void *)(pa + off);
 
 	bzero(va, size);
 }
 
 vm_offset_t
 moea_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
 
 	return (VM_PAGE_TO_PHYS(m));
 }
 
 void
 moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
 {
 }
 
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  */
 int
 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     u_int flags, int8_t psind)
 {
 	int error;
 
 	for (;;) {
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pmap);
 		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
 		rw_wunlock(&pvh_global_lock);
 		PMAP_UNLOCK(pmap);
 		if (error != ENOMEM)
 			return (KERN_SUCCESS);
 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
 			return (KERN_RESOURCE_SHORTAGE);
 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
 		VM_WAIT;
 	}
 }
 
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  *
  * The global pvh and pmap must be locked.
  */
 static int
 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     u_int flags, int8_t psind __unused)
 {
 	struct		pvo_head *pvo_head;
 	uma_zone_t	zone;
 	u_int		pte_lo, pvo_flags;
 	int		error;
 
 	if (pmap_bootstrapped)
 		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) {
 		pvo_head = &moea_pvo_kunmanaged;
 		zone = moea_upvo_zone;
 		pvo_flags = 0;
 	} else {
 		pvo_head = vm_page_to_pvoh(m);
 		zone = moea_mpvo_zone;
 		pvo_flags = PVO_MANAGED;
 	}
 
 	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 
 	if (prot & VM_PROT_WRITE) {
 		pte_lo |= PTE_BW;
 		if (pmap_bootstrapped &&
 		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_WRITEABLE);
 	} else
 		pte_lo |= PTE_BR;
 
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		pvo_flags |= PVO_WIRED;
 
 	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
 	    pte_lo, pvo_flags);
 
 	/*
 	 * Flush the real page from the instruction cache. This has to be done
 	 * for all user mappings to prevent information leakage via the
 	 * instruction cache. moea_pvo_enter() returns ENOENT for the first
 	 * mapping for a page.
 	 */
 	if (pmap != kernel_pmap && error == ENOENT &&
 	    (pte_lo & (PTE_I | PTE_G)) == 0)
 		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 
 	return (error);
 }
 
 /*
  * Maps a sequence of resident pages belonging to the same object.
  * The sequence begins with the given page m_start.  This page is
  * mapped at the given virtual address start.  Each subsequent page is
  * mapped at a virtual address that is offset from start by the same
  * amount as the page is offset from m_start within the object.  The
  * last page in the sequence is the page with the largest offset from
  * m_start that can be mapped at a virtual address less than the given
  * virtual address end.  Not every virtual page between start and end
  * is mapped; only those for which a resident page exists with the
  * corresponding offset from m_start are mapped.
  */
 void
 moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
 	vm_page_t m;
 	vm_pindex_t diff, psize;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
 	psize = atop(end - start);
 	m = m_start;
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
 void
 moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    0, 0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
 vm_paddr_t
 moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
 
 	PMAP_LOCK(pm);
 	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 	if (pvo == NULL)
 		pa = 0;
 	else
 		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
 	PMAP_UNLOCK(pm);
 	return (pa);
 }
 
 /*
  * Atomically extract and hold the physical page with the given
  * pmap and virtual address pair if that mapping permits the given
  * protection.
  */
 vm_page_t
 moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
         vm_paddr_t pa;
 
 	m = NULL;
 	pa = 0;
 	PMAP_LOCK(pmap);
 retry:
 	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
 	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
 	     (prot & VM_PROT_WRITE) == 0)) {
 		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
 			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
 		vm_page_hold(m);
 	}
 	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
 
 void
 moea_init(mmu_t mmu)
 {
 
 	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	moea_initialized = TRUE;
 }
 
 boolean_t
 moea_is_referenced(mmu_t mmu, vm_page_t m)
 {
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_is_referenced: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_REF);
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_is_modified: page %p is not managed", m));
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_CHG);
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 boolean_t
 moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
 	boolean_t rv;
 
 	PMAP_LOCK(pmap);
 	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
 	PMAP_UNLOCK(pmap);
 	return (rv);
 }
 
 void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	KASSERT(!vm_page_xbusied(m),
 	    ("moea_clear_modify: page %p is exclusive busy", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
 	 * set.  If the object containing the page is locked and the page is
 	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	moea_clear_bit(m, PTE_CHG);
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
 void
 moea_remove_write(mmu_t mmu, vm_page_t m)
 {
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
 	pmap_t	pmap;
 	u_int	lo;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_remove_write: page %p is not managed", m));
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * set by another thread while the object is locked.  Thus,
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
 	powerpc_sync();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
 			pt = moea_pvo_to_pte(pvo, -1);
 			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
 			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
 			if (pt != NULL) {
 				moea_pte_synch(pt, &pvo->pvo_pte.pte);
 				lo |= pvo->pvo_pte.pte.pte_lo;
 				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
 				moea_pte_change(pt, &pvo->pvo_pte.pte,
 				    pvo->pvo_vaddr);
 				mtx_unlock(&moea_table_mutex);
 			}
 		}
 		PMAP_UNLOCK(pmap);
 	}
 	if ((lo & PTE_CHG) != 0) {
 		moea_attr_clear(m, PTE_CHG);
 		vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  *	moea_ts_referenced:
  *
  *	Return a count of reference bits for a page, clearing those bits.
  *	It is not necessary for every reference bit to be cleared, but it
  *	is necessary that 0 only be returned when there are truly no
  *	reference bits set.
  *
  *	XXX: The exact number of bits to check and clear is a matter that
  *	should be tested and standardized at some point in the future for
  *	optimal aging of shared pages.
  */
 int
 moea_ts_referenced(mmu_t mmu, vm_page_t m)
 {
 	int count;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_ts_referenced: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
 	count = moea_clear_bit(m, PTE_REF);
 	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
 /*
  * Modify the WIMG settings of all mappings for a page.
  */
 void
 moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 {
 	struct	pvo_entry *pvo;
 	struct	pvo_head *pvo_head;
 	struct	pte *pt;
 	pmap_t	pmap;
 	u_int	lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		m->md.mdpg_cache_attrs = ma;
 		return;
 	}
 
 	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
 	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		pt = moea_pvo_to_pte(pvo, -1);
 		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
 		pvo->pvo_pte.pte.pte_lo |= lo;
 		if (pt != NULL) {
 			moea_pte_change(pt, &pvo->pvo_pte.pte,
 			    pvo->pvo_vaddr);
 			if (pvo->pvo_pmap == kernel_pmap)
 				isync();
 		}
 		mtx_unlock(&moea_table_mutex);
 		PMAP_UNLOCK(pmap);
 	}
 	m->md.mdpg_cache_attrs = ma;
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Map a wired page into kernel virtual address space.
  */
 void
 moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 {
 
 	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
 }
 
 void
 moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 	u_int		pte_lo;
 	int		error;
 
 #if 0
 	if (va < VM_MIN_KERNEL_ADDRESS)
 		panic("moea_kenter: attempt to enter non-kernel address %#x",
 		    va);
 #endif
 
 	pte_lo = moea_calc_wimg(pa, ma);
 
 	PMAP_LOCK(kernel_pmap);
 	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
 	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
 
 	if (error != 0 && error != ENOENT)
 		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
 		    pa, error);
 
 	PMAP_UNLOCK(kernel_pmap);
 }
 
 /*
  * Extract the physical page address associated with the given kernel virtual
  * address.
  */
 vm_paddr_t
 moea_kextract(mmu_t mmu, vm_offset_t va)
 {
 	struct		pvo_entry *pvo;
 	vm_paddr_t pa;
 
 	/*
 	 * Allow direct mappings on 32-bit OEA
 	 */
 	if (va < VM_MIN_KERNEL_ADDRESS) {
 		return (va);
 	}
 
 	PMAP_LOCK(kernel_pmap);
 	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
 	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
 	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
 	PMAP_UNLOCK(kernel_pmap);
 	return (pa);
 }
 
 /*
  * Remove a wired page from kernel virtual address space.
  */
 void
 moea_kremove(mmu_t mmu, vm_offset_t va)
 {
 
 	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
 }
 
 /*
  * Provide a kernel pointer corresponding to a given userland pointer.
  * The returned pointer is valid until the next time this function is
  * called in this thread. This is used internally in copyin/copyout.
  */
 int
 moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen)
 {
 	size_t l;
 	register_t vsid;
 
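 	/*
 	 * Rebase the user address into the USER_ADDR window: keep its
 	 * offset within the 256 MB segment and clamp the usable length
 	 * to the end of that segment.
 	 */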
 	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
 	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
 	if (l > ulen)
 		l = ulen;
 	if (klen)
 		*klen = l;
 	else if (l != ulen)
 		return (EFAULT);
 
 	vsid = va_to_vsid(pm, (vm_offset_t)uaddr);
  
 	/* Mark segment no-execute */
 	vsid |= SR_N;
  
 	/* If we have already set this VSID, we can just return */
 	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid)
 		return (0);
  
 	__asm __volatile("isync");
 	curthread->td_pcb->pcb_cpu.aim.usr_segm =
 	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
 	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
+
+	return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
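+	/*
+	 * Faults on the USER_ADDR window set up by moea_map_user_ptr()
+	 * really refer to the user segment recorded in the PCB, so
+	 * rebuild the original user virtual address from it.
+	 */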
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
 
 	return (0);
 }
 
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
  * The value passed in *virt is a suggested virtual address for the mapping.
  * Architectures which can support a direct-mapped physical to virtual region
  * can return the appropriate address within that region, leaving '*virt'
  * unchanged.  We cannot and therefore do not; *virt is updated with the
  * first usable address after the mapped region.
  */
 vm_offset_t
 moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
     vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t	sva, va;
 
 	sva = *virt;
 	va = sva;
 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
 		moea_kenter(mmu, va, pa_start);
 	*virt = va;
 	return (sva);
 }
 
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may
  * be changed upwards or downwards in the future; it
  * is only necessary that true be returned for a small
  * subset of pmaps for proper page aging.
  */
 boolean_t
 moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
         int loops;
 	struct pvo_entry *pvo;
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
 	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap) {
 			rv = TRUE;
 			break;
 		}
 		if (++loops >= 16)
 			break;
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 void
 moea_page_init(mmu_t mmu __unused, vm_page_t m)
 {
 
 	m->md.mdpg_attrs = 0;
 	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
 	LIST_INIT(&m->md.mdpg_pvoh);
 }
 
 /*
  * Return the number of managed mappings to the given physical page
  * that are wired.
  */
 int
 moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 {
 	struct pvo_entry *pvo;
 	int count;
 
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
 	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
 static u_int	moea_vsidcontext;
 
 void
 moea_pinit(mmu_t mmu, pmap_t pmap)
 {
 	int	i, mask;
 	u_int	entropy;
 
 	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
 	RB_INIT(&pmap->pmap_pvo);
 
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
 
 	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
 	    == NULL) {
 		pmap->pmap_phys = pmap;
 	}
 
 
 	mtx_lock(&moea_vsid_mutex);
 	/*
 	 * Allocate some segment registers for this pmap.
 	 */
 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
 		u_int	hash, n;
 
 		/*
 		 * Create a new value by multiplying by a prime and adding in
 		 * entropy from the timebase register.  This is to make the
 		 * VSID more random so that the PT hash function collides
 		 * less often.  (Note that the prime causes gcc to do shifts
 		 * instead of a multiply.)
 		 */
 		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
 		hash = moea_vsidcontext & (NPMAPS - 1);
 		if (hash == 0)		/* 0 is special, avoid it */
 			continue;
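 		/*
 		 * The upper bits of the hash select a 32-bit word of the
 		 * VSID bitmap; the low five bits select the bit within it.
 		 */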
 		n = hash >> 5;
 		mask = 1 << (hash & (VSID_NBPW - 1));
 		hash = (moea_vsidcontext & 0xfffff);
 		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
 			/* anything free in this bucket? */
 			if (moea_vsid_bitmap[n] == 0xffffffff) {
 				entropy = (moea_vsidcontext >> 20);
 				continue;
 			}
 			i = ffs(~moea_vsid_bitmap[n]) - 1;
 			mask = 1 << i;
 			hash &= rounddown2(0xfffff, VSID_NBPW);
 			hash |= i;
 		}
 		KASSERT(!(moea_vsid_bitmap[n] & mask),
 		    ("Allocating in-use VSID group %#x\n", hash));
 		moea_vsid_bitmap[n] |= mask;
 		for (i = 0; i < 16; i++)
 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
 		mtx_unlock(&moea_vsid_mutex);
 		return;
 	}
 
 	mtx_unlock(&moea_vsid_mutex);
 	panic("moea_pinit: out of segments");
 }
 
 /*
  * Initialize the pmap associated with process 0.
  */
 void
 moea_pinit0(mmu_t mmu, pmap_t pm)
 {
 
 	PMAP_LOCK_INIT(pm);
 	moea_pinit(mmu, pm);
 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 }
 
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
 void
 moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 	struct	pte *pt;
 
 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
 	    ("moea_protect: non current pmap"));
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		moea_remove(mmu, pm, sva, eva);
 		return;
 	}
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 
 		/*
 		 * Grab the PTE pointer before we diddle with the cached PTE
 		 * copy.
 		 */
 		pt = moea_pvo_to_pte(pvo, -1);
 		/*
 		 * Change the protection of the page.
 		 */
 		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
 		pvo->pvo_pte.pte.pte_lo |= PTE_BR;
 
 		/*
 		 * If the PVO is in the page table, update that pte as well.
 		 */
 		if (pt != NULL) {
 			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
 			mtx_unlock(&moea_table_mutex);
 		}
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
 /*
  * Map a list of wired pages into kernel virtual address space.  This is
  * intended for temporary mappings which do not need page modification or
  * references recorded.  Existing mappings in the region are overwritten.
  */
 void
 moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
 		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
 }
 
 /*
  * Remove page mappings from kernel virtual address space.  Intended for
  * temporary mappings entered by moea_qenter.
  */
 void
 moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
 		moea_kremove(mmu, va);
 		va += PAGE_SIZE;
 	}
 }
 
 void
 moea_release(mmu_t mmu, pmap_t pmap)
 {
         int idx, mask;
 
 	/*
 	 * Free segment register's VSID
 	 */
         if (pmap->pm_sr[0] == 0)
                 panic("moea_release");
 
 	mtx_lock(&moea_vsid_mutex);
         idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
         mask = 1 << (idx % VSID_NBPW);
         idx /= VSID_NBPW;
         moea_vsid_bitmap[idx] &= ~mask;
 	mtx_unlock(&moea_vsid_mutex);
 }
 
 /*
  * Remove the given range of addresses from the specified map.
  */
 void
 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 		moea_pvo_remove(pvo, -1);
 	}
 	PMAP_UNLOCK(pm);
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
  * will reflect changes in pte's back to the vm_page.
  */
 void
 moea_remove_all(mmu_t mmu, vm_page_t m)
 {
 	struct  pvo_head *pvo_head;
 	struct	pvo_entry *pvo, *next_pvo;
 	pmap_t	pmap;
 
 	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
 
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		moea_pvo_remove(pvo, -1);
 		PMAP_UNLOCK(pmap);
 	}
 	if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
 		moea_attr_clear(m, PTE_CHG);
 		vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Allocate a physical page of memory directly from the phys_avail map.
  * Can only be called from moea_bootstrap before avail start and end are
  * calculated.
  */
 static vm_offset_t
 moea_bootstrap_alloc(vm_size_t size, u_int align)
 {
 	vm_offset_t	s, e;
 	int		i, j;
 
 	size = round_page(size);
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		if (align != 0)
 			s = roundup2(phys_avail[i], align);
 		else
 			s = phys_avail[i];
 		e = s + size;
 
 		if (s < phys_avail[i] || e > phys_avail[i + 1])
 			continue;
 
 		if (s == phys_avail[i]) {
 			phys_avail[i] += size;
 		} else if (e == phys_avail[i + 1]) {
 			phys_avail[i + 1] -= size;
 		} else {
 			for (j = phys_avail_count * 2; j > i; j -= 2) {
 				phys_avail[j] = phys_avail[j - 2];
 				phys_avail[j + 1] = phys_avail[j - 1];
 			}
 
 			phys_avail[i + 3] = phys_avail[i + 1];
 			phys_avail[i + 1] = s;
 			phys_avail[i + 2] = e;
 			phys_avail_count++;
 		}
 
 		return (s);
 	}
 	panic("moea_bootstrap_alloc: could not allocate memory");
 }
 
 static void
 moea_syncicache(vm_paddr_t pa, vm_size_t len)
 {
 	__syncicache((void *)pa, len);
 }
 
 static int
 moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
     vm_offset_t va, vm_paddr_t pa, u_int pte_lo, int flags)
 {
 	struct	pvo_entry *pvo;
 	u_int	sr;
 	int	first;
 	u_int	ptegidx;
 	int	i;
 	int     bootstrap;
 
 	moea_pvo_enter_calls++;
 	first = 0;
 	bootstrap = 0;
 
 	/*
 	 * Compute the PTE Group index.
 	 */
 	va &= ~ADDR_POFF;
 	sr = va_to_sr(pm->pm_sr, va);
 	ptegidx = va_to_pteg(sr, va);
 
 	/*
 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
 	 * there is a mapping.
 	 */
 	mtx_lock(&moea_table_mutex);
 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
 			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
 			    (pte_lo & PTE_PP)) {
 				/*
 				 * The PTE is not changing.  Instead, this may
 				 * be a request to change the mapping's wired
 				 * attribute.
 				 */
 				mtx_unlock(&moea_table_mutex);
 				if ((flags & PVO_WIRED) != 0 &&
 				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
 					pvo->pvo_vaddr |= PVO_WIRED;
 					pm->pm_stats.wired_count++;
 				} else if ((flags & PVO_WIRED) == 0 &&
 				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
 					pvo->pvo_vaddr &= ~PVO_WIRED;
 					pm->pm_stats.wired_count--;
 				}
 				return (0);
 			}
 			moea_pvo_remove(pvo, -1);
 			break;
 		}
 	}
 
 	/*
 	 * If we aren't overwriting a mapping, try to allocate.
 	 */
 	if (moea_initialized) {
 		pvo = uma_zalloc(zone, M_NOWAIT);
 	} else {
 		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
 			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
 			      moea_bpvo_pool_index, BPVO_POOL_SIZE,
 			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
 		}
 		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
 		moea_bpvo_pool_index++;
 		bootstrap = 1;
 	}
 
 	if (pvo == NULL) {
 		mtx_unlock(&moea_table_mutex);
 		return (ENOMEM);
 	}
 
 	moea_pvo_entries++;
 	pvo->pvo_vaddr = va;
 	pvo->pvo_pmap = pm;
 	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
 	pvo->pvo_vaddr &= ~ADDR_POFF;
 	if (flags & PVO_WIRED)
 		pvo->pvo_vaddr |= PVO_WIRED;
 	if (pvo_head != &moea_pvo_kunmanaged)
 		pvo->pvo_vaddr |= PVO_MANAGED;
 	if (bootstrap)
 		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
 
 	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);
 
 	/*
 	 * Add to pmap list
 	 */
 	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
 
 	/*
 	 * Remember if the list was empty and therefore will be the first
 	 * item.
 	 */
 	if (LIST_FIRST(pvo_head) == NULL)
 		first = 1;
 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
 
 	if (pvo->pvo_vaddr & PVO_WIRED)
 		pm->pm_stats.wired_count++;
 	pm->pm_stats.resident_count++;
 
 	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
 	KASSERT(i < 8, ("Invalid PTE index"));
 	if (i >= 0) {
 		PVO_PTEGIDX_SET(pvo, i);
 	} else {
 		panic("moea_pvo_enter: overflow");
 		moea_pte_overflow++;
 	}
 	mtx_unlock(&moea_table_mutex);
 
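 	/*
 	 * Returning ENOENT tells the caller that this was the first
 	 * mapping entered for the page (used to decide whether the
 	 * instruction cache needs to be synced).
 	 */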
 	return (first ? ENOENT : 0);
 }
 
 static void
 moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
 {
 	struct	pte *pt;
 
 	/*
 	 * If there is an active pte entry, we need to deactivate it (and
 	 * save the ref & cfg bits).
 	 */
 	pt = moea_pvo_to_pte(pvo, pteidx);
 	if (pt != NULL) {
 		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
 		mtx_unlock(&moea_table_mutex);
 		PVO_PTEGIDX_CLR(pvo);
 	} else {
 		moea_pte_overflow--;
 	}
 
 	/*
 	 * Update our statistics.
 	 */
 	pvo->pvo_pmap->pm_stats.resident_count--;
 	if (pvo->pvo_vaddr & PVO_WIRED)
 		pvo->pvo_pmap->pm_stats.wired_count--;
 
 	/*
 	 * Save the REF/CHG bits into their cache if the page is managed.
 	 */
 	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
 		struct	vm_page *pg;
 
 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
 		if (pg != NULL) {
 			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
 			    (PTE_REF | PTE_CHG));
 		}
 	}
 
 	/*
 	 * Remove this PVO from the PV and pmap lists.
 	 */
 	LIST_REMOVE(pvo, pvo_vlink);
 	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 
 	/*
 	 * Remove this from the overflow list and return it to the pool
 	 * if we aren't going to reuse it.
 	 */
 	LIST_REMOVE(pvo, pvo_olink);
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
 		    moea_upvo_zone, pvo);
 	moea_pvo_entries--;
 	moea_pvo_remove_calls++;
 }
 
 static __inline int
 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
 {
 	int	pteidx;
 
 	/*
 	 * We can find the actual pte entry without searching by grabbing
 	 * the PTEG index from 3 unused bits in pvo_vaddr and by
 	 * noticing the HID bit.
 	 */
 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
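 	/*
 	 * If the PTE was installed via the secondary hash (PTE_HID set),
 	 * its slot lives in the complementary PTEG, so reflect the index
 	 * through the hash-table mask.
 	 */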
 	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
 		pteidx ^= moea_pteg_mask * 8;
 
 	return (pteidx);
 }
 
 static struct pvo_entry *
 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
 {
 	struct	pvo_entry *pvo;
 	int	ptegidx;
 	u_int	sr;
 
 	va &= ~ADDR_POFF;
 	sr = va_to_sr(pm->pm_sr, va);
 	ptegidx = va_to_pteg(sr, va);
 
 	mtx_lock(&moea_table_mutex);
 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if (pteidx_p)
 				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
 			break;
 		}
 	}
 	mtx_unlock(&moea_table_mutex);
 
 	return (pvo);
 }
 
 static struct pte *
 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
 {
 	struct	pte *pt;
 
 	/*
 	 * If we haven't been supplied the ptegidx, calculate it.
 	 */
 	if (pteidx == -1) {
 		int	ptegidx;
 		u_int	sr;
 
 		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
 		pteidx = moea_pvo_pte_index(pvo, ptegidx);
 	}
 
 	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
 	mtx_lock(&moea_table_mutex);
 
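 	/*
 	 * Consistency checks: the PVO's valid bit must agree with the
 	 * recorded PTEG slot index, and both must agree with the entry
 	 * actually present in moea_pteg_table.
 	 */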
 	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
 		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
 		    "valid pte index", pvo);
 	}
 
 	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
 		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
 		    "pvo but no valid pte", pvo);
 	}
 
 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
 		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
 			panic("moea_pvo_to_pte: pvo %p has valid pte in "
 			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
 		}
 
 		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
 		    != 0) {
 			panic("moea_pvo_to_pte: pvo %p pte does not match "
 			    "pte %p in moea_pteg_table", pvo, pt);
 		}
 
 		mtx_assert(&moea_table_mutex, MA_OWNED);
 		return (pt);
 	}
 
 	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
 		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
 		    "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
 	}
 
 	mtx_unlock(&moea_table_mutex);
 	return (NULL);
 }
 
 /*
  * XXX: THIS STUFF SHOULD BE IN pte.c?
  */
 int
 moea_pte_spill(vm_offset_t addr)
 {
 	struct	pvo_entry *source_pvo, *victim_pvo;
 	struct	pvo_entry *pvo;
 	int	ptegidx, i, j;
 	u_int	sr;
 	struct	pteg *pteg;
 	struct	pte *pt;
 
 	moea_pte_spills++;
 
 	sr = mfsrin(addr);
 	ptegidx = va_to_pteg(sr, addr);
 
 	/*
 	 * Have to substitute some entry.  Use the primary hash for this.
 	 * Use low bits of timebase as random generator.
 	 */
 	pteg = &moea_pteg_table[ptegidx];
 	mtx_lock(&moea_table_mutex);
 	__asm __volatile("mftb %0" : "=r"(i));
 	i &= 7;
 	pt = &pteg->pt[i];
 
 	source_pvo = NULL;
 	victim_pvo = NULL;
 	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 		/*
 		 * We need to find a pvo entry for this address.
 		 */
 		if (source_pvo == NULL &&
 		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
 		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
 			/*
 			 * We have now found an entry to be spilled into the pteg.
 			 * The PTE is now valid, so we know it's active.
 			 */
 			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
 
 			if (j >= 0) {
 				PVO_PTEGIDX_SET(pvo, j);
 				moea_pte_overflow--;
 				mtx_unlock(&moea_table_mutex);
 				return (1);
 			}
 
 			source_pvo = pvo;
 
 			if (victim_pvo != NULL)
 				break;
 		}
 
 		/*
 		 * We also need the pvo entry of the victim we are replacing
 		 * so save the R & C bits of the PTE.
 		 */
 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
 		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
 			victim_pvo = pvo;
 			if (source_pvo != NULL)
 				break;
 		}
 	}
 
 	if (source_pvo == NULL) {
 		mtx_unlock(&moea_table_mutex);
 		return (0);
 	}
 
 	if (victim_pvo == NULL) {
 		if ((pt->pte_hi & PTE_HID) == 0)
 			panic("moea_pte_spill: victim p-pte (%p) has no pvo"
 			    "entry", pt);
 
 		/*
 		 * If this is a secondary PTE, we need to search it's primary
 		 * pvo bucket for the matching PVO.
 		 */
 		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
 		    pvo_olink) {
 			/*
 			 * We also need the pvo entry of the victim we are
 			 * replacing so save the R & C bits of the PTE.
 			 */
 			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
 				victim_pvo = pvo;
 				break;
 			}
 		}
 
 		if (victim_pvo == NULL)
 			panic("moea_pte_spill: victim s-pte (%p) has no pvo"
 			    "entry", pt);
 	}
 
 	/*
 	 * We are invalidating the TLB entry for the EA we are replacing even
 	 * though it's valid.  If we don't, we lose any ref/chg bit changes
 	 * contained in the TLB entry.
 	 */
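 	/*
 	 * The slot chosen above is in the primary PTEG for this EA, so
 	 * clear PTE_HID to mark the source PTE as a primary-hash entry
 	 * before installing it.
 	 */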
 	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;
 
 	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
 	moea_pte_set(pt, &source_pvo->pvo_pte.pte);
 
 	PVO_PTEGIDX_CLR(victim_pvo);
 	PVO_PTEGIDX_SET(source_pvo, i);
 	moea_pte_replacements++;
 
 	mtx_unlock(&moea_table_mutex);
 	return (1);
 }
 
 static __inline struct pvo_entry *
 moea_pte_spillable_ident(u_int ptegidx)
 {
 	struct	pte *pt;
 	struct	pvo_entry *pvo_walk, *pvo = NULL;
 
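 	/*
 	 * Pick a spillable mapping from this PTEG's PVO list: skip wired
 	 * and invalid entries, return the first one whose PTE has not
 	 * been referenced recently, and otherwise fall back to the last
 	 * valid candidate found.
 	 */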
 	LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
 		if (pvo_walk->pvo_vaddr & PVO_WIRED)
 			continue;
 
 		if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
 			continue;
 
 		pt = moea_pvo_to_pte(pvo_walk, -1);
 
 		if (pt == NULL)
 			continue;
 
 		pvo = pvo_walk;
 
 		mtx_unlock(&moea_table_mutex);
 		if (!(pt->pte_lo & PTE_REF))
 			return (pvo_walk);
 	}
 
 	return (pvo);
 }
 
 static int
 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
 {
 	struct	pte *pt;
 	struct	pvo_entry *victim_pvo;
 	int	i;
 	int	victim_idx;
 	u_int	pteg_bkpidx = ptegidx;
 
 	mtx_assert(&moea_table_mutex, MA_OWNED);
 
 	/*
 	 * First try primary hash.
 	 */
 	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
 		if ((pt->pte_hi & PTE_VALID) == 0) {
 			pvo_pt->pte_hi &= ~PTE_HID;
 			moea_pte_set(pt, pvo_pt);
 			return (i);
 		}
 	}
 
 	/*
 	 * Now try secondary hash.
 	 */
 	ptegidx ^= moea_pteg_mask;
 
 	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
 		if ((pt->pte_hi & PTE_VALID) == 0) {
 			pvo_pt->pte_hi |= PTE_HID;
 			moea_pte_set(pt, pvo_pt);
 			return (i);
 		}
 	}
 
 	/* Try again, but this time try to force a PTE out. */
 	ptegidx = pteg_bkpidx;
 
 	victim_pvo = moea_pte_spillable_ident(ptegidx);
 	if (victim_pvo == NULL) {
 		ptegidx ^= moea_pteg_mask;
 		victim_pvo = moea_pte_spillable_ident(ptegidx);
 	}
 
 	if (victim_pvo == NULL) {
 		panic("moea_pte_insert: overflow");
 		return (-1);
 	}
 
 	victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);
 
 	if (pteg_bkpidx == ptegidx)
 		pvo_pt->pte_hi &= ~PTE_HID;
 	else
 		pvo_pt->pte_hi |= PTE_HID;
 
 	/*
 	 * Synchronize the sacrifice PTE with its PVO, then mark both
 	 * invalid. The PVO will be reused when/if the VM system comes
 	 * here after a fault.
 	 */
 	pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];
 
 	if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
 		panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x",
 		    victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
 
 	/*
 	 * Set the new PTE.
 	 */
 	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
 	PVO_PTEGIDX_CLR(victim_pvo);
 	moea_pte_overflow++;
 	moea_pte_set(pt, pvo_pt);
 
 	return (victim_idx & 7);
 }
 
 static boolean_t
 moea_query_bit(vm_page_t m, int ptebit)
 {
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	if (moea_attr_fetch(m) & ptebit)
 		return (TRUE);
 
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 
 		/*
 		 * See if we saved the bit off.  If so, cache it and return
 		 * success.
 		 */
 		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 			moea_attr_save(m, ptebit);
 			return (TRUE);
 		}
 	}
 
 	/*
 	 * No luck, now go through the hard part of looking at the PTEs
 	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
 	 * the PTEs.
 	 */
 	powerpc_sync();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 
 		/*
 		 * See if this pvo has a valid PTE.  If so, fetch the
 		 * REF/CHG bits from the valid PTE.  If the appropriate
 		 * ptebit is set, cache it and return success.
 		 */
 		pt = moea_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea_pte_synch(pt, &pvo->pvo_pte.pte);
 			mtx_unlock(&moea_table_mutex);
 			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 				moea_attr_save(m, ptebit);
 				return (TRUE);
 			}
 		}
 	}
 
 	return (FALSE);
 }
 
 static u_int
 moea_clear_bit(vm_page_t m, int ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
 
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
 	/*
 	 * Clear the cached value.
 	 */
 	moea_attr_clear(m, ptebit);
 
 	/*
 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
 	 * we can reset the right ones).  Note that since the pvo entries and
 	 * list heads are accessed via BAT0 and are never placed in the page
 	 * table, we don't have to worry about further accesses setting the
 	 * REF/CHG bits.
 	 */
 	powerpc_sync();
 
 	/*
 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
 	 * valid pte clear the ptebit from the valid pte.
 	 */
 	count = 0;
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pt = moea_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea_pte_synch(pt, &pvo->pvo_pte.pte);
 			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 				count++;
 				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
 			}
 			mtx_unlock(&moea_table_mutex);
 		}
 		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 	}
 
 	return (count);
 }
 
 /*
  * Return 0 if the physical range is encompassed by battable[idx],
  * otherwise an errno describing why the BAT entry cannot be used.
  */
 static int
 moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
 {
 	u_int prot;
 	u_int32_t start;
 	u_int32_t end;
 	u_int32_t bat_ble;
 
 	/*
 	 * Return immediately if not a valid mapping
 	 */
 	if (!(battable[idx].batu & BAT_Vs))
 		return (EINVAL);
 
 	/*
 	 * The BAT entry must be cache-inhibited, guarded, and r/w
 	 * so it can function as an i/o page
 	 */
 	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
 	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
 		return (EPERM);
 
 	/*
 	 * The address should be within the BAT range. Assume that the
 	 * start address in the BAT has the correct alignment (thus
 	 * not requiring masking)
 	 */
 	start = battable[idx].batl & BAT_PBS;
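 	/*
 	 * The BL field in the upper BAT register is a mask of ones that
 	 * encodes the block length in 128 KB units; with the two low bits
 	 * forced on, shifting it left by 15 and OR-ing in 0x7fff turns it
 	 * into the offset of the last byte of the block.
 	 */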
 	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
 	end = start | (bat_ble << 15) | 0x7fff;
 
 	if ((pa < start) || ((pa + size) > end))
 		return (ERANGE);
 
 	return (0);
 }
 
 boolean_t
 moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 
 	/*
 	 * This currently does not work for entries that
 	 * overlap 256M BAT segments.
 	 */
 
 	for (i = 0; i < 16; i++)
 		if (moea_bat_mapped(i, pa, size) == 0)
 			return (0);
 
 	return (EFAULT);
 }
 
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
  * routine is intended to be used for mapping device memory,
  * NOT real memory.
  */
 void *
 moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 
 	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
 }
 
 void *
 moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	vm_offset_t va, tmpva, ppa, offset;
 	int i;
 
 	ppa = trunc_page(pa);
 	offset = pa & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
 
 	/*
 	 * If the physical address lies within a valid BAT table entry,
 	 * return the 1:1 mapping. This currently doesn't work
 	 * for regions that overlap 256M BAT segments.
 	 */
 	for (i = 0; i < 16; i++) {
 		if (moea_bat_mapped(i, pa, size) == 0)
 			return ((void *) pa);
 	}
 
 	va = kva_alloc(size);
 	if (!va)
 		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0;) {
 		moea_kenter_attr(mmu, tmpva, ppa, ma);
 		tlbie(tmpva);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
 		ppa += PAGE_SIZE;
 	}
 
 	return ((void *)(va + offset));
 }
 
 void
 moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t base, offset;
 
 	/*
 	 * If this is outside kernel virtual space, then it's a
 	 * battable entry and doesn't require unmapping
 	 */
 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
 		base = trunc_page(va);
 		offset = va & PAGE_MASK;
 		size = roundup(offset + size, PAGE_SIZE);
 		kva_free(base, size);
 	}
 }
 
 static void
 moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t lim;
 	vm_paddr_t pa;
 	vm_size_t len;
 
 	PMAP_LOCK(pm);
 	while (sz > 0) {
 		lim = round_page(va);
 		len = MIN(lim - va, sz);
 		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 		if (pvo != NULL) {
 			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
 			    (va & ADDR_POFF);
 			moea_syncicache(pa, len);
 		}
 		va += len;
 		sz -= len;
 	}
 	PMAP_UNLOCK(pm);
 }
 
 void
 moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 {
 
 	*va = (void *)pa;
 }
 
 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 
 void
 moea_scan_init(mmu_t mmu)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
 	int i;
 
 	if (!do_minidump) {
 		/* Initialize phys. segments for dumpsys(). */
 		memset(&dump_map, 0, sizeof(dump_map));
 		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 		for (i = 0; i < pregions_sz; i++) {
 			dump_map[i].pa_start = pregions[i].mr_start;
 			dump_map[i].pa_size = pregions[i].mr_size;
 		}
 		return;
 	}
 
 	/* Virtual segments for minidumps: */
 	memset(&dump_map, 0, sizeof(dump_map));
 
 	/* 1st: kernel .data and .bss. */
 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
 	dump_map[0].pa_size =
 	    round_page((uintptr_t)_end) - dump_map[0].pa_start;
 
 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
 	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
 	dump_map[1].pa_size = round_page(msgbufp->msg_size);
 
 	/* 3rd: kernel VM. */
 	va = dump_map[1].pa_start + dump_map[1].pa_size;
 	/* Find start of next chunk (from va). */
 	while (va < virtual_end) {
 		/* Don't dump the buffer cache. */
 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
 			va = kmi.buffer_eva;
 			continue;
 		}
 		pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
 		if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
 			break;
 		va += PAGE_SIZE;
 	}
 	if (va < virtual_end) {
 		dump_map[2].pa_start = va;
 		va += PAGE_SIZE;
 		/* Find last page in chunk. */
 		while (va < virtual_end) {
 			/* Don't run into the buffer cache. */
 			if (va == kmi.buffer_sva)
 				break;
 			pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF,
 			    NULL);
 			if (pvo == NULL ||
 			    !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
 				break;
 			va += PAGE_SIZE;
 		}
 		dump_map[2].pa_size = va - dump_map[2].pa_start;
 	}
 }
Index: head/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	(revision 328529)
+++ head/sys/powerpc/aim/mmu_oea64.c	(revision 328530)
@@ -1,2807 +1,2835 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2008-2015 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * Manages physical address maps.
  *
  * Since the information managed by this module is also stored by the
  * logical address mapping module, this module may throw away valid virtual
  * to physical mappings at almost any time.  However, invalidations of
  * mappings must be done as requested.
  *
  * In order to cope with hardware architectures which make virtual to
  * physical map invalidates expensive, this module may delay invalidate
  * reduced protection operations until such time as they are actually
  * necessary.  This module is given full information as to which processors
  * are currently using which maps, and to when physical maps must be made
  * correct.
  */
 
 #include "opt_compat.h"
 #include "opt_kstack_pages.h"
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/conf.h>
 #include <sys/queue.h>
 #include <sys/cpuset.h>
 #include <sys/kerneldump.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/msgbuf.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/vmmeter.h>
 #include <sys/smp.h>
 
 #include <sys/kdb.h>
 
 #include <dev/ofw/openfirm.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_pageout.h>
 #include <vm/uma.h>
 
 #include <machine/_inttypes.h>
 #include <machine/cpu.h>
 #include <machine/platform.h>
 #include <machine/frame.h>
 #include <machine/md_var.h>
 #include <machine/psl.h>
 #include <machine/bat.h>
 #include <machine/hid.h>
 #include <machine/pte.h>
 #include <machine/sr.h>
 #include <machine/trap.h>
 #include <machine/mmuvar.h>
 
 #include "mmu_oea64.h"
 #include "mmu_if.h"
 #include "moea64_if.h"
 
 void moea64_release_vsid(uint64_t vsid);
 uintptr_t moea64_get_unique_vsid(void); 
 
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
 #define ENABLE_TRANS(msr)	mtmsr(msr)
 
 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 #define	VSID_HASH_MASK		0x0000007fffffffffULL
 
 /*
  * Locking semantics:
  * 
  * There are two locks of interest: the page locks and the pmap locks, which
  * protect their individual PVO lists and are locked in that order. The contents
  * of all PVO entries are protected by the locks of their respective pmaps.
  * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
  * into any list.
  *
  */
 
 #define PV_LOCK_COUNT	PA_LOCK_COUNT*3
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
  
 #define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
 #define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
 #define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
 #define PV_LOCKASSERT(pa) 	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
 #define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
 
 struct ofw_map {
 	cell_t	om_va;
 	cell_t	om_len;
 	uint64_t om_pa;
 	cell_t	om_mode;
 };
 
 extern unsigned char _etext[];
 extern unsigned char _end[];
 
 extern void *slbtrap, *slbtrapend;
 
 /*
  * Map of physical memory regions.
  */
 static struct	mem_region *regions;
 static struct	mem_region *pregions;
 static u_int	phys_avail_count;
 static int	regions_sz, pregions_sz;
 
 extern void bs_remap_earlyboot(void);
 
 /*
  * Lock for the SLB tables.
  */
 struct mtx	moea64_slb_mutex;
 
 /*
  * PTEG data.
  */
 u_int		moea64_pteg_count;
 u_int		moea64_pteg_mask;
 
 /*
  * PVO data.
  */
 
 uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */
 
 static struct	pvo_entry *moea64_bpvo_pool;
 static int	moea64_bpvo_pool_index = 0;
 static int	moea64_bpvo_pool_size = 327680;
 TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, 
     &moea64_bpvo_pool_index, 0, "");
 
 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
 #ifdef __powerpc64__
 #define	NVSIDS		(NPMAPS * 16)
 #define VSID_HASHMASK	0xffffffffUL
 #else
 #define NVSIDS		NPMAPS
 #define VSID_HASHMASK	0xfffffUL
 #endif
 static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
 
 static boolean_t moea64_initialized = FALSE;
 
 /*
  * Statistics.
  */
 u_int	moea64_pte_valid = 0;
 u_int	moea64_pte_overflow = 0;
 u_int	moea64_pvo_entries = 0;
 u_int	moea64_pvo_enter_calls = 0;
 u_int	moea64_pvo_remove_calls = 0;
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 
     &moea64_pte_valid, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
     &moea64_pte_overflow, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 
     &moea64_pvo_entries, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
     &moea64_pvo_enter_calls, 0, "");
 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
     &moea64_pvo_remove_calls, 0, "");
 
 vm_offset_t	moea64_scratchpage_va[2];
 struct pvo_entry *moea64_scratchpage_pvo[2];
 struct	mtx	moea64_scratchpage_mtx;
 
 uint64_t 	moea64_large_page_mask = 0;
 uint64_t	moea64_large_page_size = 0;
 int		moea64_large_page_shift = 0;
 
 /*
  * PVO calls.
  */
 static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
 		    struct pvo_head *pvo_head);
 static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
 static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
 
 /*
  * Utility routines.
  */
 static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
 static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
 static void		moea64_kremove(mmu_t, vm_offset_t);
 static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 
 			    vm_paddr_t pa, vm_size_t sz);
 static void		moea64_pmap_init_qpages(void);
 
 /*
  * Kernel MMU interface
  */
 void moea64_clear_modify(mmu_t, vm_page_t);
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
     u_int flags, int8_t psind);
 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
 void moea64_init(mmu_t);
 boolean_t moea64_is_modified(mmu_t, vm_page_t);
 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
 int moea64_ts_referenced(mmu_t, vm_page_t);
 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
 void moea64_page_init(mmu_t, vm_page_t);
 int moea64_page_wired_mappings(mmu_t, vm_page_t);
 void moea64_pinit(mmu_t, pmap_t);
 void moea64_pinit0(mmu_t, pmap_t);
 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
 void moea64_qremove(mmu_t, vm_offset_t, int);
 void moea64_release(mmu_t, pmap_t);
 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_remove_pages(mmu_t, pmap_t);
 void moea64_remove_all(mmu_t, vm_page_t);
 void moea64_remove_write(mmu_t, vm_page_t);
 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
     void **va);
 void moea64_scan_init(mmu_t mmu);
 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
 	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
 	MMUMETHOD(mmu_enter,		moea64_enter),
 	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
 	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
 	MMUMETHOD(mmu_extract,		moea64_extract),
 	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
 	MMUMETHOD(mmu_init,		moea64_init),
 	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
 	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
 	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
 	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
 	MMUMETHOD(mmu_map,     		moea64_map),
 	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
 	MMUMETHOD(mmu_page_init,	moea64_page_init),
 	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
 	MMUMETHOD(mmu_pinit,		moea64_pinit),
 	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
 	MMUMETHOD(mmu_protect,		moea64_protect),
 	MMUMETHOD(mmu_qenter,		moea64_qenter),
 	MMUMETHOD(mmu_qremove,		moea64_qremove),
 	MMUMETHOD(mmu_release,		moea64_release),
 	MMUMETHOD(mmu_remove,		moea64_remove),
 	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
 	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
 	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
 	MMUMETHOD(mmu_unwire,		moea64_unwire),
 	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
 	MMUMETHOD(mmu_activate,		moea64_activate),
 	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
 	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
 	MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
 	MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
 
 	/* Internal interfaces */
 	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
 	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
 	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
 	MMUMETHOD(mmu_kextract,		moea64_kextract),
 	MMUMETHOD(mmu_kenter,		moea64_kenter),
 	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
 	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea64_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
 
 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
 
 static struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
 	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
 	return (&m->md.mdpg_pvoh);
 }
 
 static struct pvo_entry *
 alloc_pvo_entry(int bootstrap)
 {
 	struct pvo_entry *pvo;
 
 	if (!moea64_initialized || bootstrap) {
 		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
 			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
 			      moea64_bpvo_pool_index, moea64_bpvo_pool_size, 
 			      moea64_bpvo_pool_size * sizeof(struct pvo_entry));
 		}
 		pvo = &moea64_bpvo_pool[
 		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
 		bzero(pvo, sizeof(*pvo));
 		pvo->pvo_vaddr = PVO_BOOTSTRAP;
 	} else {
 		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
 		bzero(pvo, sizeof(*pvo));
 	}
 
 	return (pvo);
 }
 
 
 static void
 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
 {
 	uint64_t vsid;
 	uint64_t hash;
 	int shift;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	pvo->pvo_pmap = pmap;
 	va &= ~ADDR_POFF;
 	pvo->pvo_vaddr |= va;
 	vsid = va_to_vsid(pmap, va);
 	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
 	    | (vsid << 16);
 
 	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
 	    ADDR_PIDX_SHFT;
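 	/*
 	 * Precompute the base slot of the primary-hash PTEG (eight slots
 	 * per group); large mappings hash on the large-page number rather
 	 * than the 4 KB page number.
 	 */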
 	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
 	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
 }
 
 static void
 free_pvo_entry(struct pvo_entry *pvo)
 {
 
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 		uma_zfree(moea64_pvo_zone, pvo);
 }
 
 void
 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
 {
 
 	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
 	    LPTE_AVPN_MASK;
 	lpte->pte_hi |= LPTE_VALID;
 	
 	if (pvo->pvo_vaddr & PVO_LARGE)
 		lpte->pte_hi |= LPTE_BIG;
 	if (pvo->pvo_vaddr & PVO_WIRED)
 		lpte->pte_hi |= LPTE_WIRED;
 	if (pvo->pvo_vaddr & PVO_HID)
 		lpte->pte_hi |= LPTE_HID;
 
 	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
 	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
 		lpte->pte_lo |= LPTE_BW;
 	else
 		lpte->pte_lo |= LPTE_BR;
 
 	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
 		lpte->pte_lo |= LPTE_NOEXEC;
 }
 
 static __inline uint64_t
 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 {
 	uint64_t pte_lo;
 	int i;
 
 	if (ma != VM_MEMATTR_DEFAULT) {
 		switch (ma) {
 		case VM_MEMATTR_UNCACHEABLE:
 			return (LPTE_I | LPTE_G);
 		case VM_MEMATTR_CACHEABLE:
 			return (LPTE_M);
 		case VM_MEMATTR_WRITE_COMBINING:
 		case VM_MEMATTR_WRITE_BACK:
 		case VM_MEMATTR_PREFETCHABLE:
 			return (LPTE_I);
 		case VM_MEMATTR_WRITE_THROUGH:
 			return (LPTE_W | LPTE_M);
 		}
 	}
 
 	/*
 	 * Assume the page is cache inhibited and access is guarded unless
 	 * it's in our available memory array.
 	 */
 	pte_lo = LPTE_I | LPTE_G;
 	for (i = 0; i < pregions_sz; i++) {
 		if ((pa >= pregions[i].mr_start) &&
 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
 			pte_lo &= ~(LPTE_I | LPTE_G);
 			pte_lo |= LPTE_M;
 			break;
 		}
 	}
 
 	return (pte_lo);
 }
 
 /*
  * Quick sort callout for comparing memory regions.
  */
 static int	om_cmp(const void *a, const void *b);
 
 static int
 om_cmp(const void *a, const void *b)
 {
 	const struct	ofw_map *mapa;
 	const struct	ofw_map *mapb;
 
 	mapa = a;
 	mapb = b;
 	if (mapa->om_pa < mapb->om_pa)
 		return (-1);
 	else if (mapa->om_pa > mapb->om_pa)
 		return (1);
 	else
 		return (0);
 }
 
 static void
 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
 {
 	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
 	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
 	struct pvo_entry *pvo;
 	register_t	msr;
 	vm_offset_t	off;
 	vm_paddr_t	pa_base;
 	int		i, j;
 
 	bzero(translations, sz);
 	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
 	    sizeof(acells));
 	if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
 		panic("moea64_bootstrap: can't get ofw translations");
 
 	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
 	sz /= sizeof(cell_t);
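 	/* Each translation entry is va, len, pa (one or two cells), mode. */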
 	for (i = 0, j = 0; i < sz; j++) {
 		translations[j].om_va = trans_cells[i++];
 		translations[j].om_len = trans_cells[i++];
 		translations[j].om_pa = trans_cells[i++];
 		if (acells == 2) {
 			translations[j].om_pa <<= 32;
 			translations[j].om_pa |= trans_cells[i++];
 		}
 		translations[j].om_mode = trans_cells[i++];
 	}
 	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
 	    i, sz));
 
 	sz = j;
 	qsort(translations, sz, sizeof (*translations), om_cmp);
 
 	for (i = 0; i < sz; i++) {
 		pa_base = translations[i].om_pa;
 	      #ifndef __powerpc64__
 		if ((translations[i].om_pa >> 32) != 0)
 			panic("OFW translations above 32-bit boundary!");
 	      #endif
 
 		if (pa_base % PAGE_SIZE)
 			panic("OFW translation not page-aligned (phys)!");
 		if (translations[i].om_va % PAGE_SIZE)
 			panic("OFW translation not page-aligned (virt)!");
 
 		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
 		    pa_base, translations[i].om_va, translations[i].om_len);
 
 		/* Now enter the pages for this mapping */
 
 		DISABLE_TRANS(msr);
 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
 			/* If this address is direct-mapped, skip remapping */
 			if (hw_direct_map &&
 			    translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
 			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
 			    == LPTE_M)
 				continue;
 
 			PMAP_LOCK(kernel_pmap);
 			pvo = moea64_pvo_find_va(kernel_pmap,
 			    translations[i].om_va + off);
 			PMAP_UNLOCK(kernel_pmap);
 			if (pvo != NULL)
 				continue;
 
 			moea64_kenter(mmup, translations[i].om_va + off,
 			    pa_base + off);
 		}
 		ENABLE_TRANS(msr);
 	}
 }
 
 #ifdef __powerpc64__
 static void
 moea64_probe_large_page(void)
 {
 	uint16_t pvr = mfpvr() >> 16;
 
 	switch (pvr) {
 	case IBM970:
 	case IBM970FX:
 	case IBM970MP:
 		powerpc_sync(); isync();
 		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
 		powerpc_sync(); isync();
 		
 		/* FALLTHROUGH */
 	default:
 		if (moea64_large_page_size == 0) {
 			moea64_large_page_size = 0x1000000; /* 16 MB */
 			moea64_large_page_shift = 24;
 		}
 	}
 
 	moea64_large_page_mask = moea64_large_page_size - 1;
 }
 
 static void
 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
 {
 	struct slb *cache;
 	struct slb entry;
 	uint64_t esid, slbe;
 	uint64_t i;
 
 	cache = PCPU_GET(slb);
 	esid = va >> ADDR_SR_SHFT;
 	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
 
 	for (i = 0; i < 64; i++) {
 		if (cache[i].slbe == (slbe | i))
 			return;
 	}
 
 	entry.slbe = slbe;
 	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
 	if (large)
 		entry.slbv |= SLBV_L;
 
 	slb_insert_kernel(entry.slbe, entry.slbv);
 }
 #endif
 
 static void
 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
     vm_offset_t kernelend)
 {
 	struct pvo_entry *pvo;
 	register_t msr;
 	vm_paddr_t pa;
 	vm_offset_t size, off;
 	uint64_t pte_lo;
 	int i;
 
 	if (moea64_large_page_size == 0) 
 		hw_direct_map = 0;
 
 	DISABLE_TRANS(msr);
 	if (hw_direct_map) {
 		PMAP_LOCK(kernel_pmap);
 		for (i = 0; i < pregions_sz; i++) {
 		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
 		     pregions[i].mr_size; pa += moea64_large_page_size) {
 			pte_lo = LPTE_M;
 
 			pvo = alloc_pvo_entry(1 /* bootstrap */);
 			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
 			init_pvo_entry(pvo, kernel_pmap, PHYS_TO_DMAP(pa));
 
 			/*
 			 * Set memory access as guarded if prefetch within
 			 * the page could exit the available physmem area.
 			 */
 			if (pa & moea64_large_page_mask) {
 				pa &= moea64_large_page_mask;
 				pte_lo |= LPTE_G;
 			}
 			if (pa + moea64_large_page_size >
 			    pregions[i].mr_start + pregions[i].mr_size)
 				pte_lo |= LPTE_G;
 
 			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
 			    VM_PROT_EXECUTE;
 			pvo->pvo_pte.pa = pa | pte_lo;
 			moea64_pvo_enter(mmup, pvo, NULL);
 		  }
 		}
 		PMAP_UNLOCK(kernel_pmap);
 	} else {
 		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
 		off = (vm_offset_t)(moea64_bpvo_pool);
 		for (pa = off; pa < off + size; pa += PAGE_SIZE)
 			moea64_kenter(mmup, pa, pa);
 
 		/*
 		 * Map certain important things, like ourselves.
 		 *
 		 * NOTE: We do not map the exception vector space. That code is
 		 * used only in real mode, and leaving it unmapped allows us to
 		 * catch NULL pointer dereferences, instead of making NULL a valid
 		 * address.
 		 */
 
 		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
 		    pa += PAGE_SIZE) 
 			moea64_kenter(mmup, pa, pa);
 	}
 	ENABLE_TRANS(msr);
 
 	/*
 	 * Allow user to override unmapped_buf_allowed for testing.
 	 * XXXKIB Only direct map implementation was tested.
 	 */
 	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
 	    &unmapped_buf_allowed))
 		unmapped_buf_allowed = hw_direct_map;
 }
 
 void
 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	int		i, j;
 	vm_size_t	physsz, hwphyssz;
 
 #ifndef __powerpc64__
 	/* We don't have a direct map since there is no BAT */
 	hw_direct_map = 0;
 
 	/* Make sure battable is zero, since we have no BAT */
 	for (i = 0; i < 16; i++) {
 		battable[i].batu = 0;
 		battable[i].batl = 0;
 	}
 #else
 	moea64_probe_large_page();
 
 	/* Use a direct map if we have large page support */
 	if (moea64_large_page_size > 0)
 		hw_direct_map = 1;
 	else
 		hw_direct_map = 0;
 
 	/* Install trap handlers for SLBs */
 	bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
 	bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
 	__syncicache((void *)EXC_DSE, 0x80);
 	__syncicache((void *)EXC_ISE, 0x80);
 #endif
 
 	/* Get physical memory regions from firmware */
 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
 
 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
 		panic("moea64_bootstrap: phys_avail too small");
 
 	phys_avail_count = 0;
 	physsz = 0;
 	hwphyssz = 0;
 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
 		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
 		    regions[i].mr_start, regions[i].mr_start +
 		    regions[i].mr_size, regions[i].mr_size);
 		if (hwphyssz != 0 &&
 		    (physsz + regions[i].mr_size) >= hwphyssz) {
 			if (physsz < hwphyssz) {
 				phys_avail[j] = regions[i].mr_start;
 				phys_avail[j + 1] = regions[i].mr_start +
 				    hwphyssz - physsz;
 				physsz = hwphyssz;
 				phys_avail_count++;
 			}
 			break;
 		}
 		phys_avail[j] = regions[i].mr_start;
 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
 		phys_avail_count++;
 		physsz += regions[i].mr_size;
 	}
 
 	/* Check for overlap with the kernel and exception vectors */
 	for (j = 0; j < 2*phys_avail_count; j+=2) {
 		if (phys_avail[j] < EXC_LAST)
 			phys_avail[j] += EXC_LAST;
 
 		if (kernelstart >= phys_avail[j] &&
 		    kernelstart < phys_avail[j+1]) {
 			if (kernelend < phys_avail[j+1]) {
 				phys_avail[2*phys_avail_count] =
 				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
 				phys_avail[2*phys_avail_count + 1] =
 				    phys_avail[j+1];
 				phys_avail_count++;
 			}
 
 			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
 		}
 
 		if (kernelend >= phys_avail[j] &&
 		    kernelend < phys_avail[j+1]) {
 			if (kernelstart > phys_avail[j]) {
 				phys_avail[2*phys_avail_count] = phys_avail[j];
 				phys_avail[2*phys_avail_count + 1] =
 				    kernelstart & ~PAGE_MASK;
 				phys_avail_count++;
 			}
 
 			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
 		}
 	}
 
 	physmem = btoc(physsz);
 
 #ifdef PTEGCOUNT
 	moea64_pteg_count = PTEGCOUNT;
 #else
 	moea64_pteg_count = 0x1000;
 
 	while (moea64_pteg_count < physmem)
 		moea64_pteg_count <<= 1;
 
 	moea64_pteg_count >>= 1;
 #endif /* PTEGCOUNT */
 }
 
 void
 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	int		i;
 
 	/*
 	 * Set PTEG mask
 	 */
 	moea64_pteg_mask = moea64_pteg_count - 1;
 
 	/*
 	 * Initialize SLB table lock and page locks
 	 */
 	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
 	for (i = 0; i < PV_LOCK_COUNT; i++)
 		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the bootstrap pvo pool.
 	 */
 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
 		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
 	moea64_bpvo_pool_index = 0;
 
 	/*
 	 * Make sure kernel vsid is allocated as well as VSID 0.
 	 */
 	#ifndef __powerpc64__
 	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
 	moea64_vsid_bitmap[0] |= 1;
 	#endif
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	#ifdef __powerpc64__
 	for (i = 0; i < 64; i++) {
 		pcpup->pc_slb[i].slbv = 0;
 		pcpup->pc_slb[i].slbe = 0;
 	}
 	#else
 	for (i = 0; i < 16; i++) 
 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
 	#endif
 
 	kernel_pmap->pmap_phys = kernel_pmap;
 	CPU_FILL(&kernel_pmap->pm_active);
 	RB_INIT(&kernel_pmap->pmap_pvo);
 
 	PMAP_LOCK_INIT(kernel_pmap);
 
 	/*
 	 * Now map in all the other buffers we allocated earlier
 	 */
 
 	moea64_setup_direct_map(mmup, kernelstart, kernelend);
 }
 
 void
 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	ihandle_t	mmui;
 	phandle_t	chosen;
 	phandle_t	mmu;
 	ssize_t		sz;
 	int		i;
 	vm_offset_t	pa, va;
 	void		*dpcpu;
 
 	/*
 	 * Set up the Open Firmware pmap and add its mappings if not in real
 	 * mode.
 	 */
 
 	chosen = OF_finddevice("/chosen");
 	if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
 		mmu = OF_instance_to_package(mmui);
 		if (mmu == -1 ||
 		    (sz = OF_getproplen(mmu, "translations")) == -1)
 			sz = 0;
 		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
 			panic("moea64_bootstrap: too many ofw translations");
 
 		if (sz > 0)
 			moea64_add_ofw_mappings(mmup, mmu, sz);
 	}
 
 	/*
 	 * Calculate the last available physical address.
 	 */
 	Maxmem = 0;
 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
 		Maxmem = max(Maxmem, powerpc_btop(phys_avail[i + 1]));
 
 	/*
 	 * Initialize MMU and remap early physical mappings
 	 */
 	MMU_CPU_BOOTSTRAP(mmup,0);
 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
 	pmap_bootstrapped++;
 	bs_remap_earlyboot();
 
 	/*
 	 * Set the start and end of kva.
 	 */
 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
 	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 
 
 	/*
 	 * Map the entire KVA range into the SLB. We must not fault there.
 	 */
 	#ifdef __powerpc64__
 	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
 		moea64_bootstrap_slb_prefault(va, 0);
 	#endif
 
 	/*
 	 * Figure out how far we can extend virtual_end into segment 16
 	 * without running into existing mappings. Segment 16 is guaranteed
 	 * to contain neither RAM nor devices (at least on Apple hardware),
 	 * but will generally contain some OFW mappings we should not
 	 * step on.
 	 */
 
 	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
 	PMAP_LOCK(kernel_pmap);
 	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
 	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
 		virtual_end += PAGE_SIZE;
 	PMAP_UNLOCK(kernel_pmap);
 	#endif
 
 	/*
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
 	 */
 	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
 	virtual_avail = va + kstack_pages * PAGE_SIZE;
 	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
 	thread0.td_kstack = va;
 	thread0.td_kstack_pages = kstack_pages;
 	for (i = 0; i < kstack_pages; i++) {
 		moea64_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 
 	/*
 	 * Allocate virtual address space for the message buffer.
 	 */
 	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
 	msgbufp = (struct msgbuf *)virtual_avail;
 	va = virtual_avail;
 	virtual_avail += round_page(msgbufsize);
 	while (va < virtual_avail) {
 		moea64_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 
 	/*
 	 * Allocate virtual address space for the dynamic percpu area.
 	 */
 	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
 	dpcpu = (void *)virtual_avail;
 	va = virtual_avail;
 	virtual_avail += DPCPU_SIZE;
 	while (va < virtual_avail) {
 		moea64_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
 	dpcpu_init(dpcpu, curcpu);
 
 	/*
 	 * Allocate some things for page zeroing. We put this directly
 	 * in the page table and use MOEA64_PTE_REPLACE to avoid any
 	 * of the PVO book-keeping or other parts of the VM system
 	 * from even knowing that this hack exists.
 	 */
 
 	if (!hw_direct_map) {
 		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
 		    MTX_DEF);
 		for (i = 0; i < 2; i++) {
 			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
 			virtual_end -= PAGE_SIZE;
 
 			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
 
 			PMAP_LOCK(kernel_pmap);
 			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
 			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
 			PMAP_UNLOCK(kernel_pmap);
 		}
 	}
 }
 
 static void
 moea64_pmap_init_qpages(void)
 {
 	struct pcpu *pc;
 	int i;
 
 	if (hw_direct_map)
 		return;
 
 	CPU_FOREACH(i) {
 		pc = pcpu_find(i);
 		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
 		if (pc->pc_qmap_addr == 0)
 			panic("pmap_init_qpages: unable to allocate KVA");
 		PMAP_LOCK(kernel_pmap);
 		pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
 		PMAP_UNLOCK(kernel_pmap);
 		mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
 	}
 }
 
 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
 
 /*
  * Activate a user pmap.  This mostly involves setting some non-CPU
  * state.
  */
 void
 moea64_activate(mmu_t mmu, struct thread *td)
 {
 	pmap_t	pm;
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 
 	#ifdef __powerpc64__
 	PCPU_SET(userslb, pm->pm_slb);
 	__asm __volatile("slbmte %0, %1; isync" ::
 	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
 	#else
 	PCPU_SET(curpmap, pm->pmap_phys);
 	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
 	#endif
 }
 
 void
 moea64_deactivate(mmu_t mmu, struct thread *td)
 {
 	pmap_t	pm;
 
 	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 	#ifdef __powerpc64__
 	PCPU_SET(userslb, NULL);
 	#else
 	PCPU_SET(curpmap, NULL);
 	#endif
 }
 
 void
 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry key, *pvo;
 	vm_page_t m;
 	int64_t	refchg;
 
 	key.pvo_vaddr = sva;
 	PMAP_LOCK(pm);
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva;
 	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
 		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
 			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
 			    pvo);
 		pvo->pvo_vaddr &= ~PVO_WIRED;
 		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
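 		/*
 		 * PTE_REPLACE hands back the accumulated REF/CHG bits of
 		 * the old PTE, or a negative value if the mapping was not
 		 * in the page table; in the latter case assume the page
 		 * may have been modified.
 		 */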
 		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 			if (refchg < 0)
 				refchg = LPTE_CHG;
 			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
 			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
 			if (refchg & LPTE_CHG)
 				vm_page_dirty(m);
 			if (refchg & LPTE_REF)
 				vm_page_aflag_set(m, PGA_REFERENCED);
 		}
 		pm->pm_stats.wired_count--;
 	}
 	PMAP_UNLOCK(pm);
 }
 
 /*
  * This goes through and sets the physical address of our
  * special scratch PTE to the PA we want to zero or copy. Because
  * of locking issues (this can get called in pvo_enter() by
  * the UMA allocator), we can't use most other utility functions here
  */
 
 static __inline
 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
 
 	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
 	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 
 	moea64_scratchpage_pvo[which]->pvo_pte.pa =
 	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
 	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
 	    MOEA64_PTE_INVALIDATE);
 	isync();
 }
 
 void
 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t	dst;
 	vm_offset_t	src;
 
 	dst = VM_PAGE_TO_PHYS(mdst);
 	src = VM_PAGE_TO_PHYS(msrc);
 
 	if (hw_direct_map) {
 		bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
 		    PAGE_SIZE);
 	} else {
 		mtx_lock(&moea64_scratchpage_mtx);
 
 		moea64_set_scratchpage_pa(mmu, 0, src);
 		moea64_set_scratchpage_pa(mmu, 1, dst);
 
 		bcopy((void *)moea64_scratchpage_va[0], 
 		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
 
 		mtx_unlock(&moea64_scratchpage_mtx);
 	}
 }
 
 static inline void
 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
 	vm_offset_t a_pg_offset, b_pg_offset;
 	int cnt;
 
 	while (xfersize > 0) {
 		a_pg_offset = a_offset & PAGE_MASK;
 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 		a_cp = (char *)PHYS_TO_DMAP(
 		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
 		    a_pg_offset;
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		b_cp = (char *)PHYS_TO_DMAP(
 		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
 		    b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
 		a_offset += cnt;
 		b_offset += cnt;
 		xfersize -= cnt;
 	}
 }
 
 static inline void
 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
 	vm_offset_t a_pg_offset, b_pg_offset;
 	int cnt;
 
 	mtx_lock(&moea64_scratchpage_mtx);
 	while (xfersize > 0) {
 		a_pg_offset = a_offset & PAGE_MASK;
 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 		moea64_set_scratchpage_pa(mmu, 0,
 		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
 		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		moea64_set_scratchpage_pa(mmu, 1,
 		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
 		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
 		a_offset += cnt;
 		b_offset += cnt;
 		xfersize -= cnt;
 	}
 	mtx_unlock(&moea64_scratchpage_mtx);
 }
 
 void
 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 
 	if (hw_direct_map) {
 		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
 		    xfersize);
 	} else {
 		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
 		    xfersize);
 	}
 }
 
 void
 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {
 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 
 	if (size + off > PAGE_SIZE)
 		panic("moea64_zero_page: size + off > PAGE_SIZE");
 
 	if (hw_direct_map) {
 		bzero((caddr_t)PHYS_TO_DMAP(pa) + off, size);
 	} else {
 		mtx_lock(&moea64_scratchpage_mtx);
 		moea64_set_scratchpage_pa(mmu, 0, pa);
 		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
 		mtx_unlock(&moea64_scratchpage_mtx);
 	}
 }
 
 /*
  * Zero a page of physical memory by temporarily mapping it
  */
 void
 moea64_zero_page(mmu_t mmu, vm_page_t m)
 {
 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 	vm_offset_t va, off;
 
 	if (!hw_direct_map) {
 		mtx_lock(&moea64_scratchpage_mtx);
 
 		moea64_set_scratchpage_pa(mmu, 0, pa);
 		va = moea64_scratchpage_va[0];
 	} else {
 		va = PHYS_TO_DMAP(pa);
 	}
 
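 	/* dcbz zeroes a full cache line, so step one line at a time. */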
 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
 
 	if (!hw_direct_map)
 		mtx_unlock(&moea64_scratchpage_mtx);
 }
 
 vm_offset_t
 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
 	struct pvo_entry *pvo;
 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 
 	if (hw_direct_map)
 		return (PHYS_TO_DMAP(pa));
 
 	/*
  	 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
 	 * a critical section and access the PCPU data like on i386.
 	 * Instead, pin the thread and grab the PCPU lock to prevent
 	 * a preempting thread from using the same PCPU data.
 	 */
 	sched_pin();
 
 	mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
 	pvo = PCPU_GET(qmap_pvo);
 
 	mtx_lock(PCPU_PTR(qmap_lock));
 	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
 	    (uint64_t)pa;
 	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
 	isync();
 
 	return (PCPU_GET(qmap_addr));
 }
 
 void
 moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
 {
 	if (hw_direct_map)
 		return;
 
 	mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
 	KASSERT(PCPU_GET(qmap_addr) == addr,
 	    ("moea64_quick_remove_page: invalid address"));
 	mtx_unlock(PCPU_PTR(qmap_lock));
 	sched_unpin();	
 }
 
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  */
 
 int
 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 
     vm_prot_t prot, u_int flags, int8_t psind)
 {
 	struct		pvo_entry *pvo, *oldpvo;
 	struct		pvo_head *pvo_head;
 	uint64_t	pte_lo;
 	int		error;
 
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	pvo = alloc_pvo_entry(0);
 	pvo->pvo_pmap = NULL; /* to be filled in later */
 	pvo->pvo_pte.prot = prot;
 
 	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
 
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		pvo->pvo_vaddr |= PVO_WIRED;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
 		pvo_head = NULL;
 	} else {
 		pvo_head = &m->md.mdpg_pvoh;
 		pvo->pvo_vaddr |= PVO_MANAGED;
 	}
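 
 	/*
 	 * Try to enter the mapping, retrying if the PVO insertion fails
 	 * with ENOMEM and the caller allows sleeping: wait for the VM to
 	 * reclaim memory (VM_WAIT) and start over.
 	 */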
 	
 	for (;;) {
 		PV_PAGE_LOCK(m);
 		PMAP_LOCK(pmap);
 		if (pvo->pvo_pmap == NULL)
 			init_pvo_entry(pvo, pmap, va);
 		if (prot & VM_PROT_WRITE)
 			if (pmap_bootstrapped &&
 			    (m->oflags & VPO_UNMANAGED) == 0)
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 
 		oldpvo = moea64_pvo_find_va(pmap, va);
 		if (oldpvo != NULL) {
 			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
 			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
 			    oldpvo->pvo_pte.prot == prot) {
 				/* Identical mapping already exists */
 				error = 0;
 
 				/* If not in page table, reinsert it */
 				if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
 					moea64_pte_overflow--;
 					MOEA64_PTE_INSERT(mmu, oldpvo);
 				}
 
 				/* Then just clean up and go home */
 				PV_PAGE_UNLOCK(m);
 				PMAP_UNLOCK(pmap);
 				free_pvo_entry(pvo);
 				break;
 			}
 
 			/* Otherwise, need to kill it first */
 			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
 			    "mapping does not match new mapping"));
 			moea64_pvo_remove_from_pmap(mmu, oldpvo);
 		}
 		error = moea64_pvo_enter(mmu, pvo, pvo_head);
 		PV_PAGE_UNLOCK(m);
 		PMAP_UNLOCK(pmap);
 
 		/* Free any dead pages */
 		if (oldpvo != NULL) {
 			PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
 			moea64_pvo_remove_from_page(mmu, oldpvo);
 			PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
 			free_pvo_entry(oldpvo);
 		}
 
 		if (error != ENOMEM)
 			break;
 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
 			return (KERN_RESOURCE_SHORTAGE);
 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
 		VM_WAIT;
 	}
 
 	/*
 	 * Flush the page from the instruction cache if this is a cacheable,
 	 * executable user mapping and the page has not already been marked
 	 * executable.
 	 */
 	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
 	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 		vm_page_aflag_set(m, PGA_EXECUTABLE);
 		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 	}
 	return (KERN_SUCCESS);
 }
 
 static void
 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
     vm_size_t sz)
 {
 
 	/*
 	 * This is much trickier than on older systems because
 	 * we can't sync the icache on physical addresses directly
 	 * without a direct map. Instead we check a couple of cases
 	 * where the memory is already mapped in and, failing that,
 	 * use the same trick we use for page zeroing to create
 	 * a temporary mapping for this physical address.
 	 */
 
 	if (!pmap_bootstrapped) {
 		/*
 		 * If PMAP is not bootstrapped, we are likely to be
 		 * in real mode.
 		 */
 		__syncicache((void *)pa, sz);
 	} else if (pmap == kernel_pmap) {
 		__syncicache((void *)va, sz);
 	} else if (hw_direct_map) {
 		__syncicache((void *)PHYS_TO_DMAP(pa), sz);
 	} else {
 		/* Use the scratch page to set up a temp mapping */
 
 		mtx_lock(&moea64_scratchpage_mtx);
 
 		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
 		__syncicache((void *)(moea64_scratchpage_va[1] + 
 		    (va & ADDR_POFF)), sz);
 
 		mtx_unlock(&moea64_scratchpage_mtx);
 	}
 }
 
 /*
  * Maps a sequence of resident pages belonging to the same object.
  * The sequence begins with the given page m_start.  This page is
  * mapped at the given virtual address start.  Each subsequent page is
  * mapped at a virtual address that is offset from start by the same
  * amount as the page is offset from m_start within the object.  The
  * last page in the sequence is the page with the largest offset from
  * m_start that can be mapped at a virtual address less than the given
  * virtual address end.  Not every virtual page between start and end
  * is mapped; only those for which a resident page exists with the
  * corresponding offset from m_start are mapped.
  */
 void
 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
 	vm_page_t m;
 	vm_pindex_t diff, psize;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
 	psize = atop(end - start);
 	m = m_start;
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 }
 
 void
 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
 	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    PMAP_ENTER_NOSLEEP, 0);
 }
 
 vm_paddr_t
 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
 
 	PMAP_LOCK(pm);
 	pvo = moea64_pvo_find_va(pm, va);
 	if (pvo == NULL)
 		pa = 0;
 	else
 		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(pm);
 
 	return (pa);
 }
 
 /*
  * Atomically extract and hold the physical page with the given
  * pmap and virtual address pair if that mapping permits the given
  * protection.
  */
 vm_page_t
 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
 	vm_paddr_t pa;
 
 	m = NULL;
 	pa = 0;
 	PMAP_LOCK(pmap);
 retry:
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
 		if (vm_page_pa_tryrelock(pmap,
 		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
 			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 		vm_page_hold(m);
 	}
 	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
 
 static mmu_t installed_mmu;
 
 static void *
 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
     uint8_t *flags, int wait)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
 	vm_page_t m;
 	int needed_lock;
 
 	/*
 	 * This entire routine is a horrible hack to avoid bothering kmem
 	 * for new KVA addresses. Because this can get called from inside
 	 * kmem allocation routines, calling kmem for a new address here
 	 * can lead to recursively locking non-recursive mutexes.
 	 */
 
 	*flags = UMA_SLAB_PRIV;
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
 
 	m = vm_page_alloc_domain(NULL, 0, domain,
 	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
 	if (m == NULL)
 		return (NULL);
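 
 	/*
 	 * Map the page at a kernel VA numerically equal to its physical
 	 * address so that no new KVA has to be allocated and this path
 	 * never calls back into kmem.
 	 */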
 
 	va = VM_PAGE_TO_PHYS(m);
 
 	pvo = alloc_pvo_entry(1 /* bootstrap */);
 
 	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
 	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
 
 	if (needed_lock)
 		PMAP_LOCK(kernel_pmap);
 
 	init_pvo_entry(pvo, kernel_pmap, va);
 	pvo->pvo_vaddr |= PVO_WIRED;
 
 	moea64_pvo_enter(installed_mmu, pvo, NULL);
 
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 	
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
 		bzero((void *)va, PAGE_SIZE);
 
 	return (void *)va;
 }
 
 extern int elf32_nxstack;
 
 void
 moea64_init(mmu_t mmu)
 {
 
 	CTR0(KTR_PMAP, "moea64_init");
 
 	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
 
 	if (!hw_direct_map) {
 		installed_mmu = mmu;
 		uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
 	}
 
 #ifdef COMPAT_FREEBSD32
 	elf32_nxstack = 1;
 #endif
 
 	moea64_initialized = TRUE;
 }
 
 boolean_t
 moea64_is_referenced(mmu_t mmu, vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_is_referenced: page %p is not managed", m));
 
 	return (moea64_query_bit(mmu, m, LPTE_REF));
 }
 
 boolean_t
 moea64_is_modified(mmu_t mmu, vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_is_modified: page %p is not managed", m));
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have LPTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }
 
 boolean_t
 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
 	boolean_t rv = TRUE;
 
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	if (pvo != NULL)
 		rv = FALSE;
 	PMAP_UNLOCK(pmap);
 	return (rv);
 }
 
 void
 moea64_clear_modify(mmu_t mmu, vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	KASSERT(!vm_page_xbusied(m),
 	    ("moea64_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
 	 * set.  If the object containing the page is locked and the page is
 	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	moea64_clear_bit(mmu, m, LPTE_CHG);
 }
 
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
 void
 moea64_remove_write(mmu_t mmu, vm_page_t m)
 {
 	struct	pvo_entry *pvo;
 	int64_t	refchg, ret;
 	pmap_t	pmap;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_remove_write: page %p is not managed", m));
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * set by another thread while the object is locked.  Thus,
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	powerpc_sync();
 	PV_PAGE_LOCK(m);
 	refchg = 0;
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
 			ret = MOEA64_PTE_REPLACE(mmu, pvo,
 			    MOEA64_PTE_PROT_UPDATE);
 			if (ret < 0)
 				ret = LPTE_CHG;
 			refchg |= ret;
 			if (pvo->pvo_pmap == kernel_pmap)
 				isync();
 		}
 		PMAP_UNLOCK(pmap);
 	}
 	if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
 		vm_page_dirty(m);
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	PV_PAGE_UNLOCK(m);
 }
 
 /*
  *	moea64_ts_referenced:
  *
  *	Return a count of reference bits for a page, clearing those bits.
  *	It is not necessary for every reference bit to be cleared, but it
  *	is necessary that 0 only be returned when there are truly no
  *	reference bits set.
  *
  *	XXX: The exact number of bits to check and clear is a matter that
  *	should be tested and standardized at some point in the future for
  *	optimal aging of shared pages.
  */
 int
 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_ts_referenced: page %p is not managed", m));
 	return (moea64_clear_bit(mmu, m, LPTE_REF));
 }
 
 /*
  * Modify the WIMG settings of all mappings for a page.
  */
 void
 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 {
 	struct	pvo_entry *pvo;
 	int64_t	refchg;
 	pmap_t	pmap;
 	uint64_t lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		m->md.mdpg_cache_attrs = ma;
 		return;
 	}
 
 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
 			pvo->pvo_pte.pa &= ~LPTE_WIMG;
 			pvo->pvo_pte.pa |= lo;
 			refchg = MOEA64_PTE_REPLACE(mmu, pvo,
 			    MOEA64_PTE_INVALIDATE);
 			if (refchg < 0)
 				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
 				    LPTE_CHG : 0;
 			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
 			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 				refchg |=
 				    atomic_readandclear_32(&m->md.mdpg_attrs);
 				if (refchg & LPTE_CHG)
 					vm_page_dirty(m);
 				if (refchg & LPTE_REF)
 					vm_page_aflag_set(m, PGA_REFERENCED);
 			}
 			if (pvo->pvo_pmap == kernel_pmap)
 				isync();
 		}
 		PMAP_UNLOCK(pmap);
 	}
 	m->md.mdpg_cache_attrs = ma;
 	PV_PAGE_UNLOCK(m);
 }
 
 /*
  * Map a wired page into kernel virtual address space.
  */
 void
 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 	int		error;	
 	struct pvo_entry *pvo, *oldpvo;
 
 	pvo = alloc_pvo_entry(0);
 	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
 	pvo->pvo_vaddr |= PVO_WIRED;
 
 	PMAP_LOCK(kernel_pmap);
 	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
 	if (oldpvo != NULL)
 		moea64_pvo_remove_from_pmap(mmu, oldpvo);
 	init_pvo_entry(pvo, kernel_pmap, va);
 	error = moea64_pvo_enter(mmu, pvo, NULL);
 	PMAP_UNLOCK(kernel_pmap);
 
 	/* Free any dead pages */
 	if (oldpvo != NULL) {
 		PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
 		moea64_pvo_remove_from_page(mmu, oldpvo);
 		PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
 		free_pvo_entry(oldpvo);
 	}
 
 	if (error != 0 && error != ENOENT)
 		panic("moea64_kenter_attr: failed to enter va %#zx pa %#zx: %d",
 		    va,
 		    pa, error);
 }
 
 void
 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 {
 
 	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
 }
 
 /*
  * Extract the physical page address associated with the given kernel virtual
  * address.
  */
 vm_paddr_t
 moea64_kextract(mmu_t mmu, vm_offset_t va)
 {
 	struct		pvo_entry *pvo;
 	vm_paddr_t pa;
 
 	/*
 	 * Shortcut the direct-mapped case when applicable.  We never put
 	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
 	 */
 	if (va < VM_MIN_KERNEL_ADDRESS)
 		return (va);
 
 	PMAP_LOCK(kernel_pmap);
 	pvo = moea64_pvo_find_va(kernel_pmap, va);
 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
 	    va));
 	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(kernel_pmap);
 	return (pa);
 }
 
 /*
  * Remove a wired page from kernel virtual address space.
  */
 void
 moea64_kremove(mmu_t mmu, vm_offset_t va)
 {
 	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
 }
 
 /*
  * Provide a kernel pointer corresponding to a given userland pointer.
  * The returned pointer is valid until the next time this function is
  * called in this thread. This is used internally in copyin/copyout.
  */
 static int
 moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen)
 {
 	size_t l;
 #ifdef __powerpc64__
 	struct slb *slb;
 #endif
 	register_t slbv;
 
 	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
 	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
 	if (l > ulen)
 		l = ulen;
 	if (klen)
 		*klen = l;
 	else if (l != ulen)
 		return (EFAULT);
 
 #ifdef __powerpc64__
 	/* Try lockless look-up first */
 	slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
 
 	if (slb == NULL) {
 		/* If it isn't there, we need to pre-fault the VSID */
 		PMAP_LOCK(pm);
 		slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
 		PMAP_UNLOCK(pm);
 	} else {
 		slbv = slb->slbv;
 	}
 
 	/* Mark segment no-execute */
 	slbv |= SLBV_N;
 #else
 	slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
 
 	/* Mark segment no-execute */
 	slbv |= SR_N;
 #endif
 
 	/* If we have already set this VSID, we can just return */
 	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
 		return (0);
   
 	__asm __volatile("isync");
 	curthread->td_pcb->pcb_cpu.aim.usr_segm =
 	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
 #ifdef __powerpc64__
 	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
 	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
 #else
 	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
 #endif
+
+	return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
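+	/*
+	 * A fault address inside the USER_ADDR copy window is translated
+	 * back to the original user address by substituting the user
+	 * segment saved by moea64_map_user_ptr(); any other address is
+	 * already a plain kernel address.
+	 */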
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
 
 	return (0);
 }
 
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
  * The value passed in *virt is a suggested virtual address for the mapping.
  * Architectures which can support a direct-mapped physical to virtual region
  * can return the appropriate address within that region, leaving '*virt'
  * unchanged.  Other architectures should map the pages starting at '*virt' and
  * update '*virt' with the first usable address after the mapped region.
  */
 vm_offset_t
 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
     vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t	sva, va;
 
 	if (hw_direct_map) {
 		/*
 		 * Check if every page in the region is covered by the direct
 		 * map. The direct map covers all of physical memory, so use
 		 * moea64_calc_wimg() as a shortcut to test whether each page
 		 * is in physical memory and therefore covered by the direct
 		 * map.
 		 */
 		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
 			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
 				break;
 		if (va == pa_end)
 			return (PHYS_TO_DMAP(pa_start));
 	}
 	sva = *virt;
 	va = sva;
 	/* XXX respect prot argument */
 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
 		moea64_kenter(mmu, va, pa_start);
 	*virt = va;
 
 	return (sva);
 }
 
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may
  * be changed upwards or downwards in the future; it
  * is only necessary that true be returned for a small
  * subset of pmaps for proper page aging.
  */
 boolean_t
 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
 			rv = TRUE;
 			break;
 		}
 		if (++loops >= 16)
 			break;
 	}
 	PV_PAGE_UNLOCK(m);
 	return (rv);
 }
 
 void
 moea64_page_init(mmu_t mmu __unused, vm_page_t m)
 {
 
 	m->md.mdpg_attrs = 0;
 	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
 	LIST_INIT(&m->md.mdpg_pvoh);
 }
 
 /*
  * Return the number of managed mappings to the given physical page
  * that are wired.
  */
 int
 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
 {
 	struct pvo_entry *pvo;
 	int count;
 
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
 			count++;
 	PV_PAGE_UNLOCK(m);
 	return (count);
 }
 
 static uintptr_t	moea64_vsidcontext;
 
 uintptr_t
 moea64_get_unique_vsid(void)
 {
 	u_int entropy;
 	register_t hash;
 	uint32_t mask;
 	int i;
 
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
 
 	mtx_lock(&moea64_slb_mutex);
 	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
 		u_int	n;
 
 		/*
 		 * Create a new value by multiplying by a prime and adding in
 		 * entropy from the timebase register.  This is to make the
 		 * VSID more random so that the PT hash function collides
 		 * less often.  (Note that the prime causes gcc to do shifts
 		 * instead of a multiply.)
 		 */
 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
 		hash = moea64_vsidcontext & (NVSIDS - 1);
 		if (hash == 0)		/* 0 is special, avoid it */
 			continue;
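 		/*
 		 * Each word of moea64_vsid_bitmap tracks VSID_NBPW hash
 		 * buckets: 'n' selects the bitmap word and 'mask' the bit
 		 * within it.
 		 */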
 		n = hash >> 5;
 		mask = 1 << (hash & (VSID_NBPW - 1));
 		hash = (moea64_vsidcontext & VSID_HASHMASK);
 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
 			/* anything free in this bucket? */
 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
 				entropy = (moea64_vsidcontext >> 20);
 				continue;
 			}
 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
 			mask = 1 << i;
 			hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
 			hash |= i;
 		}
 		if (hash == VSID_VRMA)	/* also special, avoid this too */
 			continue;
 		KASSERT(!(moea64_vsid_bitmap[n] & mask),
 		    ("Allocating in-use VSID %#zx\n", hash));
 		moea64_vsid_bitmap[n] |= mask;
 		mtx_unlock(&moea64_slb_mutex);
 		return (hash);
 	}
 
 	mtx_unlock(&moea64_slb_mutex);
 	panic("%s: out of segments", __func__);
 }
 
 #ifdef __powerpc64__
 void
 moea64_pinit(mmu_t mmu, pmap_t pmap)
 {
 
 	RB_INIT(&pmap->pmap_pvo);
 
 	pmap->pm_slb_tree_root = slb_alloc_tree();
 	pmap->pm_slb = slb_alloc_user_cache();
 	pmap->pm_slb_len = 0;
 }
 #else
 void
 moea64_pinit(mmu_t mmu, pmap_t pmap)
 {
 	int	i;
 	uint32_t hash;
 
 	RB_INIT(&pmap->pmap_pvo);
 
 	if (pmap_bootstrapped)
 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
 		    (vm_offset_t)pmap);
 	else
 		pmap->pmap_phys = pmap;
 
 	/*
 	 * Allocate some segment registers for this pmap.
 	 */
 	hash = moea64_get_unique_vsid();
 
 	for (i = 0; i < 16; i++) 
 		pmap->pm_sr[i] = VSID_MAKE(i, hash);
 
 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
 }
 #endif
 
 /*
  * Initialize the pmap associated with process 0.
  */
 void
 moea64_pinit0(mmu_t mmu, pmap_t pm)
 {
 
 	PMAP_LOCK_INIT(pm);
 	moea64_pinit(mmu, pm);
 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 }
 
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
 static void
 moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
 {
 	struct vm_page *pg;
 	vm_prot_t oldprot;
 	int32_t refchg;
 
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 
 	/*
 	 * Change the protection of the page.
 	 */
 	oldprot = pvo->pvo_pte.prot;
 	pvo->pvo_pte.prot = prot;
 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
 	/*
 	 * If the PVO is in the page table, update mapping
 	 */
 	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
 	if (refchg < 0)
 		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
 
 	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
 	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 		if ((pg->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
 		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
 		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
 	}
 
 	/*
 	 * Update vm about the REF/CHG bits if the page is managed and we have
 	 * removed write access.
 	 */
 	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
 	    (oldprot & VM_PROT_WRITE)) {
 		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
 		if (refchg & LPTE_CHG)
 			vm_page_dirty(pg);
 		if (refchg & LPTE_REF)
 			vm_page_aflag_set(pg, PGA_REFERENCED);
 	}
 }
 
 void
 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 
 	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
 	    sva, eva, prot);
 
 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
 	    ("moea64_protect: non current pmap"));
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		moea64_remove(mmu, pm, sva, eva);
 		return;
 	}
 
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 		moea64_pvo_protect(mmu, pm, pvo, prot);
 	}
 	PMAP_UNLOCK(pm);
 }
 
 /*
  * Map a list of wired pages into kernel virtual address space.  This is
  * intended for temporary mappings which do not need page modification or
  * references recorded.  Existing mappings in the region are overwritten.
  */
 void
 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
 {
 	while (count-- > 0) {
 		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
 }
 
 /*
  * Remove page mappings from kernel virtual address space.  Intended for
  * temporary mappings entered by moea64_qenter.
  */
 void
 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
 {
 	while (count-- > 0) {
 		moea64_kremove(mmu, va);
 		va += PAGE_SIZE;
 	}
 }
 
 void
 moea64_release_vsid(uint64_t vsid)
 {
 	int idx, mask;
 
 	mtx_lock(&moea64_slb_mutex);
 	idx = vsid & (NVSIDS-1);
 	mask = 1 << (idx % VSID_NBPW);
 	idx /= VSID_NBPW;
 	KASSERT(moea64_vsid_bitmap[idx] & mask,
 	    ("Freeing unallocated VSID %#jx", vsid));
 	moea64_vsid_bitmap[idx] &= ~mask;
 	mtx_unlock(&moea64_slb_mutex);
 }
 	
 
 void
 moea64_release(mmu_t mmu, pmap_t pmap)
 {
         
 	/*
 	 * Free segment registers' VSIDs
 	 */
 #ifdef __powerpc64__
 	slb_free_tree(pmap);
 	slb_free_user_cache(pmap->pm_slb);
 #else
 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
 
 	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
 #endif
 }
 
 /*
  * Remove all pages mapped by the specified pmap
  */
 void
 moea64_remove_pages(mmu_t mmu, pmap_t pm)
 {
 	struct pvo_entry *pvo, *tpvo;
 	struct pvo_tree tofree;
 
 	RB_INIT(&tofree);
 
 	PMAP_LOCK(pm);
 	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
 		if (pvo->pvo_vaddr & PVO_WIRED)
 			continue;
 
 		/*
 		 * For locking reasons, remove this from the page table and
 		 * pmap, but save delinking from the vm_page for a second
 		 * pass
 		 */
 		moea64_pvo_remove_from_pmap(mmu, pvo);
 		RB_INSERT(pvo_tree, &tofree, pvo);
 	}
 	PMAP_UNLOCK(pm);
 
 	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
 		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
 		moea64_pvo_remove_from_page(mmu, pvo);
 		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
 		RB_REMOVE(pvo_tree, &tofree, pvo);
 		free_pvo_entry(pvo);
 	}
 }
 
 /*
  * Remove the given range of addresses from the specified map.
  */
 void
 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct  pvo_entry *pvo, *tpvo, key;
 	struct pvo_tree tofree;
 
 	/*
 	 * Perform an unsynchronized read.  This is, however, safe.
 	 */
 	if (pm->pm_stats.resident_count == 0)
 		return;
 
 	key.pvo_vaddr = sva;
 
 	RB_INIT(&tofree);
 
 	PMAP_LOCK(pm);
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 
 		/*
 		 * For locking reasons, remove this from the page table and
 		 * pmap, but save delinking from the vm_page for a second
 		 * pass
 		 */
 		moea64_pvo_remove_from_pmap(mmu, pvo);
 		RB_INSERT(pvo_tree, &tofree, pvo);
 	}
 	PMAP_UNLOCK(pm);
 
 	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
 		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
 		moea64_pvo_remove_from_page(mmu, pvo);
 		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
 		RB_REMOVE(pvo_tree, &tofree, pvo);
 		free_pvo_entry(pvo);
 	}
 }
 
 /*
  * Remove physical page from all pmaps in which it resides.
  * moea64_pvo_remove_from_pmap() will reflect changes in the PTEs back to the
  * vm_page.
  */
 void
 moea64_remove_all(mmu_t mmu, vm_page_t m)
 {
 	struct	pvo_entry *pvo, *next_pvo;
 	struct	pvo_head freequeue;
 	int	wasdead;
 	pmap_t	pmap;
 
 	LIST_INIT(&freequeue);
 
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
 		if (!wasdead)
 			moea64_pvo_remove_from_pmap(mmu, pvo);
 		moea64_pvo_remove_from_page(mmu, pvo);
 		if (!wasdead)
 			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
 		PMAP_UNLOCK(pmap);
 		
 	}
 	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
 	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
 	PV_PAGE_UNLOCK(m);
 
 	/* Clean up UMA allocations */
 	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
 		free_pvo_entry(pvo);
 }
 
 /*
  * Allocate physical memory directly from the phys_avail map.
  * Can only be called from moea64_bootstrap before avail start and end are
  * calculated.
  */
 vm_offset_t
 moea64_bootstrap_alloc(vm_size_t size, u_int align)
 {
 	vm_offset_t	s, e;
 	int		i, j;
 
 	size = round_page(size);
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		if (align != 0)
 			s = roundup2(phys_avail[i], align);
 		else
 			s = phys_avail[i];
 		e = s + size;
 
 		if (s < phys_avail[i] || e > phys_avail[i + 1])
 			continue;
 
 		if (s + size > platform_real_maxaddr())
 			continue;
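 
 		/*
 		 * Carve the allocation out of this phys_avail region: trim
 		 * it from the front, trim it from the back, or split the
 		 * region in two if the allocation falls in the middle.
 		 */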
 
 		if (s == phys_avail[i]) {
 			phys_avail[i] += size;
 		} else if (e == phys_avail[i + 1]) {
 			phys_avail[i + 1] -= size;
 		} else {
 			for (j = phys_avail_count * 2; j > i; j -= 2) {
 				phys_avail[j] = phys_avail[j - 2];
 				phys_avail[j + 1] = phys_avail[j - 1];
 			}
 
 			phys_avail[i + 3] = phys_avail[i + 1];
 			phys_avail[i + 1] = s;
 			phys_avail[i + 2] = e;
 			phys_avail_count++;
 		}
 
 		return (s);
 	}
 	panic("moea64_bootstrap_alloc: could not allocate memory");
 }
 
 static int
 moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head)
 {
 	int first = 0, err;
 
 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
 	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
 	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));
 
 	moea64_pvo_enter_calls++;
 
 	/*
 	 * Add to pmap list
 	 */
 	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 
 	/*
 	 * Remember if the list was empty and therefore will be the first
 	 * item.
 	 */
 	if (pvo_head != NULL) {
 		if (LIST_FIRST(pvo_head) == NULL)
 			first = 1;
 		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
 	}
 
 	if (pvo->pvo_vaddr & PVO_WIRED)
 		pvo->pvo_pmap->pm_stats.wired_count++;
 	pvo->pvo_pmap->pm_stats.resident_count++;
 
 	/*
 	 * Insert it into the hardware page table
 	 */
 	err = MOEA64_PTE_INSERT(mmu, pvo);
 	if (err != 0) {
 		panic("moea64_pvo_enter: overflow");
 	}
 
 	moea64_pvo_entries++;
 
 	if (pvo->pvo_pmap == kernel_pmap)
 		isync();
 
 #ifdef __powerpc64__
 	/*
 	 * Make sure all our bootstrap mappings are in the SLB as soon
 	 * as virtual memory is switched on.
 	 */
 	if (!pmap_bootstrapped)
 		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
 		    pvo->pvo_vaddr & PVO_LARGE);
 #endif
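 
 	/*
 	 * ENOENT tells the caller that this was the first mapping entered
 	 * for the page; callers such as moea64_kenter_attr() treat it as
 	 * success.
 	 */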
 
 	return (first ? ENOENT : 0);
 }
 
 static void
 moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
 {
 	struct	vm_page *pg;
 	int32_t refchg;
 
 	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
 	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
 
 	/*
 	 * If there is an active pte entry, we need to deactivate it
 	 */
 	refchg = MOEA64_PTE_UNSET(mmu, pvo);
 	if (refchg < 0) {
 		/*
 		 * If it was evicted from the page table, be pessimistic and
 		 * dirty the page.
 		 */
 		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
 			refchg = LPTE_CHG;
 		else
 			refchg = 0;
 	}
 
 	/*
 	 * Update our statistics.
 	 */
 	pvo->pvo_pmap->pm_stats.resident_count--;
 	if (pvo->pvo_vaddr & PVO_WIRED)
 		pvo->pvo_pmap->pm_stats.wired_count--;
 
 	/*
 	 * Remove this PVO from the pmap list.
 	 */
 	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 
 	/*
 	 * Mark this for the next sweep
 	 */
 	pvo->pvo_vaddr |= PVO_DEAD;
 
 	/* Send RC bits to VM */
 	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 		if (pg != NULL) {
 			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
 			if (refchg & LPTE_CHG)
 				vm_page_dirty(pg);
 			if (refchg & LPTE_REF)
 				vm_page_aflag_set(pg, PGA_REFERENCED);
 		}
 	}
 }
 
 static void
 moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
 {
 	struct	vm_page *pg;
 
 	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
 
 	/* Use NULL pmaps as a sentinel for races in page deletion */
 	if (pvo->pvo_pmap == NULL)
 		return;
 	pvo->pvo_pmap = NULL;
 
 	/*
 	 * Update vm about page writeability/executability if managed
 	 */
 	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
 	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
 		LIST_REMOVE(pvo, pvo_vlink);
 		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
 			vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
 	}
 
 	moea64_pvo_entries--;
 	moea64_pvo_remove_calls++;
 }
 
 static struct pvo_entry *
 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
 {
 	struct pvo_entry key;
 
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 
 	key.pvo_vaddr = va & ~ADDR_POFF;
 	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
 }
 
 static boolean_t
 moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
 {
 	struct	pvo_entry *pvo;
 	int64_t ret;
 	boolean_t rv;
 
 	/*
 	 * See if this bit is stored in the page already.
 	 */
 	if (m->md.mdpg_attrs & ptebit)
 		return (TRUE);
 
 	/*
 	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
 	 * flushed to the PTEs.
 	 */
 	rv = FALSE;
 	powerpc_sync();
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		ret = 0;
 
 		/*
 		 * See if this pvo has a valid PTE.  If so, fetch the
 		 * REF/CHG bits from the valid PTE.  If the appropriate
 		 * ptebit is set, return success.
 		 */
 		PMAP_LOCK(pvo->pvo_pmap);
 		if (!(pvo->pvo_vaddr & PVO_DEAD))
 			ret = MOEA64_PTE_SYNCH(mmu, pvo);
 		PMAP_UNLOCK(pvo->pvo_pmap);
 
 		if (ret > 0) {
 			atomic_set_32(&m->md.mdpg_attrs,
 			    ret & (LPTE_CHG | LPTE_REF));
 			if (ret & ptebit) {
 				rv = TRUE;
 				break;
 			}
 		}
 	}
 	PV_PAGE_UNLOCK(m);
 
 	return (rv);
 }
 
 static u_int
 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	int64_t ret;
 
 	/*
 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
 	 * we can reset the right ones).
 	 */
 	powerpc_sync();
 
 	/*
 	 * For each pvo entry, clear the pte's ptebit.
 	 */
 	count = 0;
 	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		ret = 0;
 
 		PMAP_LOCK(pvo->pvo_pmap);
 		if (!(pvo->pvo_vaddr & PVO_DEAD))
 			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
 		PMAP_UNLOCK(pvo->pvo_pmap);
 
 		if (ret > 0 && (ret & ptebit))
 			count++;
 	}
 	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
 	PV_PAGE_UNLOCK(m);
 
 	return (count);
 }
 
 boolean_t
 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	struct pvo_entry *pvo, key;
 	vm_offset_t ppa;
 	int error = 0;
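 
 	/*
 	 * Kernel mappings below VM_MIN_KERNEL_ADDRESS are 1:1, so walk the
 	 * range using each physical address as the virtual lookup key and
 	 * check that every page maps back to the same physical address.
 	 */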
 
 	PMAP_LOCK(kernel_pmap);
 	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
 	    ppa < pa + size; ppa += PAGE_SIZE,
 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
 		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
 			error = EFAULT;
 			break;
 		}
 	}
 	PMAP_UNLOCK(kernel_pmap);
 
 	return (error);
 }
 
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where they are mapped. This
  * routine is intended to be used for mapping device memory,
  * NOT real memory.
  */
 void *
 moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	vm_offset_t va, tmpva, ppa, offset;
 
 	ppa = trunc_page(pa);
 	offset = pa & PAGE_MASK;
 	size = roundup2(offset + size, PAGE_SIZE);
 
 	va = kva_alloc(size);
 
 	if (!va)
 		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0;) {
 		moea64_kenter_attr(mmu, tmpva, ppa, ma);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
 		ppa += PAGE_SIZE;
 	}
 
 	return ((void *)(va + offset));
 }
 
 void *
 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 
 	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
 }
 
 void
 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t base, offset;
 
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
 	size = roundup2(offset + size, PAGE_SIZE);
 
 	kva_free(base, size);
 }
 
 void
 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t lim;
 	vm_paddr_t pa;
 	vm_size_t len;
 
 	PMAP_LOCK(pm);
 	while (sz > 0) {
 		lim = round_page(va);
 		len = MIN(lim - va, sz);
 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
 		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
 			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
 			moea64_syncicache(mmu, pm, va, pa, len);
 		}
 		va += len;
 		sz -= len;
 	}
 	PMAP_UNLOCK(pm);
 }
 
 void
 moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 {
 
 	*va = (void *)pa;
 }
 
 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 
 void
 moea64_scan_init(mmu_t mmu)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
 	int i;
 
 	if (!do_minidump) {
 		/* Initialize phys. segments for dumpsys(). */
 		memset(&dump_map, 0, sizeof(dump_map));
 		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 		for (i = 0; i < pregions_sz; i++) {
 			dump_map[i].pa_start = pregions[i].mr_start;
 			dump_map[i].pa_size = pregions[i].mr_size;
 		}
 		return;
 	}
 
 	/* Virtual segments for minidumps: */
 	memset(&dump_map, 0, sizeof(dump_map));
 
 	/* 1st: kernel .data and .bss. */
 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
 	dump_map[0].pa_size = round_page((uintptr_t)_end) -
 	    dump_map[0].pa_start;
 
 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
 	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
 	dump_map[1].pa_size = round_page(msgbufp->msg_size);
 
 	/* 3rd: kernel VM. */
 	va = dump_map[1].pa_start + dump_map[1].pa_size;
 	/* Find start of next chunk (from va). */
 	while (va < virtual_end) {
 		/* Don't dump the buffer cache. */
 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
 			va = kmi.buffer_eva;
 			continue;
 		}
 		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
 		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
 			break;
 		va += PAGE_SIZE;
 	}
 	if (va < virtual_end) {
 		dump_map[2].pa_start = va;
 		va += PAGE_SIZE;
 		/* Find last page in chunk. */
 		while (va < virtual_end) {
 			/* Don't run into the buffer cache. */
 			if (va == kmi.buffer_sva)
 				break;
 			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
 			if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
 				break;
 			va += PAGE_SIZE;
 		}
 		dump_map[2].pa_size = va - dump_map[2].pa_start;
 	}
 }
 
Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c	(revision 328529)
+++ head/sys/powerpc/booke/pmap.c	(revision 328530)
@@ -1,4412 +1,4434 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Some hw specific parts of this pmap were derived or influenced
  * by NetBSD's ibm4xx pmap module. More generic code is shared with
  * a few other pmap modules from the FreeBSD tree.
  */
 
  /*
   * VM layout notes:
   *
   * Kernel and user threads run within one common virtual address space
   * defined by AS=0.
   *
   * 32-bit pmap:
   * Virtual address space layout:
   * -----------------------------
   * 0x0000_0000 - 0x7fff_ffff	: user process
   * 0x8000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
   * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
   *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
   * 0xc100_0000 - 0xffff_ffff	: KVA
   *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
   *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
   *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
   *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
   *
   * 64-bit pmap:
   * Virtual address space layout:
   * -----------------------------
   * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : user process
   *   0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff    : text, data, heap, maps, libraries
   *   0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff    : mmio region
   *   0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff    : stack
   * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff      : kernel reserved
   *   0xc000_0000_0000_0000 - endkernel-1              : kernel code & data
   *               endkernel - msgbufp-1                : flat device tree
   *                 msgbufp - ptbl_bufs-1              : message buffer
   *               ptbl_bufs - kernel_pdir-1            : kernel page tables
   *             kernel_pdir - kernel_pp2d-1            : kernel page directory
   *             kernel_pp2d - .                        : kernel pointers to page directory
   *      pmap_zero_copy_min - crashdumpmap-1           : reserved for page zero/copy
   *            crashdumpmap - ptbl_buf_pool_vabase-1   : reserved for ptbl bufs
   *    ptbl_buf_pool_vabase - virtual_avail-1          : user page directories and page tables
   *           virtual_avail - 0xcfff_ffff_ffff_ffff    : actual free KVA space
   * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : coprocessor region
   * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff      : mmio region
   * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : direct map
   *   0xf000_0000_0000_0000 - +Maxmem                  : physmem map
   *                         - 0xffff_ffff_ffff_ffff    : device direct map
   */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_kstack_pages.h"
 
 #include <sys/param.h>
 #include <sys/conf.h>
 #include <sys/malloc.h>
 #include <sys/ktr.h>
 #include <sys/proc.h>
 #include <sys/user.h>
 #include <sys/queue.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kerneldump.h>
 #include <sys/linker.h>
 #include <sys/msgbuf.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/vmmeter.h>
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_param.h>
 #include <vm/vm_map.h>
 #include <vm/vm_pager.h>
 #include <vm/uma.h>
 
 #include <machine/_inttypes.h>
 #include <machine/cpu.h>
 #include <machine/pcb.h>
 #include <machine/platform.h>
 
 #include <machine/tlb.h>
 #include <machine/spr.h>
 #include <machine/md_var.h>
 #include <machine/mmuvar.h>
 #include <machine/pmap.h>
 #include <machine/pte.h>
 
 #include "mmu_if.h"
 
 #define	SPARSE_MAPDEV
 #ifdef  DEBUG
 #define debugf(fmt, args...) printf(fmt, ##args)
 #else
 #define debugf(fmt, args...)
 #endif
 
 #ifdef __powerpc64__
 #define	PRI0ptrX	"016lx"
 #else
 #define	PRI0ptrX	"08x"
 #endif
 
 #define TODO			panic("%s: not implemented", __func__);
 
 extern unsigned char _etext[];
 extern unsigned char _end[];
 
 extern uint32_t *bootinfo;
 
 vm_paddr_t kernload;
 vm_offset_t kernstart;
 vm_size_t kernsize;
 
 /* Message buffer and tables. */
 static vm_offset_t data_start;
 static vm_size_t data_end;
 
 /* Phys/avail memory regions. */
 static struct mem_region *availmem_regions;
 static int availmem_regions_sz;
 static struct mem_region *physmem_regions;
 static int physmem_regions_sz;
 
 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
 static vm_offset_t zero_page_va;
 static struct mtx zero_page_mutex;
 
 static struct mtx tlbivax_mutex;
 
 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
 static vm_offset_t copy_page_src_va;
 static vm_offset_t copy_page_dst_va;
 static struct mtx copy_page_mutex;
 
 /**************************************************************************/
 /* PMAP */
 /**************************************************************************/
 
 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
     vm_prot_t, u_int flags, int8_t psind);
 
 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
 #ifdef __powerpc64__
 unsigned int kernel_pdirs;
 #endif
 
 /*
  * If user pmap is processed with mmu_booke_remove and the resident count
  * drops to 0, there are no more pages to remove, so we need not continue.
  */
 #define PMAP_REMOVE_DONE(pmap) \
 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
 
 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
 extern int elf32_nxstack;
 #endif
 
 /**************************************************************************/
 /* TLB and TID handling */
 /**************************************************************************/
 
 /* Translation ID busy table */
 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
 
 /*
  * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
  * core revisions and should be read from h/w registers during early config.
  */
 uint32_t tlb0_entries;
 uint32_t tlb0_ways;
 uint32_t tlb0_entries_per_way;
 uint32_t tlb1_entries;
 
 #define TLB0_ENTRIES		(tlb0_entries)
 #define TLB0_WAYS		(tlb0_ways)
 #define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
 
 #define TLB1_ENTRIES (tlb1_entries)
 
 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
 
 static tlbtid_t tid_alloc(struct pmap *);
 static void tid_flush(tlbtid_t tid);
 
 #ifdef __powerpc64__
 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
 #else
 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
 #endif
 
 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
 
 static vm_size_t tsize2size(unsigned int);
 static unsigned int size2tsize(vm_size_t);
 static unsigned int ilog2(unsigned int);
 
 static void set_mas4_defaults(void);
 
 static inline void tlb0_flush_entry(vm_offset_t);
 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
 
 /**************************************************************************/
 /* Page table management */
 /**************************************************************************/
 
 static struct rwlock_padalign pvh_global_lock;
 
 /* Data for the pv entry allocation mechanism */
 static uma_zone_t pvzone;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
 
 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
 
 #ifndef PMAP_SHPGPERPROC
 #define PMAP_SHPGPERPROC	200
 #endif
 
 static void ptbl_init(void);
 static struct ptbl_buf *ptbl_buf_alloc(void);
 static void ptbl_buf_free(struct ptbl_buf *);
 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
 
 #ifdef __powerpc64__
 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
 			 unsigned int, boolean_t);
 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
 #else
 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
 static void ptbl_free(mmu_t, pmap_t, unsigned int);
 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
 #endif
 
 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
 
 static pv_entry_t pv_alloc(void);
 static void pv_free(pv_entry_t);
 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
 
 static void booke_pmap_init_qpages(void);
 
 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
 #ifdef __powerpc64__
 #define PTBL_BUFS               (16UL * 16 * 16)
 #else
 #define PTBL_BUFS		(128 * 16)
 #endif
 
 struct ptbl_buf {
 	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
 	vm_offset_t kva;		/* va of mapping */
 };
 
 /* ptbl free list and a lock used for access synchronization. */
 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
 static struct mtx ptbl_buf_freelist_lock;
 
 /* Base address of kva space allocated for ptbl bufs. */
 static vm_offset_t ptbl_buf_pool_vabase;
 
 /* Pointer to ptbl_buf structures. */
 static struct ptbl_buf *ptbl_bufs;
 
 #ifdef SMP
 extern tlb_entry_t __boot_tlb1[];
 void pmap_bootstrap_ap(volatile uint32_t *);
 #endif
 
 /*
  * Kernel MMU interface
  */
 static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
 static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
     vm_size_t, vm_offset_t);
 static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
 static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
     vm_offset_t, vm_page_t *, vm_offset_t, int);
 static int		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
     vm_prot_t, u_int flags, int8_t psind);
 static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
     vm_page_t, vm_prot_t);
 static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
 static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
     vm_prot_t);
 static void		mmu_booke_init(mmu_t);
 static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
 static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
 static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
 static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
 static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
     int);
 static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
     vm_paddr_t *);
 static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
     vm_object_t, vm_pindex_t, vm_size_t);
 static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
 static void		mmu_booke_page_init(mmu_t, vm_page_t);
 static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
 static void		mmu_booke_pinit(mmu_t, pmap_t);
 static void		mmu_booke_pinit0(mmu_t, pmap_t);
 static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
     vm_prot_t);
 static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
 static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
 static void		mmu_booke_release(mmu_t, pmap_t);
 static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void		mmu_booke_remove_all(mmu_t, vm_page_t);
 static void		mmu_booke_remove_write(mmu_t, vm_page_t);
 static void		mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void		mmu_booke_zero_page(mmu_t, vm_page_t);
 static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
 static void		mmu_booke_activate(mmu_t, struct thread *);
 static void		mmu_booke_deactivate(mmu_t, struct thread *);
 static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
 static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 static void		*mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
 static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
 static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 static void		mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
 static void		mmu_booke_kremove(mmu_t, vm_offset_t);
 static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
     vm_size_t);
 static void		mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
     void **);
 static void		mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
     void *);
 static void		mmu_booke_scan_init(mmu_t);
 static vm_offset_t	mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
 static void		mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int		mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
     vm_size_t sz, vm_memattr_t mode);
 static int		mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int		mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
 	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
 	MMUMETHOD(mmu_enter,		mmu_booke_enter),
 	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
 	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
 	MMUMETHOD(mmu_extract,		mmu_booke_extract),
 	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
 	MMUMETHOD(mmu_init,		mmu_booke_init),
 	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
 	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
 	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
 	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
 	MMUMETHOD(mmu_map,		mmu_booke_map),
 	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
 	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
 	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
 	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
 	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
 	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
 	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
 	MMUMETHOD(mmu_protect,		mmu_booke_protect),
 	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
 	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
 	MMUMETHOD(mmu_release,		mmu_booke_release),
 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
 	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
 	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
 	MMUMETHOD(mmu_activate,		mmu_booke_activate),
 	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
 	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
 	MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
 
 	/* Internal interfaces */
 	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
 	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
 	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
 	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
 	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
 	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
 	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
 	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
 	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
 	MMUMETHOD(mmu_scan_init,	mmu_booke_scan_init),
 
 	{ 0, 0 }
 };
 
 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
 
 static __inline uint32_t
 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 {
 	uint32_t attrib;
 	int i;
 
 	if (ma != VM_MEMATTR_DEFAULT) {
 		switch (ma) {
 		case VM_MEMATTR_UNCACHEABLE:
 			return (MAS2_I | MAS2_G);
 		case VM_MEMATTR_WRITE_COMBINING:
 		case VM_MEMATTR_WRITE_BACK:
 		case VM_MEMATTR_PREFETCHABLE:
 			return (MAS2_I);
 		case VM_MEMATTR_WRITE_THROUGH:
 			return (MAS2_W | MAS2_M);
 		case VM_MEMATTR_CACHEABLE:
 			return (MAS2_M);
 		}
 	}
 
 	/*
 	 * Assume the page is cache inhibited and access is guarded unless
 	 * it's in our available memory array.
 	 */
 	attrib = _TLB_ENTRY_IO;
 	for (i = 0; i < physmem_regions_sz; i++) {
 		if ((pa >= physmem_regions[i].mr_start) &&
 		    (pa < (physmem_regions[i].mr_start +
 		     physmem_regions[i].mr_size))) {
 			attrib = _TLB_ENTRY_MEM;
 			break;
 		}
 	}
 
 	return (attrib);
 }
 
 static inline void
 tlb_miss_lock(void)
 {
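 	/*
 	 * Grab every other CPU's TLB miss lock (pc_booke_tlb_lock) so that
 	 * remote TLB miss handling is held off while the caller updates the
 	 * page tables and invalidates TLB entries.  tlb_miss_unlock() drops
 	 * the locks again.
 	 */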
 #ifdef SMP
 	struct pcpu *pc;
 
 	if (!smp_started)
 		return;
 
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc != pcpup) {
 
 			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
 			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
 
 			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
 			    ("tlb_miss_lock: tried to lock self"));
 
 			tlb_lock(pc->pc_booke_tlb_lock);
 
 			CTR1(KTR_PMAP, "%s: locked", __func__);
 		}
 	}
 #endif
 }
 
 static inline void
 tlb_miss_unlock(void)
 {
 #ifdef SMP
 	struct pcpu *pc;
 
 	if (!smp_started)
 		return;
 
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc != pcpup) {
 			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
 			    __func__, pc->pc_cpuid);
 
 			tlb_unlock(pc->pc_booke_tlb_lock);
 
 			CTR1(KTR_PMAP, "%s: unlocked", __func__);
 		}
 	}
 #endif
 }
 
 /* Read TLB0 configuration: number of entries and associativity. */
 static __inline void
 tlb0_get_tlbconf(void)
 {
 	uint32_t tlb0_cfg;
 
 	tlb0_cfg = mfspr(SPR_TLB0CFG);
 	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
 	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
 	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
 }
 
 /* Read TLB1 configuration: number of entries. */
 static __inline void
 tlb1_get_tlbconf(void)
 {
 	uint32_t tlb1_cfg;
 
 	tlb1_cfg = mfspr(SPR_TLB1CFG);
 	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
 }
 
 /**************************************************************************/
 /* Page table related */
 /**************************************************************************/
 
 #ifdef __powerpc64__
 /* Initialize pool of kva ptbl buffers. */
 static void
 ptbl_init(void)
 {
 	int		i;
 
 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
 	TAILQ_INIT(&ptbl_buf_freelist);
 
 	for (i = 0; i < PTBL_BUFS; i++) {
 		ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
 		    i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
 	}
 }
 
 /* Get a ptbl_buf from the freelist. */
 static struct ptbl_buf *
 ptbl_buf_alloc(void)
 {
 	struct ptbl_buf *buf;
 
 	mtx_lock(&ptbl_buf_freelist_lock);
 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
 	if (buf != NULL)
 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
 	mtx_unlock(&ptbl_buf_freelist_lock);
 
 	return (buf);
 }
 
 /* Return ptbl buff to free pool. */
 static void
 ptbl_buf_free(struct ptbl_buf *buf)
 {
 	mtx_lock(&ptbl_buf_freelist_lock);
 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
 	mtx_unlock(&ptbl_buf_freelist_lock);
 }
 
 /*
  * Search the pmap's list of allocated ptbl bufs for the buf backing the
  * given ptbl and return it to the free pool.
  */
 static void
 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
 {
 	struct ptbl_buf *pbuf;
 
 	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
 		if (pbuf->kva == (vm_offset_t) ptbl) {
 			/* Remove from pmap ptbl buf list. */
 			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
 
 			/* Free corresponding ptbl buf. */
 			ptbl_buf_free(pbuf);
 
 			break;
 		}
 	}
 }
 
 /* Get a pointer to a PTE in a page table. */
 static __inline pte_t *
 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	pte_t         **pdir;
 	pte_t          *ptbl;
 
 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
 
 	pdir = pmap->pm_pp2d[PP2D_IDX(va)];
 	if (pdir == NULL)
 		return (NULL);
 	ptbl = pdir[PDIR_IDX(va)];
 	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
 }
 
 /*
  * Search the pmap's list of allocated pdir bufs for the buf backing the
  * given pdir and return it to the free pool.
  */
 static void
 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
 {
 	struct ptbl_buf *pbuf;
 
 	TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
 		if (pbuf->kva == (vm_offset_t) pdir) {
 			/* Remove from pmap ptbl buf list. */
 			TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
 
 			/* Free corresponding pdir buf. */
 			ptbl_buf_free(pbuf);
 
 			break;
 		}
 	}
 }
 
 /* Free pdir pages and invalidate the pp2d entry. */
 static void
 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
 {
 	pte_t         **pdir;
 	vm_paddr_t	pa;
 	vm_offset_t	va;
 	vm_page_t	m;
 	int		i;
 
 	pdir = pmap->pm_pp2d[pp2d_idx];
 
 	KASSERT((pdir != NULL), ("pdir_free: null pdir"));
 
 	pmap->pm_pp2d[pp2d_idx] = NULL;
 
 	for (i = 0; i < PDIR_PAGES; i++) {
 		va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
 		pa = pte_vatopa(mmu, kernel_pmap, va);
 		m = PHYS_TO_VM_PAGE(pa);
 		vm_page_free_zero(m);
 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		pmap_kremove(va);
 	}
 
 	ptbl_free_pmap_pdir(mmu, pmap, pdir);
 }
 
 /*
  * Decrement pdir pages hold count and attempt to free pdir pages. Called
  * when removing directory entry from pdir.
  * 
  * Return 1 if pdir pages were freed.
  */
 static int
 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
 {
 	pte_t         **pdir;
 	vm_paddr_t	pa;
 	vm_page_t	m;
 	int		i;
 
 	KASSERT((pmap != kernel_pmap),
 		("pdir_unhold: unholding kernel pdir!"));
 
 	pdir = pmap->pm_pp2d[pp2d_idx];
 
 	KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
 	    ("pdir_unhold: non kva pdir"));
 
 	/* decrement hold count */
 	for (i = 0; i < PDIR_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 		    (vm_offset_t) pdir + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count--;
 	}
 
 	/*
 	 * Free the pdir pages if there are no directory entries left in this
 	 * pdir.  wire_count has the same value for all pdir pages, so check
 	 * the last page.
 	 */
 	if (m->wire_count == 0) {
 		pdir_free(mmu, pmap, pp2d_idx);
 		return (1);
 	}
 	return (0);
 }
 
 /*
  * Increment hold count for pdir pages. This routine is used when a new
  * ptbl entry is being inserted into the pdir.
  */
 static void
 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
 {
 	vm_paddr_t	pa;
 	vm_page_t	m;
 	int		i;
 
 	KASSERT((pmap != kernel_pmap),
 		("pdir_hold: holding kernel pdir!"));
 
 	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
 
 	for (i = 0; i < PDIR_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 				(vm_offset_t) pdir + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count++;
 	}
 }
 
 /* Allocate page table. */
 static pte_t   *
 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
     boolean_t nosleep)
 {
 	vm_page_t	mtbl  [PTBL_PAGES];
 	vm_page_t	m;
 	struct ptbl_buf *pbuf;
 	unsigned int	pidx;
 	pte_t          *ptbl;
 	int		i, j;
 	int		req;
 
 	KASSERT((pdir[pdir_idx] == NULL),
 		("%s: valid ptbl entry exists!", __func__));
 
 	pbuf = ptbl_buf_alloc();
 	if (pbuf == NULL)
 		panic("%s: couldn't alloc kernel virtual memory", __func__);
 
 	ptbl = (pte_t *) pbuf->kva;
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pidx = (PTBL_PAGES * pdir_idx) + i;
 		req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
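 		/*
 		 * Drop the pmap and pv list locks while waiting for a free
 		 * page; if the caller cannot sleep, undo the partial
 		 * allocation and fail instead.
 		 */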
 		while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			if (nosleep) {
 				ptbl_free_pmap_ptbl(pmap, ptbl);
 				for (j = 0; j < i; j++)
 					vm_page_free(mtbl[j]);
 				atomic_subtract_int(&vm_cnt.v_wire_count, i);
 				return (NULL);
 			}
 			VM_WAIT;
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
 		mtbl[i] = m;
 	}
 
 	/* Map the allocated pages into kernel_pmap. */
 	mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
 	/* Zero whole ptbl. */
 	bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
 
 	/* Add pbuf to the pmap ptbl bufs list. */
 	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
 
 	return (ptbl);
 }
 
 /* Free ptbl pages and invalidate pdir entry. */
 static void
 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
 {
 	pte_t          *ptbl;
 	vm_paddr_t	pa;
 	vm_offset_t	va;
 	vm_page_t	m;
 	int		i;
 
 	ptbl = pdir[pdir_idx];
 
 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
 
 	pdir[pdir_idx] = NULL;
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
 		pa = pte_vatopa(mmu, kernel_pmap, va);
 		m = PHYS_TO_VM_PAGE(pa);
 		vm_page_free_zero(m);
 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		pmap_kremove(va);
 	}
 
 	ptbl_free_pmap_ptbl(pmap, ptbl);
 }
 
 /*
  * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
  * when removing pte entry from ptbl.
  * 
  * Return 1 if ptbl pages were freed.
  */
 static int
 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	pte_t          *ptbl;
 	vm_paddr_t	pa;
 	vm_page_t	m;
 	u_int		pp2d_idx;
 	pte_t         **pdir;
 	u_int		pdir_idx;
 	int		i;
 
 	pp2d_idx = PP2D_IDX(va);
 	pdir_idx = PDIR_IDX(va);
 
 	KASSERT((pmap != kernel_pmap),
 		("ptbl_unhold: unholding kernel ptbl!"));
 
 	pdir = pmap->pm_pp2d[pp2d_idx];
 	ptbl = pdir[pdir_idx];
 
 	KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
 	    ("ptbl_unhold: non kva ptbl"));
 
 	/* decrement hold count */
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 		    (vm_offset_t) ptbl + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count--;
 	}
 
 	/*
 	 * Free ptbl pages if there are no pte entries in this ptbl.
 	 * wire_count has the same value for all ptbl pages, so check the
 	 * last page.
 	 */
 	if (m->wire_count == 0) {
 		/* A pair of indirect entries might point to this ptbl page */
 #if 0
 		tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
 				TLB_SIZE_1M, MAS6_SIND);
 		tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
 				TLB_SIZE_1M, MAS6_SIND);
 #endif
 		ptbl_free(mmu, pmap, pdir, pdir_idx);
 		pdir_unhold(mmu, pmap, pp2d_idx);
 		return (1);
 	}
 	return (0);
 }
 
 /*
  * Increment hold count for ptbl pages. This routine is used when a new pte
  * entry is being inserted into the ptbl.
  */
 static void
 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
 {
 	vm_paddr_t	pa;
 	pte_t          *ptbl;
 	vm_page_t	m;
 	int		i;
 
 	KASSERT((pmap != kernel_pmap),
 		("ptbl_hold: holding kernel ptbl!"));
 
 	ptbl = pdir[pdir_idx];
 
 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 				(vm_offset_t) ptbl + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count++;
 	}
 }
 #else
 
 /* Initialize pool of kva ptbl buffers. */
 static void
 ptbl_init(void)
 {
 	int i;
 
 	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
 	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
 	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
 	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
 
 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
 	TAILQ_INIT(&ptbl_buf_freelist);
 
 	for (i = 0; i < PTBL_BUFS; i++) {
 		ptbl_bufs[i].kva =
 		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
 	}
 }
 
 /* Get a ptbl_buf from the freelist. */
 static struct ptbl_buf *
 ptbl_buf_alloc(void)
 {
 	struct ptbl_buf *buf;
 
 	mtx_lock(&ptbl_buf_freelist_lock);
 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
 	if (buf != NULL)
 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
 	mtx_unlock(&ptbl_buf_freelist_lock);
 
 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
 
 	return (buf);
 }
 
 /* Return ptbl buff to free pool. */
 static void
 ptbl_buf_free(struct ptbl_buf *buf)
 {
 
 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
 
 	mtx_lock(&ptbl_buf_freelist_lock);
 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
 	mtx_unlock(&ptbl_buf_freelist_lock);
 }
 
 /*
  * Search the pmap's list of allocated ptbl bufs for the buf backing the
  * given ptbl and return it to the free pool.
  */
 static void
 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
 {
 	struct ptbl_buf *pbuf;
 
 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
 		if (pbuf->kva == (vm_offset_t)ptbl) {
 			/* Remove from pmap ptbl buf list. */
 			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
 
 			/* Free corresponding ptbl buf. */
 			ptbl_buf_free(pbuf);
 			break;
 		}
 }
 
 /* Allocate page table. */
 static pte_t *
 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
 {
 	vm_page_t mtbl[PTBL_PAGES];
 	vm_page_t m;
 	struct ptbl_buf *pbuf;
 	unsigned int pidx;
 	pte_t *ptbl;
 	int i, j;
 
 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
 	    (pmap == kernel_pmap), pdir_idx);
 
 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
 	    ("ptbl_alloc: invalid pdir_idx"));
 	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
 	    ("pte_alloc: valid ptbl entry exists!"));
 
 	pbuf = ptbl_buf_alloc();
 	if (pbuf == NULL)
 		panic("pte_alloc: couldn't alloc kernel virtual memory");
 		
 	ptbl = (pte_t *)pbuf->kva;
 
 	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pidx = (PTBL_PAGES * pdir_idx) + i;
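 		/*
 		 * Drop the pmap and pv list locks while waiting for a free
 		 * page; if the caller cannot sleep, undo the partial
 		 * allocation and fail instead.
 		 */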
 		while ((m = vm_page_alloc(NULL, pidx,
 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			if (nosleep) {
 				ptbl_free_pmap_ptbl(pmap, ptbl);
 				for (j = 0; j < i; j++)
 					vm_page_free(mtbl[j]);
 				atomic_subtract_int(&vm_cnt.v_wire_count, i);
 				return (NULL);
 			}
 			VM_WAIT;
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
 		mtbl[i] = m;
 	}
 
 	/* Map allocated pages into kernel_pmap. */
 	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
 
 	/* Zero whole ptbl. */
 	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
 
 	/* Add pbuf to the pmap ptbl bufs list. */
 	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
 
 	return (ptbl);
 }
 
 /* Free ptbl pages and invalidate pdir entry. */
 static void
 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 {
 	pte_t *ptbl;
 	vm_paddr_t pa;
 	vm_offset_t va;
 	vm_page_t m;
 	int i;
 
 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
 	    (pmap == kernel_pmap), pdir_idx);
 
 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
 	    ("ptbl_free: invalid pdir_idx"));
 
 	ptbl = pmap->pm_pdir[pdir_idx];
 
 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
 
 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
 
 	/*
 	 * Invalidate the pdir entry as soon as possible, so that other CPUs
 	 * don't attempt to look up the page tables we are releasing.
 	 */
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 	
 	pmap->pm_pdir[pdir_idx] = NULL;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
 		pa = pte_vatopa(mmu, kernel_pmap, va);
 		m = PHYS_TO_VM_PAGE(pa);
 		vm_page_free_zero(m);
 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 		mmu_booke_kremove(mmu, va);
 	}
 
 	ptbl_free_pmap_ptbl(pmap, ptbl);
 }
 
 /*
  * Decrement ptbl pages hold count and attempt to free ptbl pages.
  * Called when removing pte entry from ptbl.
  *
  * Return 1 if ptbl pages were freed.
  */
 static int
 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 {
 	pte_t *ptbl;
 	vm_paddr_t pa;
 	vm_page_t m;
 	int i;
 
 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
 	    (pmap == kernel_pmap), pdir_idx);
 
 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
 	    ("ptbl_unhold: invalid pdir_idx"));
 	KASSERT((pmap != kernel_pmap),
 	    ("ptbl_unhold: unholding kernel ptbl!"));
 
 	ptbl = pmap->pm_pdir[pdir_idx];
 
 	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
 	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
 	    ("ptbl_unhold: non kva ptbl"));
 
 	/* decrement hold count */
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count--;
 	}
 
 	/*
 	 * Free ptbl pages if there are no pte entries in this ptbl.
 	 * wire_count has the same value for all ptbl pages, so check the last
 	 * page.
 	 */
 	if (m->wire_count == 0) {
 		ptbl_free(mmu, pmap, pdir_idx);
 
 		//debugf("ptbl_unhold: e (freed ptbl)\n");
 		return (1);
 	}
 
 	return (0);
 }
 
 /*
  * Increment hold count for ptbl pages. This routine is used when a new pte
  * entry is being inserted into the ptbl.
  */
 static void
 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 {
 	vm_paddr_t pa;
 	pte_t *ptbl;
 	vm_page_t m;
 	int i;
 
 	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
 	    pdir_idx);
 
 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
 	    ("ptbl_hold: invalid pdir_idx"));
 	KASSERT((pmap != kernel_pmap),
 	    ("ptbl_hold: holding kernel ptbl!"));
 
 	ptbl = pmap->pm_pdir[pdir_idx];
 
 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pa = pte_vatopa(mmu, kernel_pmap,
 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
 		m = PHYS_TO_VM_PAGE(pa);
 		m->wire_count++;
 	}
 }
 #endif
 
 /* Allocate pv_entry structure. */
 pv_entry_t
 pv_alloc(void)
 {
 	pv_entry_t pv;
 
 	pv_entry_count++;
 	if (pv_entry_count > pv_entry_high_water)
 		pagedaemon_wakeup();
 	pv = uma_zalloc(pvzone, M_NOWAIT);
 
 	return (pv);
 }
 
 /* Free pv_entry structure. */
 static __inline void
 pv_free(pv_entry_t pve)
 {
 
 	pv_entry_count--;
 	uma_zfree(pvzone, pve);
 }
 
 
 /* Allocate and initialize pv_entry structure. */
 static void
 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
 	pv_entry_t pve;
 
 	//int su = (pmap == kernel_pmap);
 	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
 	//	(u_int32_t)pmap, va, (u_int32_t)m);
 
 	pve = pv_alloc();
 	if (pve == NULL)
 		panic("pv_insert: no pv entries!");
 
 	pve->pv_pmap = pmap;
 	pve->pv_va = va;
 
 	/* add to pv_list */
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
 
 	//debugf("pv_insert: e\n");
 }
 
 /* Destroy pv entry. */
 static void
 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
 	pv_entry_t pve;
 
 	//int su = (pmap == kernel_pmap);
 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 
 	/* find pv entry */
 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
 			/* remove from pv_list */
 			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
 			if (TAILQ_EMPTY(&m->md.pv_list))
 				vm_page_aflag_clear(m, PGA_WRITEABLE);
 
 			/* free pv entry struct */
 			pv_free(pve);
 			break;
 		}
 	}
 
 	//debugf("pv_remove: e\n");
 }
 
 #ifdef __powerpc64__
 /*
  * Clean pte entry, try to free page table page if requested.
  * 
  * Return 1 if ptbl pages were freed, otherwise return 0.
  */
 static int
 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
 {
 	vm_page_t	m;
 	pte_t          *pte;
 
 	pte = pte_find(mmu, pmap, va);
 	KASSERT(pte != NULL, ("%s: NULL pte", __func__));
 
 	if (!PTE_ISVALID(pte))
 		return (0);
 
 	/* Get vm_page_t for mapped pte. */
 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
 	if (PTE_ISWIRED(pte))
 		pmap->pm_stats.wired_count--;
 
 	/* Handle managed entry. */
 	if (PTE_ISMANAGED(pte)) {
 
 		/* Handle modified pages. */
 		if (PTE_ISMODIFIED(pte))
 			vm_page_dirty(m);
 
 		/* Referenced pages. */
 		if (PTE_ISREFERENCED(pte))
 			vm_page_aflag_set(m, PGA_REFERENCED);
 
 		/* Remove pv_entry from pv_list. */
 		pv_remove(pmap, va, m);
 	} else if (m->md.pv_tracked) {
 		pv_remove(pmap, va, m);
 		if (TAILQ_EMPTY(&m->md.pv_list))
 			m->md.pv_tracked = false;
 	}
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
 	*pte = 0;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 
 	pmap->pm_stats.resident_count--;
 
 	if (flags & PTBL_UNHOLD) {
 		return (ptbl_unhold(mmu, pmap, va));
 	}
 	return (0);
 }
 
 /*
  * Allocate a page directory (an array of pointers to page tables); do not
  * preallocate the page tables themselves.
  */
 static pte_t  **
 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
 {
 	vm_page_t	mtbl  [PDIR_PAGES];
 	vm_page_t	m;
 	struct ptbl_buf *pbuf;
 	pte_t         **pdir;
 	unsigned int	pidx;
 	int		i;
 	int		req;
 
 	pbuf = ptbl_buf_alloc();
 
 	if (pbuf == NULL)
 		panic("%s: couldn't alloc kernel virtual memory", __func__);
 
 	/* Allocate pdir pages, this will sleep! */
 	for (i = 0; i < PDIR_PAGES; i++) {
 		pidx = (PDIR_PAGES * pp2d_idx) + i;
 		req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 		while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
 			PMAP_UNLOCK(pmap);
 			VM_WAIT;
 			PMAP_LOCK(pmap);
 		}
 		mtbl[i] = m;
 	}
 
 	/* Map the allocated pages into kernel_pmap. */
 	pdir = (pte_t **) pbuf->kva;
 	pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
 
 	/* Zero whole pdir. */
 	bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
 
 	/* Add pdir to the pmap pdir bufs list. */
 	TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
 
 	return (pdir);
 }
 
 /*
  * Insert PTE for a given page and virtual address.
  */
 static int
 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
     boolean_t nosleep)
 {
 	unsigned int	pp2d_idx = PP2D_IDX(va);
 	unsigned int	pdir_idx = PDIR_IDX(va);
 	unsigned int	ptbl_idx = PTBL_IDX(va);
 	pte_t          *ptbl, *pte;
 	pte_t         **pdir;
 
 	/* Get the page directory pointer. */
 	pdir = pmap->pm_pp2d[pp2d_idx];
 	if (pdir == NULL)
 		pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
 
 	/* Get the page table pointer. */
 	ptbl = pdir[pdir_idx];
 
 	if (ptbl == NULL) {
 		/* Allocate page table pages. */
 		ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
 		if (ptbl == NULL) {
 			KASSERT(nosleep, ("nosleep and NULL ptbl"));
 			return (ENOMEM);
 		}
 	} else {
 		/*
 		 * Check if there is a valid mapping for the requested va;
 		 * if there is, remove it.
 		 */
 		pte = &pdir[pdir_idx][ptbl_idx];
 		if (PTE_ISVALID(pte)) {
 			pte_remove(mmu, pmap, va, PTBL_HOLD);
 		} else {
 			/*
 			 * pte is not used, increment hold count for ptbl
 			 * pages.
 			 */
 			if (pmap != kernel_pmap)
 				ptbl_hold(mmu, pmap, pdir, pdir_idx);
 		}
 	}
 
 	if (pdir[pdir_idx] == NULL) {
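 		/*
 		 * Hook the freshly allocated ptbl into the pdir, taking a
 		 * hold on the pdir pages if the pdir is already installed
 		 * in a user pmap.
 		 */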
 		if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
 			pdir_hold(mmu, pmap, pdir);
 		pdir[pdir_idx] = ptbl;
 	}
 	if (pmap->pm_pp2d[pp2d_idx] == NULL)
 		pmap->pm_pp2d[pp2d_idx] = pdir;
 
 	/*
 	 * Insert pv_entry into pv_list for mapped page if part of managed
 	 * memory.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		flags |= PTE_MANAGED;
 
 		/* Create and insert pv entry. */
 		pv_insert(pmap, va, m);
 	}
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
 	pmap->pm_stats.resident_count++;
 	pte = &pdir[pdir_idx][ptbl_idx];
 	*pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
 	*pte |= (PTE_VALID | flags);
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 
 	return (0);
 }
 
 /* Return the pa for the given pmap/va. */
 static	vm_paddr_t
 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t	pa = 0;
 	pte_t          *pte;
 
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte))
 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
 	return (pa);
 }
 
 
 /* Set up kernel page tables. */
 static void
 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
 {
 	int		i, j;
 	vm_offset_t	va;
 	pte_t		*pte;
 
 	va = addr;
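 	/*
 	 * The block at 'pdir' holds the kernel page directory pages first,
 	 * followed by all of the kernel page table pages.
 	 */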
 	/* Initialize kernel pdir */
 	for (i = 0; i < kernel_pdirs; i++) {
 		kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
 		    (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
 		for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
 		    j < PDIR_NENTRIES; j++) {
 			kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
 			    (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
 			     (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
 		}
 	}
 
 	/*
 	 * Fill in PTEs covering kernel code and data. They are not required
 	 * for address translation, as this area is covered by static TLB1
 	 * entries, but they are needed for pte_vatopa() to work correctly
 	 * with kernel area addresses.
 	 */
 	for (va = addr; va < data_end; va += PAGE_SIZE) {
 		pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
 		    PTE_VALID | PTE_PS_4KB;
 	}
 }
 #else
 /*
  * Clean pte entry, try to free page table page if requested.
  *
  * Return 1 if ptbl pages were freed, otherwise return 0.
  */
 static int
 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
 	unsigned int ptbl_idx = PTBL_IDX(va);
 	vm_page_t m;
 	pte_t *ptbl;
 	pte_t *pte;
 
 	//int su = (pmap == kernel_pmap);
 	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
 	//		su, (u_int32_t)pmap, va, flags);
 
 	ptbl = pmap->pm_pdir[pdir_idx];
 	KASSERT(ptbl, ("pte_remove: null ptbl"));
 
 	pte = &ptbl[ptbl_idx];
 
 	if (pte == NULL || !PTE_ISVALID(pte))
 		return (0);
 
 	if (PTE_ISWIRED(pte))
 		pmap->pm_stats.wired_count--;
 
 	/* Get vm_page_t for mapped pte. */
 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
 	/* Handle managed entry. */
 	if (PTE_ISMANAGED(pte)) {
 
 		if (PTE_ISMODIFIED(pte))
 			vm_page_dirty(m);
 
 		if (PTE_ISREFERENCED(pte))
 			vm_page_aflag_set(m, PGA_REFERENCED);
 
 		pv_remove(pmap, va, m);
 	} else if (m->md.pv_tracked) {
 		/*
 		 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
 		 * used.  This is needed by the NCSW support code for fast
 		 * VA<->PA translation.
 		 */
 		pv_remove(pmap, va, m);
 		if (TAILQ_EMPTY(&m->md.pv_list))
 			m->md.pv_tracked = false;
 	}
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
 	*pte = 0;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 
 	pmap->pm_stats.resident_count--;
 
 	if (flags & PTBL_UNHOLD) {
 		//debugf("pte_remove: e (unhold)\n");
 		return (ptbl_unhold(mmu, pmap, pdir_idx));
 	}
 
 	//debugf("pte_remove: e\n");
 	return (0);
 }
 
 /*
  * Insert PTE for a given page and virtual address.
  */
 static int
 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
     boolean_t nosleep)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
 	unsigned int ptbl_idx = PTBL_IDX(va);
 	pte_t *ptbl, *pte;
 
 	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
 	    pmap == kernel_pmap, pmap, va);
 
 	/* Get the page table pointer. */
 	ptbl = pmap->pm_pdir[pdir_idx];
 
 	if (ptbl == NULL) {
 		/* Allocate page table pages. */
 		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
 		if (ptbl == NULL) {
 			KASSERT(nosleep, ("nosleep and NULL ptbl"));
 			return (ENOMEM);
 		}
 	} else {
 		/*
 		 * Check if there is a valid mapping for the requested
 		 * va; if there is, remove it.
 		 */
 		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
 		if (PTE_ISVALID(pte)) {
 			pte_remove(mmu, pmap, va, PTBL_HOLD);
 		} else {
 			/*
 			 * pte is not used, increment hold count
 			 * for ptbl pages.
 			 */
 			if (pmap != kernel_pmap)
 				ptbl_hold(mmu, pmap, pdir_idx);
 		}
 	}
 
 	/*
 	 * Insert pv_entry into pv_list for mapped page if part of managed
 	 * memory.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		flags |= PTE_MANAGED;
 
 		/* Create and insert pv entry. */
 		pv_insert(pmap, va, m);
 	}
 
 	pmap->pm_stats.resident_count++;
 	
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
 	if (pmap->pm_pdir[pdir_idx] == NULL) {
 		/*
 		 * If we just allocated a new page table, hook it in
 		 * the pdir.
 		 */
 		pmap->pm_pdir[pdir_idx] = ptbl;
 	}
 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
 	*pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
 	*pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 	return (0);
 }
 
 /* Return the pa for the given pmap/va. */
 static vm_paddr_t
 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t pa = 0;
 	pte_t *pte;
 
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte))
 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
 	return (pa);
 }
 
 /* Get a pointer to a PTE in a page table. */
 static pte_t *
 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
 	unsigned int ptbl_idx = PTBL_IDX(va);
 
 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
 
 	if (pmap->pm_pdir[pdir_idx])
 		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
 
 	return (NULL);
 }
 
 /* Set up kernel page tables. */
 static void
 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
 {
 	int		i;
 	vm_offset_t	va;
 	pte_t		*pte;
 
 	/* Initialize kernel pdir */
 	for (i = 0; i < kernel_ptbls; i++)
 		kernel_pmap->pm_pdir[kptbl_min + i] =
 		    (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
 
 	/*
 	 * Fill in PTEs covering kernel code and data. They are not required
 	 * for address translation, as this area is covered by static TLB1
 	 * entries, but they are needed for pte_vatopa() to work correctly
 	 * with kernel area addresses.
 	 */
 	for (va = addr; va < data_end; va += PAGE_SIZE) {
 		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
 		    PTE_VALID | PTE_PS_4KB;
 	}
 }
 #endif
 
 /**************************************************************************/
 /* PMAP related */
 /**************************************************************************/
 
 /*
  * This is called during booke_init, before the system is really initialized.
  */
 static void
 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 {
 	vm_paddr_t phys_kernelend;
 	struct mem_region *mp, *mp1;
 	int cnt, i, j;
 	vm_paddr_t s, e, sz;
 	vm_paddr_t physsz, hwphyssz;
 	u_int phys_avail_count;
 	vm_size_t kstack0_sz;
 	vm_offset_t kernel_pdir, kstack0;
 	vm_paddr_t kstack0_phys;
 	void *dpcpu;
 
 	debugf("mmu_booke_bootstrap: entered\n");
 
 	/* Set interesting system properties */
 	hw_direct_map = 0;
 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
 	elf32_nxstack = 1;
 #endif
 
 	/* Initialize invalidation mutex */
 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
 
 	/* Read TLB0 size and associativity. */
 	tlb0_get_tlbconf();
 
 	/*
 	 * Align kernel start and end address (kernel image).
 	 * Note that kernel end does not necessarily relate to kernsize.
 	 * kernsize is the size of the kernel that is actually mapped.
 	 */
 	kernstart = trunc_page(start);
 	data_start = round_page(kernelend);
 	data_end = data_start;
 
 	/*
 	 * Addresses of preloaded modules (like file systems) use
 	 * physical addresses. Make sure we relocate those into
 	 * virtual addresses.
 	 */
 	preload_addr_relocate = kernstart - kernload;
 
 	/* Allocate the dynamic per-cpu area. */
 	dpcpu = (void *)data_end;
 	data_end += DPCPU_SIZE;
 
 	/* Allocate space for the message buffer. */
 	msgbufp = (struct msgbuf *)data_end;
 	data_end += msgbufsize;
 	debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
 	    (uintptr_t)msgbufp, data_end);
 
 	data_end = round_page(data_end);
 
 	/* Allocate space for ptbl_bufs. */
 	ptbl_bufs = (struct ptbl_buf *)data_end;
 	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
 	debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
 	    (uintptr_t)ptbl_bufs, data_end);
 
 	data_end = round_page(data_end);
 
 	/* Allocate PTE tables for kernel KVA. */
 	kernel_pdir = data_end;
 	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
 	    PDIR_SIZE);
 #ifdef __powerpc64__
 	kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
 	data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
 #endif
 	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
 	debugf(" kernel ptbls: %d\n", kernel_ptbls);
 	debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
 	    kernel_pdir, data_end);
 
 	debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
 	if (data_end - kernstart > kernsize) {
 		kernsize += tlb1_mapin_region(kernstart + kernsize,
 		    kernload + kernsize, (data_end - kernstart) - kernsize);
 	}
 	data_end = kernstart + kernsize;
 	debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
 
 	/*
 	 * Clear the structures - note we can only do it safely after the
 	 * possible additional TLB1 translations are in place (above) so that
 	 * the whole range up to the currently calculated 'data_end' is covered.
 	 */
 	dpcpu_init(dpcpu, 0);
 	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
 #ifdef __powerpc64__
 	memset((void *)kernel_pdir, 0,
 	    kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
 	    kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
 #else
 	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
 #endif
 
 	/*******************************************************/
 	/* Set the start and end of kva. */
 	/*******************************************************/
 	virtual_avail = round_page(data_end);
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	/* Allocate KVA space for page zero/copy operations. */
 	zero_page_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	copy_page_src_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	copy_page_dst_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	debugf("zero_page_va = 0x%08x\n", zero_page_va);
 	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
 	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
 
 	/* Initialize page zero/copy mutexes. */
 	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
 	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
 
 	/* Allocate KVA space for ptbl bufs. */
 	ptbl_buf_pool_vabase = virtual_avail;
 	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
 	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
 	    ptbl_buf_pool_vabase, virtual_avail);
 
 	/* Calculate corresponding physical addresses for the kernel region. */
 	phys_kernelend = kernload + kernsize;
 	debugf("kernel image and allocated data:\n");
 	debugf(" kernload    = 0x%09llx\n", (uint64_t)kernload);
 	debugf(" kernstart   = 0x%08x\n", kernstart);
 	debugf(" kernsize    = 0x%08x\n", kernsize);
 
 	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
 		panic("mmu_booke_bootstrap: phys_avail too small");
 
 	/*
 	 * Remove kernel physical address range from avail regions list. Page
 	 * align all regions.  Non-page aligned memory isn't very interesting
 	 * to us.  Also, sort the entries for ascending addresses.
 	 */
 
 	/* Retrieve phys/avail mem regions */
 	mem_regions(&physmem_regions, &physmem_regions_sz,
 	    &availmem_regions, &availmem_regions_sz);
 	sz = 0;
 	cnt = availmem_regions_sz;
 	debugf("processing avail regions:\n");
 	for (mp = availmem_regions; mp->mr_size; mp++) {
 		s = mp->mr_start;
 		e = mp->mr_start + mp->mr_size;
 		debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
 		/* Check whether this region holds all of the kernel. */
 		if (s < kernload && e > phys_kernelend) {
 			availmem_regions[cnt].mr_start = phys_kernelend;
 			availmem_regions[cnt++].mr_size = e - phys_kernelend;
 			e = kernload;
 		}
 		/* Check whether this region starts within the kernel. */
 		if (s >= kernload && s < phys_kernelend) {
 			if (e <= phys_kernelend)
 				goto empty;
 			s = phys_kernelend;
 		}
 		/* Now check whether this region ends within the kernel. */
 		if (e > kernload && e <= phys_kernelend) {
 			if (s >= kernload)
 				goto empty;
 			e = kernload;
 		}
 		/* Now page align the start and size of the region. */
 		s = round_page(s);
 		e = trunc_page(e);
 		if (e < s)
 			e = s;
 		sz = e - s;
 		debugf("%09jx-%09jx = %jx\n",
 		    (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
 
 		/* Check whether some memory is left here. */
 		if (sz == 0) {
 		empty:
 			memmove(mp, mp + 1,
 			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
 			cnt--;
 			mp--;
 			continue;
 		}
 
 		/* Do an insertion sort. */
 		for (mp1 = availmem_regions; mp1 < mp; mp1++)
 			if (s < mp1->mr_start)
 				break;
 		if (mp1 < mp) {
 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
 			mp1->mr_start = s;
 			mp1->mr_size = sz;
 		} else {
 			mp->mr_start = s;
 			mp->mr_size = sz;
 		}
 	}
 	availmem_regions_sz = cnt;
 
 	/*******************************************************/
 	/* Steal physical memory for kernel stack from the end */
 	/* of the first avail region                           */
 	/*******************************************************/
 	kstack0_sz = kstack_pages * PAGE_SIZE;
 	kstack0_phys = availmem_regions[0].mr_start +
 	    availmem_regions[0].mr_size;
 	kstack0_phys -= kstack0_sz;
 	availmem_regions[0].mr_size -= kstack0_sz;
 
 	/*******************************************************/
 	/* Fill in phys_avail table, based on availmem_regions */
 	/*******************************************************/
 	phys_avail_count = 0;
 	physsz = 0;
 	hwphyssz = 0;
 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
 
 	debugf("fill in phys_avail:\n");
 	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
 
 		debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
 		    (uintmax_t)availmem_regions[i].mr_start,
 		    (uintmax_t)availmem_regions[i].mr_start +
 		        availmem_regions[i].mr_size,
 		    (uintmax_t)availmem_regions[i].mr_size);
 
 		if (hwphyssz != 0 &&
 		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
 			debugf(" hw.physmem adjust\n");
 			if (physsz < hwphyssz) {
 				phys_avail[j] = availmem_regions[i].mr_start;
 				phys_avail[j + 1] =
 				    availmem_regions[i].mr_start +
 				    hwphyssz - physsz;
 				physsz = hwphyssz;
 				phys_avail_count++;
 			}
 			break;
 		}
 
 		phys_avail[j] = availmem_regions[i].mr_start;
 		phys_avail[j + 1] = availmem_regions[i].mr_start +
 		    availmem_regions[i].mr_size;
 		phys_avail_count++;
 		physsz += availmem_regions[i].mr_size;
 	}
 	physmem = btoc(physsz);
 
 	/* Calculate the last available physical address. */
 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
 		;
 	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
 	debugf("Maxmem = 0x%08lx\n", Maxmem);
 	debugf("phys_avail_count = %d\n", phys_avail_count);
 	debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
 	    (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
 
 	/*******************************************************/
 	/* Initialize (statically allocated) kernel pmap. */
 	/*******************************************************/
 	PMAP_LOCK_INIT(kernel_pmap);
 #ifndef __powerpc64__
 	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
 #endif
 
 	debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
 	kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
 	for (i = 0; i < MAXCPU; i++) {
 		kernel_pmap->pm_tid[i] = TID_KERNEL;
 		
 		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
 		tidbusy[i][TID_KERNEL] = kernel_pmap;
 	}
 
 	/* Mark kernel_pmap active on all CPUs */
 	CPU_FILL(&kernel_pmap->pm_active);
 
 	/*
 	 * Initialize the global pv list lock.
 	 */
 	rw_init(&pvh_global_lock, "pmap pv global");
 
 	/*******************************************************/
 	/* Final setup */
 	/*******************************************************/
 
 	/* Enter kstack0 into kernel map, provide guard page */
 	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
 	thread0.td_kstack = kstack0;
 	thread0.td_kstack_pages = kstack_pages;
 
 	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
 	debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
 	    kstack0_phys, kstack0_phys + kstack0_sz);
 	debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
 	    kstack0, kstack0 + kstack0_sz);
 	
 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
 	for (i = 0; i < kstack_pages; i++) {
 		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
 		kstack0 += PAGE_SIZE;
 		kstack0_phys += PAGE_SIZE;
 	}
 
 	pmap_bootstrapped = 1;
 	
 	debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
 	debugf("virtual_end   = %"PRI0ptrX"\n", virtual_end);
 
 	debugf("mmu_booke_bootstrap: exit\n");
 }
 
 #ifdef SMP
 void
 tlb1_ap_prep(void)
 {
 	tlb_entry_t *e, tmp;
 	unsigned int i;
 
 	/* Prepare TLB1 image for AP processors */
 	e = __boot_tlb1;
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&tmp, i);
 
 		if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
 			memcpy(e++, &tmp, sizeof(tmp));
 	}
 }
 
 void
 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
 {
 	int i;
 
 	/*
 	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
 	 * have the snapshot of its contents in the s/w __boot_tlb1[] table
 	 * created by tlb1_ap_prep(), so use these values directly to
 	 * (re)program AP's TLB1 hardware.
 	 *
 	 * Start at index 1 because index 0 has the kernel map.
 	 */
 	for (i = 1; i < TLB1_ENTRIES; i++) {
 		if (__boot_tlb1[i].mas1 & MAS1_VALID)
 			tlb1_write_entry(&__boot_tlb1[i], i);
 	}
 
 	set_mas4_defaults();
 }
 #endif
 
 static void
 booke_pmap_init_qpages(void)
 {
 	struct pcpu *pc;
 	int i;
 
 	CPU_FOREACH(i) {
 		pc = pcpu_find(i);
 		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
 		if (pc->pc_qmap_addr == 0)
 			panic("pmap_init_qpages: unable to allocate KVA");
 	}
 }
 
 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
 
 /*
  * Get the physical page address for the given pmap/virtual address.
  */
 static vm_paddr_t
 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t pa;
 
 	PMAP_LOCK(pmap);
 	pa = pte_vatopa(mmu, pmap, va);
 	PMAP_UNLOCK(pmap);
 
 	return (pa);
 }
 
 /*
  * Extract the physical page address associated with the given
  * kernel virtual address.
  */
 static vm_paddr_t
 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
 {
 	tlb_entry_t e;
 	vm_paddr_t p = 0;
 	int i;
 
 	if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
 		p = pte_vatopa(mmu, kernel_pmap, va);
 	
 	if (p == 0) {
 		/* Check TLB1 mappings */
 		for (i = 0; i < TLB1_ENTRIES; i++) {
 			tlb1_read_entry(&e, i);
 			if (!(e.mas1 & MAS1_VALID))
 				continue;
 			if (va >= e.virt && va < e.virt + e.size)
 				return (e.phys + (va - e.virt));
 		}
 	}
 
 	return (p);
 }
 
 /*
  * Initialize the pmap module.
  * Called by vm_init, to initialize any structures that the pmap
  * system needs to map virtual memory.
  */
 static void
 mmu_booke_init(mmu_t mmu)
 {
 	int shpgperproc = PMAP_SHPGPERPROC;
 
 	/*
 	 * Initialize the address space (zone) for the pv entries.  Set a
 	 * high water mark so that the system can recover from excessive
 	 * numbers of pv entries.
 	 */
 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
 
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
 	uma_zone_reserve_kva(pvzone, pv_entry_max);
 
 	/* Pre-fill pvzone with initial number of pv entries. */
 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
 
 	/* Initialize ptbl allocation. */
 	ptbl_init();
 }
 
 /*
  * Map a list of wired pages into kernel virtual address space.  This is
  * intended for temporary mappings which do not need page modification or
  * references recorded.  Existing mappings in the region are overwritten.
  */
 static void
 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
 		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
 }
 
 /*
  * Remove page mappings from kernel virtual address space.  Intended for
  * temporary mappings entered by mmu_booke_qenter.
  */
 static void
 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
 		mmu_booke_kremove(mmu, va);
 		va += PAGE_SIZE;
 	}
 }
 
 /*
  * Map a wired page into kernel virtual address space.
  */
 static void
 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 {
 
 	mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
 }
 
 static void
 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 	uint32_t flags;
 	pte_t *pte;
 
 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
 
 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
 	flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
 	flags |= PTE_PS_4KB;
 
 	pte = pte_find(mmu, kernel_pmap, va);
 	KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va.  NULL PTE"));
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 	
 	if (PTE_ISVALID(pte)) {
 	
 		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
 
 		/* Flush entry from TLB0 */
 		tlb0_flush_entry(va);
 	}
 
 	*pte = PTE_RPN_FROM_PA(pa) | flags;
 
 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
 	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
 
 	/* Flush the real memory from the instruction cache. */
 	if ((flags & (PTE_I | PTE_G)) == 0)
 		__syncicache((void *)va, PAGE_SIZE);
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 }
 
 /*
  * Remove a page from the kernel page table.
  */
 static void
 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
 {
 	pte_t *pte;
 
 	CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
 
 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
 	    (va <= VM_MAX_KERNEL_ADDRESS)),
 	    ("mmu_booke_kremove: invalid va"));
 
 	pte = pte_find(mmu, kernel_pmap, va);
 
 	if (!PTE_ISVALID(pte)) {
 	
 		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
 
 		return;
 	}
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	/* Invalidate entry in TLB0, update PTE. */
 	tlb0_flush_entry(va);
 	*pte = 0;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 }
 
 /*
  * Provide a kernel pointer corresponding to a given userland pointer.
  * The returned pointer is valid until the next time this function is
  * called in this thread. This is used internally in copyin/copyout.
  */
 int
 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen)
 {
 
 	if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
 		return (EFAULT);
 
 	*kaddr = (void *)(uintptr_t)uaddr;
 	if (klen)
 		*klen = ulen;
 
+	return (0);
+}
+
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+
+	if (addr < VM_MAXUSER_ADDRESS)
+		*is_user = 1;
+	else
+		*is_user = 0;
+
+	*decoded_addr = addr;
 	return (0);
 }
 
 /*
  * Initialize pmap associated with process 0.
  */
 static void
 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
 {
 
 	PMAP_LOCK_INIT(pmap);
 	mmu_booke_pinit(mmu, pmap);
 	PCPU_SET(curpmap, pmap);
 }
 
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
  */
 static void
 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
 {
 	int i;
 
 	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
 	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
 
 	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
 
 	for (i = 0; i < MAXCPU; i++)
 		pmap->pm_tid[i] = TID_NONE;
 	CPU_ZERO(&pmap->pm_active);
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 #ifdef __powerpc64__
 	bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
 	TAILQ_INIT(&pmap->pm_pdir_list);
 #else
 	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
 #endif
 	TAILQ_INIT(&pmap->pm_ptbl_list);
 }
 
 /*
  * Release any resources held by the given physical map.
  * Called when a pmap initialized by mmu_booke_pinit is being released.
  * Should only be called if the map contains no valid mappings.
  */
 static void
 mmu_booke_release(mmu_t mmu, pmap_t pmap)
 {
 
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
 }
 
 /*
  * Insert the given physical page at the specified virtual address in the
  * target physical map with the protection requested. If specified the page
  * will be wired down.
  */
 static int
 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, u_int flags, int8_t psind)
 {
 	int error;
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 	return (error);
 }
 
 static int
 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
 {
 	pte_t *pte;
 	vm_paddr_t pa;
 	uint32_t flags;
 	int error, su, sync;
 
 	pa = VM_PAGE_TO_PHYS(m);
 	su = (pmap == kernel_pmap);
 	sync = 0;
 
 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
 	//		"pa=0x%08x prot=0x%08x flags=%#x)\n",
 	//		(u_int32_t)pmap, su, pmap->pm_tid,
 	//		(u_int32_t)m, va, pa, prot, flags);
 
 	if (su) {
 		KASSERT(((va >= virtual_avail) &&
 		    (va <= VM_MAX_KERNEL_ADDRESS)),
 		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
 	} else {
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_enter_locked: user pmap, non user va"));
 	}
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
 	 * If there is an existing mapping, and the physical address has not
 	 * changed, this must be a protection or wiring change.
 	 */
 	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
 	    
 		/*
 		 * Before actually updating the PTE, calculate and prepare
 		 * its new flags value in a helper variable.
 		 */
 		flags = *pte;
 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
 
 		/* Wiring change, just update stats. */
 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
 			if (!PTE_ISWIRED(pte)) {
 				flags |= PTE_WIRED;
 				pmap->pm_stats.wired_count++;
 			}
 		} else {
 			if (PTE_ISWIRED(pte)) {
 				flags &= ~PTE_WIRED;
 				pmap->pm_stats.wired_count--;
 			}
 		}
 
 		if (prot & VM_PROT_WRITE) {
 			/* Add write permissions. */
 			flags |= PTE_SW;
 			if (!su)
 				flags |= PTE_UW;
 
 			if ((flags & PTE_MANAGED) != 0)
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		} else {
 			/* Handle modified pages, sense modify status. */
 
 			/*
 			 * The PTE_MODIFIED flag could have been set by the
 			 * underlying TLB miss handler since we last read it
 			 * (above), and other CPUs could have updated it as
 			 * well, so check the PTE directly rather than relying
 			 * on the saved local copy of the flags.
 			 */
 			if (PTE_ISMODIFIED(pte))
 				vm_page_dirty(m);
 		}
 
 		if (prot & VM_PROT_EXECUTE) {
 			flags |= PTE_SX;
 			if (!su)
 				flags |= PTE_UX;
 
 			/*
 			 * Check existing flags for execute permissions: if we
 			 * are turning execute permissions on, icache should
 			 * be flushed.
 			 */
 			if ((*pte & (PTE_UX | PTE_SX)) == 0)
 				sync++;
 		}
 
 		flags &= ~PTE_REFERENCED;
 
 		/*
 		 * The new flags value is all calculated -- only now actually
 		 * update the PTE.
 		 */
 		mtx_lock_spin(&tlbivax_mutex);
 		tlb_miss_lock();
 
 		tlb0_flush_entry(va);
 		*pte &= ~PTE_FLAGS_MASK;
 		*pte |= flags;
 
 		tlb_miss_unlock();
 		mtx_unlock_spin(&tlbivax_mutex);
 
 	} else {
 		/*
 		 * If there is an existing mapping, but it's for a different
 		 * physical address, pte_enter() will delete the old mapping.
 		 */
 		//if ((pte != NULL) && PTE_ISVALID(pte))
 		//	debugf("mmu_booke_enter_locked: replace\n");
 		//else
 		//	debugf("mmu_booke_enter_locked: new\n");
 
 		/* Now set up the flags and install the new mapping. */
 		flags = (PTE_SR | PTE_VALID);
 		flags |= PTE_M;
 
 		if (!su)
 			flags |= PTE_UR;
 
 		if (prot & VM_PROT_WRITE) {
 			flags |= PTE_SW;
 			if (!su)
 				flags |= PTE_UW;
 
 			if ((m->oflags & VPO_UNMANAGED) == 0)
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
 
 		if (prot & VM_PROT_EXECUTE) {
 			flags |= PTE_SX;
 			if (!su)
 				flags |= PTE_UX;
 		}
 
 		/* If it's wired, update stats. */
 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
 			flags |= PTE_WIRED;
 
 		error = pte_enter(mmu, pmap, m, va, flags,
 		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
 		if (error != 0)
 			return (KERN_RESOURCE_SHORTAGE);
 
 		if ((flags & PTE_WIRED) != 0)
 			pmap->pm_stats.wired_count++;
 
 		/* Flush the real memory from the instruction cache. */
 		if (prot & VM_PROT_EXECUTE)
 			sync++;
 	}
 
 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
 		__syncicache((void *)va, PAGE_SIZE);
 		sync = 0;
 	}
 
 	return (KERN_SUCCESS);
 }
 
 /*
  * Maps a sequence of resident pages belonging to the same object.
  * The sequence begins with the given page m_start.  This page is
  * mapped at the given virtual address start.  Each subsequent page is
  * mapped at a virtual address that is offset from start by the same
  * amount as the page is offset from m_start within the object.  The
  * last page in the sequence is the page with the largest offset from
  * m_start that can be mapped at a virtual address less than the given
  * virtual address end.  Not every virtual page between start and end
  * is mapped; only those for which a resident page exists with the
  * corresponding offset from m_start are mapped.
  */
 static void
 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
 {
 	vm_page_t m;
 	vm_pindex_t diff, psize;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
 	psize = atop(end - start);
 	m = m_start;
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 		    PMAP_ENTER_NOSLEEP, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
 static void
 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m,
 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
 	    0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
 /*
  * Remove the given range of addresses from the specified map.
  *
  * It is assumed that the start and end are properly rounded to the page size.
  */
 static void
 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
 {
 	pte_t *pte;
 	uint8_t hold_flag;
 
 	int su = (pmap == kernel_pmap);
 
 	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
 	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
 
 	if (su) {
 		KASSERT(((va >= virtual_avail) &&
 		    (va <= VM_MAX_KERNEL_ADDRESS)),
 		    ("mmu_booke_remove: kernel pmap, non kernel va"));
 	} else {
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_remove: user pmap, non user va"));
 	}
 
 	if (PMAP_REMOVE_DONE(pmap)) {
 		//debugf("mmu_booke_remove: e (empty)\n");
 		return;
 	}
 
 	hold_flag = PTBL_HOLD_FLAG(pmap);
 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	for (; va < endva; va += PAGE_SIZE) {
 		pte = pte_find(mmu, pmap, va);
 		if ((pte != NULL) && PTE_ISVALID(pte))
 			pte_remove(mmu, pmap, va, hold_flag);
 	}
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 
 	//debugf("mmu_booke_remove: e\n");
 }
 
 /*
  * Remove physical page from all pmaps in which it resides.
  */
 static void
 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv, pvn;
 	uint8_t hold_flag;
 
 	rw_wlock(&pvh_global_lock);
 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
 		pvn = TAILQ_NEXT(pv, pv_link);
 
 		PMAP_LOCK(pv->pv_pmap);
 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
 		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Map a range of physical addresses into kernel virtual address space.
  */
 static vm_offset_t
 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
     vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t sva = *virt;
 	vm_offset_t va = sva;
 
 	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
 	//		sva, pa_start, pa_end);
 
 	while (pa_start < pa_end) {
 		mmu_booke_kenter(mmu, va, pa_start);
 		va += PAGE_SIZE;
 		pa_start += PAGE_SIZE;
 	}
 	*virt = va;
 
 	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
 	return (sva);
 }
 
 /*
  * The pmap must be activated before its address space can be accessed in any
  * way.
  */
 static void
 mmu_booke_activate(mmu_t mmu, struct thread *td)
 {
 	pmap_t pmap;
 	u_int cpuid;
 
 	pmap = &td->td_proc->p_vmspace->vm_pmap;
 
 	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
 
 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
 
 	sched_pin();
 
 	cpuid = PCPU_GET(cpuid);
 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
 	PCPU_SET(curpmap, pmap);
 	
 	if (pmap->pm_tid[cpuid] == TID_NONE)
 		tid_alloc(pmap);
 
 	/* Load PID0 register with pmap tid value. */
 	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
 	__asm __volatile("isync");
 
 	mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
 
 	sched_unpin();
 
 	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
 	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
 }
 
 /*
  * Deactivate the specified process's address space.
  */
 static void
 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
 {
 	pmap_t pmap;
 
 	pmap = &td->td_proc->p_vmspace->vm_pmap;
 	
 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
 
 	td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
 
 	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
 	PCPU_SET(curpmap, NULL);
 }
 
 /*
  * Copy the range specified by src_addr/len
  * from the source map to the range dst_addr/len
  * in the destination map.
  *
  * This routine is only advisory and need not do anything.
  */
 static void
 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
     vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
 {
 
 }
 
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
 static void
 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
 	vm_offset_t va;
 	vm_page_t m;
 	pte_t *pte;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		mmu_booke_remove(mmu, pmap, sva, eva);
 		return;
 	}
 
 	if (prot & VM_PROT_WRITE)
 		return;
 
 	PMAP_LOCK(pmap);
 	for (va = sva; va < eva; va += PAGE_SIZE) {
 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
 			if (PTE_ISVALID(pte)) {
 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				/* Handle modified pages. */
 				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
 					vm_page_dirty(m);
 
 				tlb0_flush_entry(va);
 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 				tlb_miss_unlock();
 				mtx_unlock_spin(&tlbivax_mutex);
 			}
 		}
 	}
 	PMAP_UNLOCK(pmap);
 }
 
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
 static void
 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
 	pte_t *pte;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_remove_write: page %p is not managed", m));
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * set by another thread while the object is locked.  Thus,
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
 			if (PTE_ISVALID(pte)) {
 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				/* Handle modified pages. */
 				if (PTE_ISMODIFIED(pte))
 					vm_page_dirty(m);
 
 				/* Flush mapping from TLB0. */
 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 				tlb_miss_unlock();
 				mtx_unlock_spin(&tlbivax_mutex);
 			}
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
 }
 
 static void
 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	pte_t *pte;
 	pmap_t pmap;
 	vm_page_t m;
 	vm_offset_t addr;
 	vm_paddr_t pa = 0;
 	int active, valid;
  
 	va = trunc_page(va);
 	sz = round_page(sz);
 
 	rw_wlock(&pvh_global_lock);
 	pmap = PCPU_GET(curpmap);
 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
 	while (sz > 0) {
 		PMAP_LOCK(pm);
 		pte = pte_find(mmu, pm, va);
 		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
 		if (valid)
 			pa = PTE_PA(pte);
 		PMAP_UNLOCK(pm);
 		if (valid) {
 			if (!active) {
 				/* Create a mapping in the active pmap. */
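 				/* VA 0 is used as a scratch window and torn down below. */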
 				addr = 0;
 				m = PHYS_TO_VM_PAGE(pa);
 				PMAP_LOCK(pmap);
 				pte_enter(mmu, pmap, m, addr,
 				    PTE_SR | PTE_VALID | PTE_UR, FALSE);
 				__syncicache((void *)addr, PAGE_SIZE);
 				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
 				PMAP_UNLOCK(pmap);
 			} else
 				__syncicache((void *)va, PAGE_SIZE);
 		}
 		va += PAGE_SIZE;
 		sz -= PAGE_SIZE;
 	}
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Atomically extract and hold the physical page with the given
  * pmap and virtual address pair if that mapping permits the given
  * protection.
  */
 static vm_page_t
 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
     vm_prot_t prot)
 {
 	pte_t *pte;
 	vm_page_t m;
 	uint32_t pte_wbit;
 	vm_paddr_t pa;
 	
 	m = NULL;
 	pa = 0;	
 	PMAP_LOCK(pmap);
 retry:
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte)) {
 		if (pmap == kernel_pmap)
 			pte_wbit = PTE_SW;
 		else
 			pte_wbit = PTE_UW;
 
 		if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
 				goto retry;
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 			vm_page_hold(m);
 		}
 	}
 
 	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }
 
 /*
  * Initialize a vm_page's machine-dependent fields.
  */
 static void
 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
 {
 
 	m->md.pv_tracked = 0;
 	TAILQ_INIT(&m->md.pv_list);
 }
 
 /*
  * mmu_booke_zero_page_area zeros the specified hardware page by
  * mapping it into virtual memory and using bzero to clear
  * its contents.
  *
  * off and size must reside within a single page.
  */
 static void
 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {
 	vm_offset_t va;
 
 	/* XXX KASSERT off and size are within a single page? */
 
 	mtx_lock(&zero_page_mutex);
 	va = zero_page_va;
 
 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
 	bzero((caddr_t)va + off, size);
 	mmu_booke_kremove(mmu, va);
 
 	mtx_unlock(&zero_page_mutex);
 }
 
 /*
  * mmu_booke_zero_page zeros the specified hardware page.
  */
 static void
 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
 {
 	vm_offset_t off, va;
 
 	mtx_lock(&zero_page_mutex);
 	va = zero_page_va;
 
 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
 	mmu_booke_kremove(mmu, va);
 
 	mtx_unlock(&zero_page_mutex);
 }
 
 /*
  * mmu_booke_copy_page copies the specified (machine-independent) page by
  * mapping the page into virtual memory and using memcpy() to copy the page,
  * one machine-dependent page at a time.
  */
 static void
 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
 {
 	vm_offset_t sva, dva;
 
 	sva = copy_page_src_va;
 	dva = copy_page_dst_va;
 
 	mtx_lock(&copy_page_mutex);
 	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
 	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
 	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
 	mmu_booke_kremove(mmu, dva);
 	mmu_booke_kremove(mmu, sva);
 	mtx_unlock(&copy_page_mutex);
 }
 
 static inline void
 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
 	vm_offset_t a_pg_offset, b_pg_offset;
 	int cnt;
 
 	mtx_lock(&copy_page_mutex);
 	while (xfersize > 0) {
 		a_pg_offset = a_offset & PAGE_MASK;
 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 		mmu_booke_kenter(mmu, copy_page_src_va,
 		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
 		a_cp = (char *)copy_page_src_va + a_pg_offset;
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		mmu_booke_kenter(mmu, copy_page_dst_va,
 		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
 		b_cp = (char *)copy_page_dst_va + b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
 		mmu_booke_kremove(mmu, copy_page_dst_va);
 		mmu_booke_kremove(mmu, copy_page_src_va);
 		a_offset += cnt;
 		b_offset += cnt;
 		xfersize -= cnt;
 	}
 	mtx_unlock(&copy_page_mutex);
 }
 
 static vm_offset_t
 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
 	vm_paddr_t paddr;
 	vm_offset_t qaddr;
 	uint32_t flags;
 	pte_t *pte;
 
 	paddr = VM_PAGE_TO_PHYS(m);
 
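 	/*
 	 * Build a wired, kernel r/w/x PTE; the WIMG bits come from the
 	 * page's memory attribute via tlb_calc_wimg().
 	 */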
 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
 	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
 	flags |= PTE_PS_4KB;
 
 	critical_enter();
 	qaddr = PCPU_GET(qmap_addr);
 
 	pte = pte_find(mmu, kernel_pmap, qaddr);
 
 	KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
 
 	/* 
 	 * XXX: tlbivax is broadcast to other cores, but qaddr should
  	 * not be present in other TLBs.  Is there a better instruction
 	 * sequence to use? Or just forget it & use mmu_booke_kenter()... 
 	 */
 	__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
 	__asm __volatile("isync; msync");
 
 	*pte = PTE_RPN_FROM_PA(paddr) | flags;
 
 	/* Flush the real memory from the instruction cache. */
 	if ((flags & (PTE_I | PTE_G)) == 0)
 		__syncicache((void *)qaddr, PAGE_SIZE);
 
 	return (qaddr);
 }
 
 static void
 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
 {
 	pte_t *pte;
 
 	pte = pte_find(mmu, kernel_pmap, addr);
 
 	KASSERT(PCPU_GET(qmap_addr) == addr,
 	    ("mmu_booke_quick_remove_page: invalid address"));
 	KASSERT(*pte != 0,
 	    ("mmu_booke_quick_remove_page: PTE not in use"));
 
 	*pte = 0;
 	critical_exit();
 }
 
 /*
  * Return whether or not the specified physical page was modified
  * in any physical map.
  */
 static boolean_t
 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
 	pv_entry_t pv;
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_is_modified: page %p is not managed", m));
 	rv = FALSE;
 
 	/*
 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can be modified.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
 			if (PTE_ISMODIFIED(pte))
 				rv = TRUE;
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 		if (rv)
 			break;
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 /*
  * Return whether or not the specified virtual address is eligible
  * for prefault.
  */
 static boolean_t
 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
 {
 
 	return (FALSE);
 }
 
 /*
  * Return whether or not the specified physical page was referenced
  * in any physical map.
  */
 static boolean_t
 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
 	pv_entry_t pv;
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_is_referenced: page %p is not managed", m));
 	rv = FALSE;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
 			if (PTE_ISREFERENCED(pte))
 				rv = TRUE;
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 		if (rv)
 			break;
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 /*
  * Clear the modify bits on the specified physical page.
  */
 static void
 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
 	pv_entry_t pv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	KASSERT(!vm_page_xbusied(m),
 	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
 			mtx_lock_spin(&tlbivax_mutex);
 			tlb_miss_lock();
 			
 			if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
 				tlb0_flush_entry(pv->pv_va);
 				*pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
 				    PTE_REFERENCED);
 			}
 
 			tlb_miss_unlock();
 			mtx_unlock_spin(&tlbivax_mutex);
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	rw_wunlock(&pvh_global_lock);
 }
 
 /*
  * Return a count of reference bits for a page, clearing those bits.
  * It is not necessary for every reference bit to be cleared, but it
  * is necessary that 0 only be returned when there are truly no
  * reference bits set.
  *
  * As an optimization, update the page's dirty field if a modified bit is
  * found while counting reference bits.  This opportunistic update can be
  * performed at low cost and can eliminate the need for some future calls
  * to pmap_is_modified().  However, since this function stops after
  * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
  * dirty pages.  Those dirty pages will only be detected by a future call
  * to pmap_is_modified().
  */
 static int
 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
 	pv_entry_t pv;
 	int count;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_ts_referenced: page %p is not managed", m));
 	count = 0;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
 			if (PTE_ISMODIFIED(pte))
 				vm_page_dirty(m);
 			if (PTE_ISREFERENCED(pte)) {
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				tlb0_flush_entry(pv->pv_va);
 				*pte &= ~PTE_REFERENCED;
 
 				tlb_miss_unlock();
 				mtx_unlock_spin(&tlbivax_mutex);
 
 				if (++count >= PMAP_TS_REFERENCED_MAX) {
 					PMAP_UNLOCK(pv->pv_pmap);
 					break;
 				}
 			}
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
 /*
  * Clear the wired attribute from the mappings for the specified range of
  * addresses in the given pmap.  Every valid mapping within that range must
  * have the wired attribute set.  In contrast, invalid mappings cannot have
  * the wired attribute set, so they are ignored.
  *
  * The wired attribute of the page table entry is not a hardware feature, so
  * there is no need to invalidate any TLB entries.
  */
 static void
 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
 	vm_offset_t va;
 	pte_t *pte;
 
 	PMAP_LOCK(pmap);
 	for (va = sva; va < eva; va += PAGE_SIZE) {
 		if ((pte = pte_find(mmu, pmap, va)) != NULL &&
 		    PTE_ISVALID(pte)) {
 			if (!PTE_ISWIRED(pte))
 				panic("mmu_booke_unwire: pte %p isn't wired",
 				    pte);
 			*pte &= ~PTE_WIRED;
 			pmap->pm_stats.wired_count--;
 		}
 	}
 	PMAP_UNLOCK(pmap);
 }
 
 /*
  * Return true if the pmap's pv is one of the first 16 pvs linked to from this
  * page.  This count may be changed upwards or downwards in the future; it is
  * only necessary that true be returned for a small subset of pmaps for proper
  * page aging.
  */
 static boolean_t
 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops;
 	boolean_t rv;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		if (pv->pv_pmap == pmap) {
 			rv = TRUE;
 			break;
 		}
 		if (++loops >= 16)
 			break;
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
 /*
  * Return the number of managed mappings to the given physical page that are
  * wired.
  */
 static int
 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
 	pte_t *pte;
 	int count = 0;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
 				count++;
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
 	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
 static int
 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 	vm_offset_t va;
 
 	/*
 	 * This currently does not work for entries that
 	 * overlap TLB1 entries.
 	 */
 	for (i = 0; i < TLB1_ENTRIES; i ++) {
 		if (tlb1_iomapped(i, pa, size, &va) == 0)
 			return (0);
 	}
 
 	return (EFAULT);
 }
 
 void
 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 {
 	vm_paddr_t ppa;
 	vm_offset_t ofs;
 	vm_size_t gran;
 
 	/* Minidumps are based on virtual memory addresses. */
 	if (do_minidump) {
 		*va = (void *)(vm_offset_t)pa;
 		return;
 	}
 
 	/* Raw physical memory dumps don't have a virtual address. */
 	/* We always map a 256MB page at 256M. */
 	gran = 256 * 1024 * 1024;
 	ppa = rounddown2(pa, gran);
 	ofs = pa - ppa;
 	*va = (void *)(gran + ofs);
 	tlb1_set_entry(gran, ppa, gran, _TLB_ENTRY_IO);
 
 	if (sz > (gran - ofs))
 		tlb1_set_entry(gran * 2, ppa + gran, gran,
 		    _TLB_ENTRY_IO);
 }
 
 void
 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
 {
 	vm_paddr_t ppa;
 	vm_offset_t ofs;
 	vm_size_t gran;
 	tlb_entry_t e;
 	int i;
 
 	/* Minidumps are based on virtual memory addresses. */
 	/* Nothing to do... */
 	if (do_minidump)
 		return;
 
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			break;
 	}
 
 	/* Raw physical memory dumps don't have a virtual address. */
 	i--;
 	e.mas1 = 0;
 	e.mas2 = 0;
 	e.mas3 = 0;
 	tlb1_write_entry(&e, i);
 
 	gran = 256 * 1024 * 1024;
 	ppa = rounddown2(pa, gran);
 	ofs = pa - ppa;
 	if (sz > (gran - ofs)) {
 		i--;
 		e.mas1 = 0;
 		e.mas2 = 0;
 		e.mas3 = 0;
 		tlb1_write_entry(&e, i);
 	}
 }
 
 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 
 void
 mmu_booke_scan_init(mmu_t mmu)
 {
 	vm_offset_t va;
 	pte_t *pte;
 	int i;
 
 	if (!do_minidump) {
 		/* Initialize phys. segments for dumpsys(). */
 		memset(&dump_map, 0, sizeof(dump_map));
 		mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
 		    &availmem_regions_sz);
 		for (i = 0; i < physmem_regions_sz; i++) {
 			dump_map[i].pa_start = physmem_regions[i].mr_start;
 			dump_map[i].pa_size = physmem_regions[i].mr_size;
 		}
 		return;
 	}
 
 	/* Virtual segments for minidumps: */
 	memset(&dump_map, 0, sizeof(dump_map));
 
 	/* 1st: kernel .data and .bss. */
 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
 	dump_map[0].pa_size =
 	    round_page((uintptr_t)_end) - dump_map[0].pa_start;
 
 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
 	dump_map[1].pa_start = data_start;
 	dump_map[1].pa_size = data_end - data_start;
 
 	/* 3rd: kernel VM. */
 	va = dump_map[1].pa_start + dump_map[1].pa_size;
 	/* Find start of next chunk (from va). */
 	while (va < virtual_end) {
 		/* Don't dump the buffer cache. */
 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
 			va = kmi.buffer_eva;
 			continue;
 		}
 		pte = pte_find(mmu, kernel_pmap, va);
 		if (pte != NULL && PTE_ISVALID(pte))
 			break;
 		va += PAGE_SIZE;
 	}
 	if (va < virtual_end) {
 		dump_map[2].pa_start = va;
 		va += PAGE_SIZE;
 		/* Find last page in chunk. */
 		while (va < virtual_end) {
 			/* Don't run into the buffer cache. */
 			if (va == kmi.buffer_sva)
 				break;
 			pte = pte_find(mmu, kernel_pmap, va);
 			if (pte == NULL || !PTE_ISVALID(pte))
 				break;
 			va += PAGE_SIZE;
 		}
 		dump_map[2].pa_size = va - dump_map[2].pa_start;
 	}
 }
 
 /*
  * Map a set of physical memory pages into the kernel virtual address space.
  * Return a pointer to where it is mapped. This routine is intended to be used
  * for mapping device memory, NOT real memory.
  */
 static void *
 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 
 	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
 }
 
 static void *
 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	tlb_entry_t e;
 	void *res;
 	uintptr_t va, tmpva;
 	vm_size_t sz;
 	int i;
 
 	/*
 	 * Check if this is premapped in TLB1. Note: this should probably also
 	 * check whether a sequence of TLB1 entries exists that matches the
 	 * requirement; for now only the easy case is checked.
 	 */
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			continue;
 		if (pa >= e.phys &&
 		    (pa + size) <= (e.phys + e.size) &&
 		    (ma == VM_MEMATTR_DEFAULT ||
 		     tlb_calc_wimg(pa, ma) ==
 		      (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
 			return (void *)(e.virt +
 			    (vm_offset_t)(pa - e.phys));
 	}
 
 	size = roundup(size, PAGE_SIZE);
 
 	/*
 	 * The device mapping area is between VM_MAXUSER_ADDRESS and
 	 * VM_MIN_KERNEL_ADDRESS.  This gives 1GB of device addressing.
 	 */
 #ifdef SPARSE_MAPDEV
 	/*
 	 * With a sparse mapdev, align to the largest starting region.  This
 	 * could feasibly be optimized for a 'best-fit' alignment, but that
 	 * calculation could be very costly.
 	 * Align to the smaller of:
 	 * - first set bit in overlap of (pa & size mask)
 	 * - largest size envelope
 	 *
 	 * The device mapping may start at a PA whose low bits (within the
 	 * size mask) are nonzero, so offset the VA by those same bits to
 	 * maximize each TLB entry's range and minimize the number of TLB
 	 * entries used.
 	 */
 	do {
 	    tmpva = tlb1_map_base;
 	    sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
 	    sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
 	    va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
 #ifdef __powerpc64__
 	} while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
 #else
 	} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
 #endif
 #else
 #ifdef __powerpc64__
 	va = atomic_fetchadd_long(&tlb1_map_base, size);
 #else
 	va = atomic_fetchadd_int(&tlb1_map_base, size);
 #endif
 #endif
 	res = (void *)va;
 
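 	/*
 	 * Carve the region into TLB1 entries.  Each entry size is a power
 	 * of 4 (the granularity this code uses for TLB1) that also divides
 	 * the current pa and va; e.g. (illustrative) a 16MB-aligned 48MB
 	 * region is installed as three 16MB entries.
 	 */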
 	do {
 		sz = 1 << (ilog2(size) & ~1);
 		/* Align size to PA */
 		if (pa % sz != 0) {
 			do {
 				sz >>= 2;
 			} while (pa % sz != 0);
 		}
 		/* Now align from there to VA */
 		if (va % sz != 0) {
 			do {
 				sz >>= 2;
 			} while (va % sz != 0);
 		}
 		if (bootverbose)
 			printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
 			    va, (uintmax_t)pa, sz);
 		if (tlb1_set_entry(va, pa, sz,
 		    _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
 			return (NULL);
 		size -= sz;
 		pa += sz;
 		va += sz;
 	} while (size > 0);
 
 	return (res);
 }
 
 /*
  * 'Unmap' a range mapped by mmu_booke_mapdev().
  */
 static void
 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 {
 #ifdef SUPPORTS_SHRINKING_TLB1
 	vm_offset_t base, offset;
 
 	/*
 	 * Unmap only if this is inside kernel virtual space.
 	 */
 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
 		base = trunc_page(va);
 		offset = va & PAGE_MASK;
 		size = roundup(offset + size, PAGE_SIZE);
 		kva_free(base, size);
 	}
 #endif
 }
 
 /*
  * mmu_booke_object_init_pt preloads the ptes for a given object into the
  * specified pmap. This eliminates the blast of soft faults on process startup
  * and immediately after an mmap.
  */
 static void
 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 	    ("mmu_booke_object_init_pt: non-device object"));
 }
 
 /*
  * Perform the pmap work for mincore.
  */
 static int
 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
     vm_paddr_t *locked_pa)
 {
 
 	/* XXX: this should be implemented at some point */
 	return (0);
 }
 
 static int
 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
     vm_memattr_t mode)
 {
 	vm_offset_t va;
 	pte_t *pte;
 	int i, j;
 	tlb_entry_t e;
 
 	/* Check TLB1 mappings */
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			continue;
 		if (addr >= e.virt && addr < e.virt + e.size)
 			break;
 	}
 	if (i < TLB1_ENTRIES) {
 		/* Only allow full mappings to be modified for now. */
 		/* Validate the range. */
 		for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
 			tlb1_read_entry(&e, j);
 			if (va != e.virt || (sz - (va - addr) < e.size))
 				return (EINVAL);
 		}
 		for (va = addr; va < addr + sz; va += e.size, i++) {
 			tlb1_read_entry(&e, i);
 			e.mas2 &= ~MAS2_WIMGE_MASK;
 			e.mas2 |= tlb_calc_wimg(e.phys, mode);
 
 			/*
 			 * Write it out to the TLB.  Should really re-sync with other
 			 * cores.
 			 */
 			tlb1_write_entry(&e, i);
 		}
 		return (0);
 	}
 
 	/* Not in TLB1, try through pmap */
 	/* First validate the range. */
 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
 		pte = pte_find(mmu, kernel_pmap, va);
 		if (pte == NULL || !PTE_ISVALID(pte))
 			return (EINVAL);
 	}
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
 		pte = pte_find(mmu, kernel_pmap, va);
 		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
 		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
 		tlb0_flush_entry(va);
 	}
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
 
 	return (0);
 }
 
 /**************************************************************************/
 /* TID handling */
 /**************************************************************************/
 
 /*
  * Allocate a TID. If necessary, steal one from someone else.
  * The new TID is flushed from the TLB before returning.
  */
 static tlbtid_t
 tid_alloc(pmap_t pmap)
 {
 	tlbtid_t tid;
 	int thiscpu;
 
 	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
 
 	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
 
 	thiscpu = PCPU_GET(cpuid);
 
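 	/*
 	 * TIDs are handed out round-robin per CPU, wrapping back to
 	 * TID_MIN once TID_MAX has been used.
 	 */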
 	tid = PCPU_GET(tid_next);
 	if (tid > TID_MAX)
 		tid = TID_MIN;
 	PCPU_SET(tid_next, tid + 1);
 
 	/* If we are stealing a TID, clear the relevant pmap's field. */
 	if (tidbusy[thiscpu][tid] != NULL) {
 
 		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
 		
 		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
 
 		/* Flush all entries from TLB0 matching this TID. */
 		tid_flush(tid);
 	}
 
 	tidbusy[thiscpu][tid] = pmap;
 	pmap->pm_tid[thiscpu] = tid;
 	__asm __volatile("msync; isync");
 
 	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
 	    PCPU_GET(tid_next));
 
 	return (tid);
 }
 
 /**************************************************************************/
 /* TLB0 handling */
 /**************************************************************************/
 
 static void
 #ifdef __powerpc64__
 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
 #else
 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
 #endif
     uint32_t mas7)
 {
 	int as;
 	char desc[3];
 	tlbtid_t tid;
 	vm_size_t size;
 	unsigned int tsize;
 
 	desc[2] = '\0';
 	if (mas1 & MAS1_VALID)
 		desc[0] = 'V';
 	else
 		desc[0] = ' ';
 
 	if (mas1 & MAS1_IPROT)
 		desc[1] = 'P';
 	else
 		desc[1] = ' ';
 
 	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
 	tid = MAS1_GETTID(mas1);
 
 	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
 	size = 0;
 	if (tsize)
 		size = tsize2size(tsize);
 
 	debugf("%3d: (%s) [AS=%d] "
 	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
 	    "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
 	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
 }
 
 /* Convert TLB0 va and way number to tlb0[] table index. */
 static inline unsigned int
 tlb0_tableidx(vm_offset_t va, unsigned int way)
 {
 	unsigned int idx;
 
 	idx = (way * TLB0_ENTRIES_PER_WAY);
 	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
 	return (idx);
 }
 
 /*
  * Invalidate TLB0 entry.
  */
 static inline void
 tlb0_flush_entry(vm_offset_t va)
 {
 
 	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
 
 	mtx_assert(&tlbivax_mutex, MA_OWNED);
 
 	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
 	__asm __volatile("isync; msync");
 	__asm __volatile("tlbsync; msync");
 
 	CTR1(KTR_PMAP, "%s: e", __func__);
 }
 
 /* Print out contents of the MAS registers for each TLB0 entry */
 void
 tlb0_print_tlbentries(void)
 {
 	uint32_t mas0, mas1, mas3, mas7;
 #ifdef __powerpc64__
 	uint64_t mas2;
 #else
 	uint32_t mas2;
 #endif
 	int entryidx, way, idx;
 
 	debugf("TLB0 entries:\n");
 	for (way = 0; way < TLB0_WAYS; way ++)
 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
 
 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
 			mtspr(SPR_MAS0, mas0);
 			__asm __volatile("isync");
 
 			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
 			mtspr(SPR_MAS2, mas2);
 
 			__asm __volatile("isync; tlbre");
 
 			mas1 = mfspr(SPR_MAS1);
 			mas2 = mfspr(SPR_MAS2);
 			mas3 = mfspr(SPR_MAS3);
 			mas7 = mfspr(SPR_MAS7);
 
 			idx = tlb0_tableidx(mas2, way);
 			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
 		}
 }
 
 /**************************************************************************/
 /* TLB1 handling */
 /**************************************************************************/
 
 /*
  * TLB1 mapping notes:
  *
  * TLB1[0]	Kernel text and data.
  * TLB1[1-15]	Additional kernel text and data mappings (if required), PCI
  *		windows, and other device mappings.
  */
 
 /*
  * Read an entry from the given TLB1 slot.
  */
 void
 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
 {
 	register_t msr;
 	uint32_t mas0;
 
 	KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
 
 	msr = mfmsr();
 	__asm __volatile("wrteei 0");
 
 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
 	mtspr(SPR_MAS0, mas0);
 	__asm __volatile("isync; tlbre");
 
 	entry->mas1 = mfspr(SPR_MAS1);
 	entry->mas2 = mfspr(SPR_MAS2);
 	entry->mas3 = mfspr(SPR_MAS3);
 
 	switch ((mfpvr() >> 16) & 0xFFFF) {
 	case FSL_E500v2:
 	case FSL_E500mc:
 	case FSL_E5500:
 	case FSL_E6500:
 		entry->mas7 = mfspr(SPR_MAS7);
 		break;
 	default:
 		entry->mas7 = 0;
 		break;
 	}
 	mtmsr(msr);
 
 	entry->virt = entry->mas2 & MAS2_EPN_MASK;
 	entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
 	    (entry->mas3 & MAS3_RPN);
 	entry->size =
 	    tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
 }
 
 struct tlbwrite_args {
 	tlb_entry_t *e;
 	unsigned int idx;
 };
 
 static void
 tlb1_write_entry_int(void *arg)
 {
 	struct tlbwrite_args *args = arg;
 	uint32_t mas0;
 
 	/* Select entry */
 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
 
 	mtspr(SPR_MAS0, mas0);
 	__asm __volatile("isync");
 	mtspr(SPR_MAS1, args->e->mas1);
 	__asm __volatile("isync");
 	mtspr(SPR_MAS2, args->e->mas2);
 	__asm __volatile("isync");
 	mtspr(SPR_MAS3, args->e->mas3);
 	__asm __volatile("isync");
 	switch ((mfpvr() >> 16) & 0xFFFF) {
 	case FSL_E500mc:
 	case FSL_E5500:
 	case FSL_E6500:
 		mtspr(SPR_MAS8, 0);
 		__asm __volatile("isync");
 		/* FALLTHROUGH */
 	case FSL_E500v2:
 		mtspr(SPR_MAS7, args->e->mas7);
 		__asm __volatile("isync");
 		break;
 	default:
 		break;
 	}
 
 	__asm __volatile("tlbwe; isync; msync");
 
 }
 
 static void
 tlb1_write_entry_sync(void *arg)
 {
 	/* Empty synchronization point for smp_rendezvous(). */
 }
 
 /*
  * Write given entry to TLB1 hardware.
  */
 static void
 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
 {
 	struct tlbwrite_args args;
 
 	args.e = e;
 	args.idx = idx;
 
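 	/*
 	 * Shared (kernel-wide) entries must be installed on every CPU, so
 	 * use an smp_rendezvous(); otherwise write the entry locally with
 	 * external interrupts disabled.
 	 */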
 #ifdef SMP
 	if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
 		mb();
 		smp_rendezvous(tlb1_write_entry_sync,
 		    tlb1_write_entry_int,
 		    tlb1_write_entry_sync, &args);
 	} else
 #endif
 	{
 		register_t msr;
 
 		msr = mfmsr();
 		__asm __volatile("wrteei 0");
 		tlb1_write_entry_int(&args);
 		mtmsr(msr);
 	}
 }
 
 /*
  * Return the largest uint value log such that 2^log <= num.
  */
 static unsigned int
 ilog2(unsigned int num)
 {
 	int lz;
 
 	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
 	return (31 - lz);
 }
 
 /*
  * Convert TLB TSIZE value to mapped region size.
  */
 static vm_size_t
 tsize2size(unsigned int tsize)
 {
 
 	/*
 	 * size = 4^tsize KB
 	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
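 	 * e.g. tsize = 1 -> 4KB, tsize = 7 -> 16MB, tsize = 9 -> 256MB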
 	 */
 
 	return ((1 << (2 * tsize)) * 1024);
 }
 
 /*
  * Convert region size (must be power of 4) to TLB TSIZE value.
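  * (e.g. 16MB = 2^24 -> tsize = 24 / 2 - 5 = 7, the inverse of tsize2size().)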
  */
 static unsigned int
 size2tsize(vm_size_t size)
 {
 
 	return (ilog2(size) / 2 - 5);
 }
 
 /*
  * Register permanent kernel mapping in TLB1.
  *
  * Entries are created starting from index 0 (the first free slot is
  * found by scanning TLB1) and are not supposed to be invalidated.
  */
 int
 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
     uint32_t flags)
 {
 	tlb_entry_t e;
 	uint32_t ts, tid;
 	int tsize, index;
 
 	for (index = 0; index < TLB1_ENTRIES; index++) {
 		tlb1_read_entry(&e, index);
 		if ((e.mas1 & MAS1_VALID) == 0)
 			break;
 		/* Check if we're just updating the flags, and update them. */
 		if (e.phys == pa && e.virt == va && e.size == size) {
 			e.mas2 = (va & MAS2_EPN_MASK) | flags;
 			tlb1_write_entry(&e, index);
 			return (0);
 		}
 	}
 	if (index >= TLB1_ENTRIES) {
 		printf("tlb1_set_entry: TLB1 full!\n");
 		return (-1);
 	}
 
 	/* Convert size to TSIZE */
 	tsize = size2tsize(size);
 
 	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
 	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
 	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
 
 	e.phys = pa;
 	e.virt = va;
 	e.size = size;
 	e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
 	e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
 	e.mas2 = (va & MAS2_EPN_MASK) | flags;
 
 	/* Set supervisor RWX permission bits */
 	e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
 	e.mas7 = (pa >> 32) & MAS7_RPN;
 
 	tlb1_write_entry(&e, index);
 
 	/*
 	 * XXX in general TLB1 updates should be propagated between CPUs,
 	 * since current design assumes to have the same TLB1 set-up on all
 	 * cores.
 	 */
 	return (0);
 }
 
 /*
  * Map a contiguous RAM region into TLB1 using at most
  * KERNEL_REGION_MAX_TLB_ENTRIES entries.
  *
  * If necessary, round up the last entry size and return the total size
  * used by all allocated entries.
  */
 vm_size_t
 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 {
 	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
 	vm_size_t mapped, pgsz, base, mask;
 	int idx, nents;
 
 	/* Round up to the next 1M */
 	size = roundup2(size, 1 << 20);
 
 	mapped = 0;
 	idx = 0;
 	base = va;
 	pgsz = 64*1024*1024;
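 	/*
 	 * Greedily pick page sizes, starting at 64MB and shrinking by
 	 * powers of 4 as the remainder gets smaller.  If the entry budget
 	 * is exhausted first, the trailing smallest entries are replaced by
 	 * a single entry of the next larger size, which may map past the
 	 * requested end.
 	 */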
 	while (mapped < size) {
 		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
 			while (pgsz > (size - mapped))
 				pgsz >>= 2;
 			pgs[idx++] = pgsz;
 			mapped += pgsz;
 		}
 
 		/* We under-map. Correct for this. */
 		if (mapped < size) {
 			while (pgs[idx - 1] == pgsz) {
 				idx--;
 				mapped -= pgsz;
 			}
 			/* XXX We may increase beyond our starting point. */
 			pgsz <<= 2;
 			pgs[idx++] = pgsz;
 			mapped += pgsz;
 		}
 	}
 
 	nents = idx;
 	mask = pgs[0] - 1;
 	/* Align address to the boundary */
 	if (va & mask) {
 		va = (va + mask) & ~mask;
 		pa = (pa + mask) & ~mask;
 	}
 
 	for (idx = 0; idx < nents; idx++) {
 		pgsz = pgs[idx];
 		debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
 		tlb1_set_entry(va, pa, pgsz,
 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
 		pa += pgsz;
 		va += pgsz;
 	}
 
 	mapped = (va - base);
 	printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
 	    mapped, mapped - size);
 	return (mapped);
 }
 
 /*
  * TLB1 initialization routine, to be called after the very first
  * assembler level setup done in locore.S.
  */
 void
 tlb1_init()
 {
 	uint32_t mas0, mas1, mas2, mas3, mas7;
 	uint32_t tsz;
 
 	tlb1_get_tlbconf();
 
 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
 	mtspr(SPR_MAS0, mas0);
 	__asm __volatile("isync; tlbre");
 
 	mas1 = mfspr(SPR_MAS1);
 	mas2 = mfspr(SPR_MAS2);
 	mas3 = mfspr(SPR_MAS3);
 	mas7 = mfspr(SPR_MAS7);
 
 	kernload =  ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
 	    (mas3 & MAS3_RPN);
 
 	tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
 	kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
 
 	/* Setup TLB miss defaults */
 	set_mas4_defaults();
 }
 
 /*
  * pmap_early_io_unmap() should be used shortly after a matching
  * pmap_early_io_map(), as in the following snippet:
  *
  * x = pmap_early_io_map(...);
  * <do something with x>
  * pmap_early_io_unmap(x, size);
  *
  * and with no further TLB1 allocations made in between.
  */
 void
 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
 {
 	int i;
 	tlb_entry_t e;
 	vm_size_t isize;
 
 	size = roundup(size, PAGE_SIZE);
 	isize = size;
 	for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			continue;
 		if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
 			size -= e.size;
 			e.mas1 &= ~MAS1_VALID;
 			tlb1_write_entry(&e, i);
 		}
 	}
 	if (tlb1_map_base == va + isize)
 		tlb1_map_base -= isize;
 }	
 		
 vm_offset_t 
 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
 {
 	vm_paddr_t pa_base;
 	vm_offset_t va, sz;
 	int i;
 	tlb_entry_t e;
 
 	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
 	
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			continue;
 		if (pa >= e.phys && (pa + size) <=
 		    (e.phys + e.size))
 			return (e.virt + (pa - e.phys));
 	}
 
 	pa_base = rounddown(pa, PAGE_SIZE);
 	size = roundup(size + (pa - pa_base), PAGE_SIZE);
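 	/* Align the KVA base to the largest power-of-4 chunk we will map. */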
 	tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
 	va = tlb1_map_base + (pa - pa_base);
 
 	do {
 		sz = 1 << (ilog2(size) & ~1);
 		tlb1_set_entry(tlb1_map_base, pa_base, sz,
 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
 		size -= sz;
 		pa_base += sz;
 		tlb1_map_base += sz;
 	} while (size > 0);
 
 	return (va);
 }
 
 void
 pmap_track_page(pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t pa;
 	vm_page_t page;
 	struct pv_entry *pve;
 
 	va = trunc_page(va);
 	pa = pmap_kextract(va);
 	page = PHYS_TO_VM_PAGE(pa);
 
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 
 	TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
 			goto out;
 		}
 	}
 	page->md.pv_tracked = true;
 	pv_insert(pmap, va, page);
 out:
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);
 }
 
 
 /*
  * Setup MAS4 defaults.
  * These values are loaded to MAS0-2 on a TLB miss.
  */
 static void
 set_mas4_defaults(void)
 {
 	uint32_t mas4;
 
 	/* Defaults: TLB0, PID0, TSIZED=4K */
 	mas4 = MAS4_TLBSELD0;
 	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
 #ifdef SMP
 	mas4 |= MAS4_MD;
 #endif
 	mtspr(SPR_MAS4, mas4);
 	__asm __volatile("isync");
 }
 
 /*
  * Print out contents of the MAS registers for each TLB1 entry
  */
 void
 tlb1_print_tlbentries(void)
 {
 	uint32_t mas0, mas1, mas3, mas7;
 #ifdef __powerpc64__
 	uint64_t mas2;
 #else
 	uint32_t mas2;
 #endif
 	int i;
 
 	debugf("TLB1 entries:\n");
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 
 		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
 		mtspr(SPR_MAS0, mas0);
 
 		__asm __volatile("isync; tlbre");
 
 		mas1 = mfspr(SPR_MAS1);
 		mas2 = mfspr(SPR_MAS2);
 		mas3 = mfspr(SPR_MAS3);
 		mas7 = mfspr(SPR_MAS7);
 
 		tlb_print_entry(i, mas1, mas2, mas3, mas7);
 	}
 }
 
 /*
  * Return 0 if the physical IO range is encompassed by one of the
  * TLB1 entries, otherwise return the related error code.
  */
 static int
 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
 {
 	uint32_t prot;
 	vm_paddr_t pa_start;
 	vm_paddr_t pa_end;
 	unsigned int entry_tsize;
 	vm_size_t entry_size;
 	tlb_entry_t e;
 
 	*va = (vm_offset_t)NULL;
 
 	tlb1_read_entry(&e, i);
 	/* Skip invalid entries */
 	if (!(e.mas1 & MAS1_VALID))
 		return (EINVAL);
 
 	/*
 	 * The entry must be cache-inhibited, guarded, and r/w
 	 * so it can function as an i/o page.
 	 */
 	prot = e.mas2 & (MAS2_I | MAS2_G);
 	if (prot != (MAS2_I | MAS2_G))
 		return (EPERM);
 
 	prot = e.mas3 & (MAS3_SR | MAS3_SW);
 	if (prot != (MAS3_SR | MAS3_SW))
 		return (EPERM);
 
 	/* The address should be within the entry range. */
 	entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
 
 	entry_size = tsize2size(entry_tsize);
 	pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) | 
 	    (e.mas3 & MAS3_RPN);
 	pa_end = pa_start + entry_size;
 
 	if ((pa < pa_start) || ((pa + size) > pa_end))
 		return (ERANGE);
 
 	/* Return virtual address of this mapping. */
 	*va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
 	return (0);
 }
 
 /*
  * Invalidate all TLB0 entries which match the given TID. Note this is
  * dedicated to cases where invalidations should NOT be propagated to other
  * CPUs.
  */
 static void
 tid_flush(tlbtid_t tid)
 {
 	register_t msr;
 	uint32_t mas0, mas1, mas2;
 	int entry, way;
 
 
 	/* Don't evict kernel translations */
 	if (tid == TID_KERNEL)
 		return;
 
 	msr = mfmsr();
 	__asm __volatile("wrteei 0");
 
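 	/* Walk every TLB0 way and entry, invalidating those whose TID matches. */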
 	for (way = 0; way < TLB0_WAYS; way++)
 		for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
 
 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
 			mtspr(SPR_MAS0, mas0);
 			__asm __volatile("isync");
 
 			mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
 			mtspr(SPR_MAS2, mas2);
 
 			__asm __volatile("isync; tlbre");
 
 			mas1 = mfspr(SPR_MAS1);
 
 			if (!(mas1 & MAS1_VALID))
 				continue;
 			if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
 				continue;
 			mas1 &= ~MAS1_VALID;
 			mtspr(SPR_MAS1, mas1);
 			__asm __volatile("isync; tlbwe; isync; msync");
 		}
 	mtmsr(msr);
 }
Index: head/sys/powerpc/include/pmap.h
===================================================================
--- head/sys/powerpc/include/pmap.h	(revision 328529)
+++ head/sys/powerpc/include/pmap.h	(revision 328530)
@@ -1,291 +1,293 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
  *
  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
  * All rights reserved.
  *
  * Adapted for Freescale's e500 core CPUs.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *	from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
  */
 
 #ifndef	_MACHINE_PMAP_H_
 #define	_MACHINE_PMAP_H_
 
 #include <sys/queue.h>
 #include <sys/tree.h>
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
 #include <machine/sr.h>
 #include <machine/pte.h>
 #include <machine/slb.h>
 #include <machine/tlb.h>
 
 struct pmap;
 typedef struct pmap *pmap_t;
 
 #if defined(AIM)
 
 #if !defined(NPMAPS)
 #define	NPMAPS		32768
 #endif /* !defined(NPMAPS) */
 
 struct	slbtnode;
 
 struct pvo_entry {
 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
 #ifndef __powerpc64__
 	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
 #endif
 	RB_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
 	struct {
 #ifndef __powerpc64__
 		/* 32-bit fields */
 		struct	pte pte;
 #endif
 		/* 64-bit fields */
 		uintptr_t   slot;
 		vm_paddr_t  pa;
 		vm_prot_t   prot;
 	} pvo_pte;
 	pmap_t		pvo_pmap;		/* Owning pmap */
 	vm_offset_t	pvo_vaddr;		/* VA of entry */
 	uint64_t	pvo_vpn;		/* Virtual page number */
 };
 LIST_HEAD(pvo_head, pvo_entry);
 RB_HEAD(pvo_tree, pvo_entry);
 int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
 RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
 
 /* Used by 32-bit PMAP */
 #define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
 #define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
 /* Used by 64-bit PMAP */
 #define	PVO_HID			0x008UL		/* PVO entry in alternate hash*/
 /* Used by both */
 #define	PVO_WIRED		0x010UL		/* PVO entry is wired */
 #define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
 #define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
 						   bootstrap */
 #define PVO_DEAD		0x100UL		/* waiting to be deleted */
 #define PVO_LARGE		0x200UL		/* large page */
 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
 #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
 #define	PVO_PTEGIDX_CLR(pvo)	\
 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
 #define	PVO_PTEGIDX_SET(pvo, i)	\
 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 #define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
 
 struct	pmap {
 	struct		pmap_statistics	pm_stats;
 	struct	mtx	pm_mtx;
 	
     #ifdef __powerpc64__
 	struct slbtnode	*pm_slb_tree_root;
 	struct slb	**pm_slb;
 	int		pm_slb_len;
     #else
 	register_t	pm_sr[16];
     #endif
 	cpuset_t	pm_active;
 
 	struct pmap	*pmap_phys;
 	struct pvo_tree pmap_pvo;
 };
 
 struct	md_page {
 	volatile int32_t mdpg_attrs;
 	vm_memattr_t	 mdpg_cache_attrs;
 	struct	pvo_head mdpg_pvoh;
 };
 
 #define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
 
 /*
  * Return the VSID corresponding to a given virtual address.
  * If no VSID is currently defined, it will allocate one, and add
  * it to a free slot if available.
  *
  * NB: The PMAP MUST be locked already.
  */
 uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
 
 /* Lock-free, non-allocating lookup routines */
 uint64_t kernel_va_to_slbv(vm_offset_t va);
 struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);
 
 uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
 void	free_vsid(pmap_t pm, uint64_t esid, int large);
 void	slb_insert_user(pmap_t pm, struct slb *slb);
 void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);
 
 struct slbtnode *slb_alloc_tree(void);
 void     slb_free_tree(pmap_t pm);
 struct slb **slb_alloc_user_cache(void);
 void	slb_free_user_cache(struct slb **);
 
 #elif defined(BOOKE)
 
 struct pmap {
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	struct mtx		pm_mtx;		/* pmap mutex */
 	tlbtid_t		pm_tid[MAXCPU];	/* TID to identify this pmap's entries in the TLB */
 	cpuset_t		pm_active;	/* active on cpus */
 
 #ifdef __powerpc64__
 	/* Page table directory, array of pointers to page directories. */
 	pte_t **pm_pp2d[PP2D_NENTRIES];
 
 	/* List of allocated pdir bufs (pdir kva regions). */
 	TAILQ_HEAD(, ptbl_buf)	pm_pdir_list;
 #else
 	/* Page table directory, array of pointers to page tables. */
 	pte_t			*pm_pdir[PDIR_NENTRIES];
 #endif
 
 	/* List of allocated ptbl bufs (ptbl kva regions). */
 	TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
 };
 
 struct pv_entry {
 	pmap_t pv_pmap;
 	vm_offset_t pv_va;
 	TAILQ_ENTRY(pv_entry) pv_link;
 };
 typedef struct pv_entry *pv_entry_t;
 
 struct md_page {
 	TAILQ_HEAD(, pv_entry) pv_list;
 	int	pv_tracked;
 };
 
 #define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 
 #else
 /*
  * Common pmap members between AIM and BOOKE.
  * libkvm needs pm_stats at the same location in both, as it defines neither
  * AIM nor BOOKE and is expected to work across all of them.
  */
 struct pmap {
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	struct mtx		pm_mtx;		/* pmap mutex */
 };
 #endif /* AIM */
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
 
 #ifdef _KERNEL
 
 #define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
 #define	PMAP_LOCK_ASSERT(pmap, type) \
 				mtx_assert(&(pmap)->pm_mtx, (type))
 #define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
 #define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
 				    (pmap == kernel_pmap) ? "kernelpmap" : \
 				    "pmap", NULL, MTX_DEF)
 #define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
 #define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 
 void		pmap_bootstrap(vm_offset_t, vm_offset_t);
 void		pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 void		pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t);
 void		pmap_kremove(vm_offset_t);
 void		*pmap_mapdev(vm_paddr_t, vm_size_t);
 void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 int		pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 		    void **kaddr, size_t ulen, size_t *klen);
+int		pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
+		    vm_offset_t *decoded_addr);
 void		pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
 boolean_t	pmap_mmu_install(char *name, int prio);
 
 #define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
 
 #define PHYS_AVAIL_SZ	256	/* Allows up to 16GB Ram on pSeries with
 				 * logical memory block size of 64MB.
 				 * For more Ram increase the lmb or this value.
 				 */
 
 extern	vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 
 extern	vm_offset_t msgbuf_phys;
 
 extern	int pmap_bootstrapped;
 
 vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
 void pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
 void pmap_track_page(pmap_t pmap, vm_offset_t va);
 
 #endif
 
 #endif /* !_MACHINE_PMAP_H_ */
Index: head/sys/powerpc/powerpc/mmu_if.m
===================================================================
--- head/sys/powerpc/powerpc/mmu_if.m	(revision 328529)
+++ head/sys/powerpc/powerpc/mmu_if.m	(revision 328530)
@@ -1,1000 +1,1016 @@
 #-
 # Copyright (c) 2005 Peter Grehan
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 #    notice, this list of conditions and the following disclaimer.
 # 2. Redistributions in binary form must reproduce the above copyright
 #    notice, this list of conditions and the following disclaimer in the
 #    documentation and/or other materials provided with the distribution.
 #
 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 # ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 # SUCH DAMAGE.
 #
 # $FreeBSD$
 #
 
 #include <sys/param.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/systm.h>
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
 
 #include <machine/mmuvar.h>
 
 /**
  * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
  * @brief A set of methods required by all MMU implementations. These
  * are basically direct call-thru's from the pmap machine-dependent
  * code.
  * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
  *@{
  */
 
 INTERFACE mmu;
 
 #
 # Default implementations of some methods
 #
 CODE {
 	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
 	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
 	{
 		return;
 	}
 
 	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
 	{
 		return;
 	}
 
 	static void mmu_null_init(mmu_t mmu)
 	{
 		return;
 	}
 
 	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
 	    vm_offset_t va)
 	{
 		return (FALSE);
 	}
 
 	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
 	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
 	    vm_size_t size)
 	{
 		return;
 	}
 
 	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
 	{
 		return;
 	}
 
 	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
 	{
 		return;
 	}
 
 	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
 	    vm_paddr_t *locked_pa)
 	{
 		return (0);
 	}
 
 	static void mmu_null_deactivate(struct thread *td)
 	{
 		return;
 	}
 
 	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
 	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
 	{
 		return;
 	}
 
 	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
 	    vm_size_t size, vm_memattr_t ma)
 	{
 		return MMU_MAPDEV(mmu, pa, size);
 	}
 
 	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
 	    vm_paddr_t pa, vm_memattr_t ma)
 	{
 		MMU_KENTER(mmu, va, pa);
 	}
 
 	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
 	    vm_memattr_t ma)
 	{
 		return;
 	}
 
 	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
 	    vm_size_t sz, vm_memattr_t mode)
 	{
 		return (0);
 	}
 };
 
 
 /**
  * @brief Apply the given advice to the specified range of addresses within
  * the given pmap.  Depending on the advice, clear the referenced and/or
  * modified flags in each mapping and set the mapped page's dirty field.
  *
  * @param _pmap		physical map
  * @param _start	virtual range start
  * @param _end		virtual range end
  * @param _advice	advice to apply
  */
 METHOD void advise {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 	int		_advice;
 };
 
 
 /**
  * @brief Clear the 'modified' bit on the given physical page
  *
  * @param _pg		physical page
  */
 METHOD void clear_modify {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Clear the write and modified bits in each of the given
  * physical page's mappings
  *
  * @param _pg		physical page
  */
 METHOD void remove_write {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Copy the address range given by the source physical map, virtual
  * address and length to the destination physical map and virtual address.
  * This routine is optional (xxx default null implementation ?)
  *
  * @param _dst_pmap	destination physical map
  * @param _src_pmap	source physical map
  * @param _dst_addr	destination virtual address
  * @param _len		size of range
  * @param _src_addr	source virtual address
  */
 METHOD void copy {
 	mmu_t		_mmu;
 	pmap_t		_dst_pmap;
 	pmap_t		_src_pmap;
 	vm_offset_t	_dst_addr;
 	vm_size_t	_len;
 	vm_offset_t	_src_addr;
 } DEFAULT mmu_null_copy;
 
 
 /**
  * @brief Copy the source physical page to the destination physical page
  *
  * @param _src		source physical page
  * @param _dst		destination physical page
  */
 METHOD void copy_page {
 	mmu_t		_mmu;
 	vm_page_t	_src;
 	vm_page_t	_dst;
 };
 
 METHOD void copy_pages {
 	mmu_t		_mmu;
 	vm_page_t	*_ma;
 	vm_offset_t	_a_offset;
 	vm_page_t	*_mb;
 	vm_offset_t	_b_offset;
 	int		_xfersize;
 };
 
 /**
  * @brief Create a mapping between a virtual/physical address pair in the
  * passed physical map with the specified protection and wiring
  *
  * @param _pmap		physical map
  * @param _va		mapping virtual address
  * @param _p		mapping physical page
  * @param _prot		mapping page protection
  * @param _flags	pmap_enter flags
  * @param _psind	superpage size index
  */
 METHOD int enter {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 	vm_page_t	_p;
 	vm_prot_t	_prot;
 	u_int		_flags;
 	int8_t		_psind;
 };
 
 
 /**
  * @brief Maps a sequence of resident pages belonging to the same object.
  *
  * @param _pmap		physical map
  * @param _start	virtual range start
  * @param _end		virtual range end
  * @param _m_start	physical page mapped at start
  * @param _prot		mapping page protection
  */
 METHOD void enter_object {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 	vm_page_t	_m_start;
 	vm_prot_t	_prot;
 };
 
 
 /**
  * @brief A faster entry point for page mapping where it is possible
  * to short-circuit some of the tests in pmap_enter.
  *
  * @param _pmap		physical map (and also currently active pmap)
  * @param _va		mapping virtual address
  * @param _pg		mapping physical page
  * @param _prot		new page protection - used to see if the page is executable.
  */
 METHOD void enter_quick {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 	vm_page_t	_pg;
 	vm_prot_t	_prot;
 };
 
 
 /**
  * @brief Reverse map the given virtual address, returning the physical
  * page associated with the address if a mapping exists.
  *
  * @param _pmap		physical map
  * @param _va		mapping virtual address
  *
  * @retval 0		No mapping found
  * @retval addr		The mapping physical address
  */
 METHOD vm_paddr_t extract {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 };
 
 
 /**
  * @brief Reverse map the given virtual address, returning the
  * physical page if found. The page must be held (by calling
  * vm_page_hold) if the page protection matches the given protection
  *
  * @param _pmap		physical map
  * @param _va		mapping virtual address
  * @param _prot		protection used to determine if physical page
  *			should be locked
  *
  * @retval NULL		No mapping found
  * @retval page		Pointer to physical page. Held if protections match
  */
 METHOD vm_page_t extract_and_hold {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 	vm_prot_t	_prot;
 };
 
 
 /**
  * @brief Increase kernel virtual address space to the given virtual address.
  * Not really required for PowerPC, so optional unless the MMU implementation
  * can use it.
  *
  * @param _va		new upper limit for kernel virtual address space
  */
 METHOD void growkernel {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 } DEFAULT mmu_null_growkernel;
 
 
 /**
  * @brief Called from vm_mem_init. Zone allocation is available at
  * this stage, so it is a convenient time to create zones. This routine is
  * for MMU-implementation convenience and is optional.
  */
 METHOD void init {
 	mmu_t		_mmu;
 } DEFAULT mmu_null_init;
 
 
 /**
  * @brief Return whether the page has been marked as modified by the MMU
  * hardware
  *
  * @param _pg		physical page to test
  *
  * @retval boolean	TRUE if page has been modified
  */
 METHOD boolean_t is_modified {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Return whether the specified virtual address is a candidate to be
  * prefaulted in. This routine is optional.
  *
  * @param _pmap		physical map
  * @param _va		virtual address to test
  *
  * @retval boolean	TRUE if the address is a candidate.
  */
 METHOD boolean_t is_prefaultable {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 } DEFAULT mmu_null_is_prefaultable;
 
 
 /**
  * @brief Return whether or not the specified physical page was referenced
  * in any physical maps.
  *
  * @param _pg		physical page
  *
  * @retval boolean	TRUE if page has been referenced
  */
 METHOD boolean_t is_referenced {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Return a count of referenced bits for a page, clearing those bits.
  * Not all referenced bits need to be cleared, but it is necessary that 0
  * only be returned when there are none set.
  *
  * @param _pg		physical page
  *
  * @retval int		count of referenced bits
  */
 METHOD int ts_referenced {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Map the requested physical address range into kernel virtual
  * address space. The value in _virt is taken as a hint. The virtual
  * address of the range is returned, or NULL if the mapping could not
  * be created. The range can be direct-mapped if that is supported.
  *
  * @param *_virt	Hint for start virtual address, and also return
  *			value
  * @param _start	physical address range start
  * @param _end		physical address range end
  * @param _prot		protection of range (currently ignored)
  *
  * @retval NULL		could not map the area
  * @retval addr, *_virt	mapping start virtual address
  */
 METHOD vm_offset_t map {
 	mmu_t		_mmu;
 	vm_offset_t	*_virt;
 	vm_paddr_t	_start;
 	vm_paddr_t	_end;
 	int		_prot;
 };
 
 
 /**
  * @brief Used to create a contiguous set of read-only mappings for a
  * given object to try and eliminate a cascade of on-demand faults as
  * the object is accessed sequentially. This routine is optional.
  *
  * @param _pmap		physical map
  * @param _addr		mapping start virtual address
  * @param _object	device-backed V.M. object to be mapped
  * @param _pindex	page-index within object of mapping start
  * @param _size		size in bytes of mapping
  */
 METHOD void object_init_pt {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_addr;
 	vm_object_t	_object;
 	vm_pindex_t	_pindex;
 	vm_size_t	_size;
 } DEFAULT mmu_null_object_init_pt;
 
 
 /**
  * @brief Used to determine if the specified page has a mapping for the
  * given physical map, by scanning the list of reverse-mappings from the
  * page. The list is scanned to a maximum of 16 entries.
  *
  * @param _pmap		physical map
  * @param _pg		physical page
  *
  * @retval bool		TRUE if the physical map was found in the first 16
  *			reverse-map list entries off the physical page.
  */
 METHOD boolean_t page_exists_quick {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Initialise the machine-dependent section of the physical page
  * data structure. This routine is optional.
  *
  * @param _pg		physical page
  */
 METHOD void page_init {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 } DEFAULT mmu_null_page_init;
 
 
 /**
  * @brief Count the number of managed mappings to the given physical
  * page that are wired.
  *
  * @param _pg		physical page
  *
  * @retval int		the number of wired, managed mappings to the
  *			given physical page
  */
 METHOD int page_wired_mappings {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Initialise a physical map data structure
  *
  * @param _pmap		physical map
  */
 METHOD void pinit {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 };
 
 
 /**
  * @brief Initialise the physical map for process 0, the initial process
  * in the system.
  * XXX default to pinit ?
  *
  * @param _pmap		physical map
  */
 METHOD void pinit0 {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 };
 
 
 /**
  * @brief Set the protection for physical pages in the given virtual address
  * range to the given value.
  *
  * @param _pmap		physical map
  * @param _start	virtual range start
  * @param _end		virtual range end
  * @param _prot		new page protection
  */
 METHOD void protect {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 	vm_prot_t	_prot;
 };
 
 
 /**
  * @brief Create a mapping in kernel virtual address space for the given array
  * of wired physical pages.
  *
  * @param _start	mapping virtual address start
  * @param *_m		array of physical page pointers
  * @param _count	array elements
  */
 METHOD void qenter {
 	mmu_t		_mmu;
 	vm_offset_t	_start;
 	vm_page_t	*_pg;
 	int		_count;
 };
 
 
 /**
  * @brief Remove the temporary mappings created by qenter.
  *
  * @param _start	mapping virtual address start
  * @param _count	number of pages in mapping
  */
 METHOD void qremove {
 	mmu_t		_mmu;
 	vm_offset_t	_start;
 	int		_count;
 };
 
 
 /**
  * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
  * should be no existing mappings for the physical map at this point
  *
  * @param _pmap		physical map
  */
 METHOD void release {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 };
 
 
 /**
  * @brief Remove all mappings in the given physical map for the start/end
  * virtual address range. The range will be page-aligned.
  *
  * @param _pmap		physical map
  * @param _start	mapping virtual address start
  * @param _end		mapping virtual address end
  */
 METHOD void remove {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 };
 
 
 /**
  * @brief Traverse the reverse-map list off the given physical page and
  * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
  *
  * @param _pg		physical page
  */
 METHOD void remove_all {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Remove all mappings from the given physical map. Similar to the
  * remove method, but used when tearing down the entire address space.
  * This method is optional, since pmap_remove will be called for each
  * valid vm_map in the address space later.
  *
  * @param _pmap		physical map
  */
 METHOD void remove_pages {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 } DEFAULT mmu_null_remove_pages;
 
 
 /**
  * @brief Clear the wired attribute from the mappings for the specified range
  * of addresses in the given pmap.
  *
  * @param _pmap		physical map
  * @param _start	virtual range start
  * @param _end		virtual range end
  */
 METHOD void unwire {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 };
 
 
 /**
  * @brief Zero a physical page. It is not assumed that the page is mapped,
  * so a temporary (or direct) mapping may need to be used.
  *
  * @param _pg		physical page
  */
 METHOD void zero_page {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 
 /**
  * @brief Zero a portion of a physical page, starting at a given offset and
  * for a given size (multiples of 512 bytes for 4k pages).
  *
  * @param _pg		physical page
  * @param _off		byte offset from start of page
  * @param _size		size of area to zero
  */
 METHOD void zero_page_area {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 	int		_off;
 	int		_size;
 };
 
 
 /**
  * @brief Extract mincore(2) information from a mapping.
  *
  * @param _pmap		physical map
  * @param _addr		page virtual address
  * @param _locked_pa	page physical address
  *
  * @retval 0		no result
  * @retval non-zero	mincore(2) flag values
  */
 METHOD int mincore {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_addr;
 	vm_paddr_t	*_locked_pa;
 } DEFAULT mmu_null_mincore;
 
 
 /**
  * @brief Perform any operations required to allow a physical map to be used
  * before its address space is accessed.
  *
  * @param _td		thread associated with physical map
  */
 METHOD void activate {
 	mmu_t		_mmu;
 	struct thread	*_td;
 };
 
 /**
  * @brief Perform any operations required to deactivate a physical map,
  * for instance as it is context-switched out.
  *
  * @param _td		thread associated with physical map
  */
 METHOD void deactivate {
 	mmu_t		_mmu;
 	struct thread	*_td;
 } DEFAULT mmu_null_deactivate;
 
 /**
  * @brief Return a hint for the best virtual address to map a tentative
  * virtual address range in a given VM object. The default is to just
  * return the given tentative start address.
  *
  * @param _obj		VM backing object
  * @param _offset	starting offset within the VM object
  * @param _addr		initial guess at virtual address
  * @param _size		size of virtual address range
  */
 METHOD void align_superpage {
 	mmu_t		_mmu;
 	vm_object_t	_obj;
 	vm_ooffset_t	_offset;
 	vm_offset_t	*_addr;
 	vm_size_t	_size;
 } DEFAULT mmu_null_align_superpage;
 
 
 
 
 /**
  * INTERNAL INTERFACES
  */
 
 /**
  * @brief Bootstrap the VM system. At the completion of this routine, the
  * kernel will be running in its own address space with full control over
  * paging.
  *
  * @param _start	start of reserved memory (obsolete ???)
  * @param _end		end of reserved memory (obsolete ???)
  *			XXX I think the intent of these was to allow
  *			the memory used by kernel text+data+bss and
  *			loader variables/load-time kld's to be carved out
  *			of available physical mem.
  *
  */
 METHOD void bootstrap {
 	mmu_t		_mmu;
 	vm_offset_t	_start;
 	vm_offset_t	_end;
 };
 
 /**
  * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
  * for alternate CPUs on SMP systems.
  *
  * @param _ap		Set to 1 if the CPU being set up is an AP
  *
  */
 METHOD void cpu_bootstrap {
 	mmu_t		_mmu;
 	int		_ap;
 };
 
 
 /**
  * @brief Create a kernel mapping for a given physical address range.
  * Called by bus code on behalf of device drivers. The mapping does not
  * have to be a virtual address: it can be a direct-mapped physical address
  * if that is supported by the MMU.
  *
  * @param _pa		start physical address
  * @param _size		size in bytes of mapping
  *
  * @retval addr		address of mapping.
  */
 METHOD void * mapdev {
 	mmu_t		_mmu;
 	vm_paddr_t	_pa;
 	vm_size_t	_size;
 };
 
 /**
  * @brief Create a kernel mapping for a given physical address range.
  * Called by bus code on behalf of device drivers. The mapping does not
  * have to be a virtual address: it can be a direct-mapped physical address
  * if that is supported by the MMU.
  *
  * @param _pa		start physical address
  * @param _size		size in bytes of mapping
  * @param _attr		cache attributes
  *
  * @retval addr		address of mapping.
  */
 METHOD void * mapdev_attr {
 	mmu_t		_mmu;
 	vm_paddr_t	_pa;
 	vm_size_t	_size;
 	vm_memattr_t	_attr;
 } DEFAULT mmu_null_mapdev_attr;
 
 /**
  * @brief Change cache control attributes for a page. Should modify all
  * mappings for that page.
  *
  * @param _m		page to modify
  * @param _ma		new cache control attributes
  */
 METHOD void page_set_memattr {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 	vm_memattr_t	_ma;
 } DEFAULT mmu_null_page_set_memattr;
 
 /**
  * @brief Remove the mapping created by mapdev. Called when a driver
  * is unloaded.
  *
  * @param _va		Mapping address returned from mapdev
  * @param _size		size in bytes of mapping
  */
 METHOD void unmapdev {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 	vm_size_t	_size;
 };
 
 /**
  * @brief Provide a kernel-space pointer that can be used to access the
  * given userland address. The kernel accessible length returned in klen
  * may be less than the requested length of the userland buffer (ulen). If
  * so, retry with a higher address to get access to the later parts of the
  * buffer. Returns EFAULT if no mapping can be made, else zero.
  *
  * @param _pm		PMAP for the user pointer.
  * @param _uaddr	Userland address to map.
  * @param _kaddr	Corresponding kernel address.
  * @param _ulen		Length of user buffer.
  * @param _klen		Available subset of _ulen accessible at *_kaddr.
  */
 METHOD int map_user_ptr {
 	mmu_t		_mmu;
 	pmap_t		_pm;
 	volatile const void *_uaddr;
 	void		**_kaddr;
 	size_t		_ulen;
 	size_t		*_klen;
 };
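
For illustration only (not part of this interface file), a caller honoring the
partial-window contract documented above might look like the sketch below. The
function name, the memcpy()-based copy and the absence of pcb_onfault
protection are simplifications; the real consumers are the copyin()/copyout()
family.

	/* Hypothetical sketch: copy kernel memory out through the user window. */
	static int
	copyout_sketch(pmap_t pm, const char *ksrc, volatile char *udst, size_t len)
	{
		void *kaddr;
		size_t klen;

		while (len > 0) {
			/* May map only a prefix of the request; klen says how much. */
			if (pmap_map_user_ptr(pm, udst, &kaddr, len, &klen) != 0)
				return (EFAULT);
			memcpy(kaddr, ksrc, klen);
			ksrc += klen;
			udst += klen;
			len -= klen;
		}
		return (0);
	}

A real implementation would also arm pcb_onfault (see the trap.c hunk further
below) before touching the mapped window.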
 
 /**
+ * @brief Decode a kernel pointer, as visible to the current thread, by
+ * setting whether it corresponds to a user or kernel address and the
+ * address in the respective memory map to which the kernel-visible
+ * address corresponds. This is essentially the inverse of
+ * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
+ *
+ * @param _addr		kernel-visible address to decode
+ * @param _is_user	set to 1 if the address is a user address, 0 otherwise
+ * @param _decoded_addr	the corresponding address in the user or kernel map
+ *
+ * @retval 0		success
+ * @retval EFAULT	the address could not be decoded
+ */
+METHOD int decode_kernel_ptr {
+	mmu_t		_mmu;
+	vm_offset_t	_addr;
+	int		*_is_user;
+	vm_offset_t	*_decoded_addr;
+};
+
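
The per-MMU implementations of this method are not part of this hunk. As a
sketch only, an AIM-flavoured method could mirror the segment-rebasing logic
that this change removes from trap_pfault() further below (a Book-E version
would simply compare against VM_MAXUSER_ADDRESS); the function name here is
illustrative:

	/* Sketch; reuses the AIM user-window constants seen elsewhere in this diff. */
	static int
	example_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
	    vm_offset_t *decoded_addr)
	{

		if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
			/* Copyin/copyout window: rebase into the user segment. */
			*is_user = 1;
			addr &= ADDR_PIDX | ADDR_POFF;
			addr |= (vm_offset_t)curthread->td_pcb->pcb_cpu.aim.usr_segm
			    << ADDR_SR_SHFT;
		} else
			*is_user = 0;
		*decoded_addr = addr;
		return (0);
	}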
+/**
  * @brief Reverse-map a kernel virtual address
  *
  * @param _va		kernel virtual address to reverse-map
  *
  * @retval pa		physical address corresponding to mapping
  */
 METHOD vm_paddr_t kextract {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 };
 
 
 /**
  * @brief Map a wired page into kernel virtual address space
  *
  * @param _va		mapping virtual address
  * @param _pa		mapping physical address
  */
 METHOD void kenter {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 	vm_paddr_t	_pa;
 };
 
 /**
  * @brief Map a wired page into kernel virtual address space
  *
  * @param _va		mapping virtual address
  * @param _pa		mapping physical address
  * @param _ma		mapping cache control attributes
  */
 METHOD void kenter_attr {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 	vm_paddr_t	_pa;
 	vm_memattr_t	_ma;
 } DEFAULT mmu_null_kenter_attr;
 
 /**
  * @brief Unmap a wired page from kernel virtual address space
  *
  * @param _va		mapped virtual address
  */
 METHOD void kremove {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 };
 
 /**
  * @brief Determine if the given physical address range has been direct-mapped.
  *
  * @param _pa		physical address start
  * @param _size		physical address range size
  *
  * @retval bool		TRUE if the range is direct-mapped.
  */
 METHOD boolean_t dev_direct_mapped {
 	mmu_t		_mmu;
 	vm_paddr_t	_pa;
 	vm_size_t	_size;
 };
 
 
 /**
  * @brief Enforce instruction cache coherency. Typically called after a
  * region of memory has been modified and before execution of or within
  * that region is attempted. Setting breakpoints in a process through
  * ptrace(2) is one example of when the instruction cache needs to be
  * made coherent.
  *
  * @param _pm		the physical map of the virtual address
  * @param _va		the virtual address of the modified region
  * @param _sz		the size of the modified region
  */
 METHOD void sync_icache {
 	mmu_t		_mmu;
 	pmap_t		_pm;
 	vm_offset_t	_va;
 	vm_size_t	_sz;
 };
 
 
 /**
  * @brief Create temporary memory mapping for use by dumpsys().
  *
  * @param _pa		The physical page to map.
  * @param _sz		The requested size of the mapping.
  * @param _va		The virtual address of the mapping.
  */
 METHOD void dumpsys_map {
 	mmu_t		_mmu;
 	vm_paddr_t	_pa;
 	size_t		_sz;
 	void		**_va;
 };
 
 
 /**
  * @brief Remove temporary dumpsys() mapping.
  *
  * @param _pa		The physical page to map.
  * @param _sz		The requested size of the mapping.
  * @param _va		The virtual address of the mapping.
  */
 METHOD void dumpsys_unmap {
 	mmu_t		_mmu;
 	vm_paddr_t	_pa;
 	size_t		_sz;
 	void		*_va;
 };
 
 
 /**
  * @brief Initialize memory chunks for dumpsys.
  */
 METHOD void scan_init {
 	mmu_t		_mmu;
 };
 
 /**
  * @brief Create a temporary thread-local KVA mapping of a single page.
  *
  * @param _pg		The physical page to map
  *
  * @retval addr		The temporary KVA
  */
 METHOD vm_offset_t quick_enter_page {
 	mmu_t		_mmu;
 	vm_page_t	_pg;
 };
 
 /**
  * @brief Undo a mapping created by quick_enter_page
  *
  * @param _va		The mapped KVA
  */
 METHOD void quick_remove_page {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 };
 
 /**
  * @brief Change the specified virtual address range's memory type.
  *
  * @param _va		The virtual base address to change
  *
  * @param _sz		Size of the region to change
  *
  * @param _mode		New mode to set on the VA range
  *
  * @retval error	0 on success, EINVAL or ENOMEM on error.
  */
 METHOD int change_attr {
 	mmu_t		_mmu;
 	vm_offset_t	_va;
 	vm_size_t	_sz;
 	vm_memattr_t	_mode;
 } DEFAULT mmu_null_change_attr;
+
Index: head/sys/powerpc/powerpc/pmap_dispatch.c
===================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c	(revision 328529)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c	(revision 328530)
@@ -1,614 +1,622 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2005 Peter Grehan
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * Dispatch MI pmap calls to the appropriate MMU implementation
  * through a previously registered kernel object.
  *
  * Before pmap_bootstrap() can be called, a CPU module must have
  * called pmap_mmu_install(). This may be called multiple times:
  * the highest priority call will be installed as the default
  * MMU handler when pmap_bootstrap() is called.
  *
  * It is required that mutex_init() be called before pmap_bootstrap(), 
  * as the PMAP layer makes extensive use of mutexes.
  */
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/conf.h>
 #include <sys/lock.h>
 #include <sys/kerneldump.h>
 #include <sys/ktr.h>
 #include <sys/mutex.h>
 #include <sys/systm.h>
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
 
 #include <machine/dump.h>
 #include <machine/md_var.h>
 #include <machine/mmuvar.h>
 #include <machine/smp.h>
 
 #include "mmu_if.h"
 
 static mmu_def_t	*mmu_def_impl;
 static mmu_t		mmu_obj;
 static struct mmu_kobj	mmu_kernel_obj;
 static struct kobj_ops	mmu_kernel_kops;
 
 /*
  * pmap globals
  */
 struct pmap kernel_pmap_store;
 
 vm_offset_t    msgbuf_phys;
 
 vm_offset_t kernel_vm_end;
 vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
 vm_offset_t virtual_avail;
 vm_offset_t virtual_end;
 
 int pmap_bootstrapped;
 
 #ifdef AIM
 int
 pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
 {
 	if (PVO_VADDR(a) < PVO_VADDR(b))
 		return (-1);
 	else if (PVO_VADDR(a) > PVO_VADDR(b))
 		return (1);
 	return (0);
 }
 RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
 #endif
 	
 
 void
 pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
 {
 
 	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
 	    advice);
 	MMU_ADVISE(mmu_obj, pmap, start, end, advice);
 }
 
 void
 pmap_clear_modify(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	MMU_CLEAR_MODIFY(mmu_obj, m);
 }
 
 void
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
     vm_size_t len, vm_offset_t src_addr)
 {
 
 	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
 	    src_pmap, dst_addr, len, src_addr);
 	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
 }
 
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
 	MMU_COPY_PAGE(mmu_obj, src, dst);
 }
 
 void
 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
     vm_offset_t b_offset, int xfersize)
 {
 
 	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
 	    a_offset, mb, b_offset, xfersize);
 	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
 }
 
 int
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
     u_int flags, int8_t psind)
 {
 
 	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
 	    p, prot, flags, psind);
 	return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
 }
 
 void
 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
 
 	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
 	    end, m_start, prot);
 	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
 }
 
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
 	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
 }
 
 vm_paddr_t
 pmap_extract(pmap_t pmap, vm_offset_t va)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
 	return (MMU_EXTRACT(mmu_obj, pmap, va));
 }
 
 vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 
 	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
 	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
 }
 
 void
 pmap_growkernel(vm_offset_t va)
 {
 
 	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
 	MMU_GROWKERNEL(mmu_obj, va);
 }
 
 void
 pmap_init(void)
 {
 
 	CTR1(KTR_PMAP, "%s()", __func__);
 	MMU_INIT(mmu_obj);
 }
 
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	return (MMU_IS_MODIFIED(mmu_obj, m));
 }
 
 boolean_t
 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
 	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
 }
 
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	return (MMU_IS_REFERENCED(mmu_obj, m));
 }
 
 boolean_t
 pmap_ts_referenced(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	return (MMU_TS_REFERENCED(mmu_obj, m));
 }
 
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 {
 
 	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
 	    prot);
 	return (MMU_MAP(mmu_obj, virt, start, end, prot));
 }
 
 void
 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
     vm_pindex_t pindex, vm_size_t size)
 {
 
 	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
 	    object, pindex, size);
 	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
 }
 
 boolean_t
 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
 	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
 }
 
 void
 pmap_page_init(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	MMU_PAGE_INIT(mmu_obj, m);
 }
 
 int
 pmap_page_wired_mappings(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
 }
 
 int
 pmap_pinit(pmap_t pmap)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
 	MMU_PINIT(mmu_obj, pmap);
 	return (1);
 }
 
 void
 pmap_pinit0(pmap_t pmap)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
 	MMU_PINIT0(mmu_obj, pmap);
 }
 
 void
 pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
 {
 
 	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
 	    prot);
 	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
 }
 
 void
 pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
 {
 
 	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
 	MMU_QENTER(mmu_obj, start, m, count);
 }
 
 void
 pmap_qremove(vm_offset_t start, int count)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
 	MMU_QREMOVE(mmu_obj, start, count);
 }
 
 void
 pmap_release(pmap_t pmap)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
 	MMU_RELEASE(mmu_obj, pmap);
 }
 
 void
 pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
 {
 
 	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
 	MMU_REMOVE(mmu_obj, pmap, start, end);
 }
 
 void
 pmap_remove_all(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	MMU_REMOVE_ALL(mmu_obj, m);
 }
 
 void
 pmap_remove_pages(pmap_t pmap)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
 	MMU_REMOVE_PAGES(mmu_obj, pmap);
 }
 
 void
 pmap_remove_write(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	MMU_REMOVE_WRITE(mmu_obj, m);
 }
 
 void
 pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
 {
 
 	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
 	MMU_UNWIRE(mmu_obj, pmap, start, end);
 }
 
 void
 pmap_zero_page(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	MMU_ZERO_PAGE(mmu_obj, m);
 }
 
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 
 	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
 	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
 }
 
 int
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
 	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
 }
 
 void
 pmap_activate(struct thread *td)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
 	MMU_ACTIVATE(mmu_obj, td);
 }
 
 void
 pmap_deactivate(struct thread *td)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
 	MMU_DEACTIVATE(mmu_obj, td);
 }
 
 /*
  *	Increase the starting virtual address of the given mapping if a
  *	different alignment might result in more superpage mappings.
  */
 void
 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
     vm_offset_t *addr, vm_size_t size)
 {
 
 	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
 	    size);
 	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
 }
 
 /*
  * Routines used in machine-dependent code
  */
 void
 pmap_bootstrap(vm_offset_t start, vm_offset_t end)
 {
 	mmu_obj = &mmu_kernel_obj;
 
 	/*
 	 * Take care of compiling the selected class, and
 	 * then statically initialise the MMU object
 	 */
 	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
 	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);
 
 	MMU_BOOTSTRAP(mmu_obj, start, end);
 }
 
 void
 pmap_cpu_bootstrap(int ap)
 {
 	/*
 	 * No KTR here because our console probably doesn't work yet
 	 */
 
 	return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
 }
 
 void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
 	return (MMU_MAPDEV(mmu_obj, pa, size));
 }
 
 void *
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
 {
 
 	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
 	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
 }
 
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
 	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
 	return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
 }
 
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
 	MMU_UNMAPDEV(mmu_obj, va, size);
 }
 
 vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
 
 	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
 	return (MMU_KEXTRACT(mmu_obj, va));
 }
 
 void
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
 	MMU_KENTER(mmu_obj, va, pa);
 }
 
 void
 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 
 	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
 	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
 }
 
 void
 pmap_kremove(vm_offset_t va)
 {
 
 	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
 	return (MMU_KREMOVE(mmu_obj, va));
 }
 
 int
 pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
     size_t ulen, size_t *klen)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
 	return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
 }
 
+int
+pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
+{
+
+	CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
+	return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
 	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
 }
 
 void
 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
  
 	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
 	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
 }
 
 void
 dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
 {
 
 	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
 	return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
 }
 
 void
 dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
 {
 
 	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
 	return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
 }
 
 void
 dumpsys_pa_init(void)
 {
 
 	CTR1(KTR_PMAP, "%s()", __func__);
 	return (MMU_SCAN_INIT(mmu_obj));
 }
 
 vm_offset_t
 pmap_quick_enter_page(vm_page_t m)
 {
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
 	return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
 }
 
 void
 pmap_quick_remove_page(vm_offset_t addr)
 {
 	CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
 	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
 }
 
 int
 pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
 {
 	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
 	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
 }
 
 /*
  * MMU install routines. The highest priority wins; equal priority also
  * overrides, allowing the last one set to win.
  */
 SET_DECLARE(mmu_set, mmu_def_t);
 
 boolean_t
 pmap_mmu_install(char *name, int prio)
 {
 	mmu_def_t	**mmupp, *mmup;
 	static int	curr_prio = 0;
 
 	/*
 	 * Try and locate the MMU kobj corresponding to the name
 	 */
 	SET_FOREACH(mmupp, mmu_set) {
 		mmup = *mmupp;
 
 		if (mmup->name &&
 		    !strcmp(mmup->name, name) &&
 		    (prio >= curr_prio || mmu_def_impl == NULL)) {
 			curr_prio = prio;
 			mmu_def_impl = mmup;
 			return (TRUE);
 		}
 	}
 
 	return (FALSE);
 }
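
As a usage note, a CPU or platform module calls this before pmap_bootstrap();
with equal-or-higher priority overriding, the last suitable registration wins.
The names and priorities below are invented for illustration:

	/* Hypothetical registration sequence; the strongest candidate sticks. */
	pmap_mmu_install("mmu_oea", 0);		/* baseline handler */
	pmap_mmu_install("mmu_g5", 10);		/* preferred handler, higher priority */
	/* pmap_bootstrap() later compiles and installs whichever won. */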
 
 int unmapped_buf_allowed;
Index: head/sys/powerpc/powerpc/trap.c
===================================================================
--- head/sys/powerpc/powerpc/trap.c	(revision 328529)
+++ head/sys/powerpc/powerpc/trap.c	(revision 328530)
@@ -1,907 +1,898 @@
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <sys/proc.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/pioctl.h>
 #include <sys/ptrace.h>
 #include <sys/reboot.h>
 #include <sys/syscall.h>
 #include <sys/sysent.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/uio.h>
 #include <sys/signalvar.h>
 #include <sys/vmmeter.h>
 
 #include <security/audit/audit.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_param.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
 
 #include <machine/_inttypes.h>
 #include <machine/altivec.h>
 #include <machine/cpu.h>
 #include <machine/db_machdep.h>
 #include <machine/fpu.h>
 #include <machine/frame.h>
 #include <machine/pcb.h>
 #include <machine/psl.h>
 #include <machine/trap.h>
 #include <machine/spr.h>
 #include <machine/sr.h>
 
 /* Below matches setjmp.S */
 #define	FAULTBUF_LR	21
 #define	FAULTBUF_R1	1
 #define	FAULTBUF_R2	2
 #define	FAULTBUF_CR	22
 #define	FAULTBUF_R14	3
 
 #define	MOREARGS(sp)	((caddr_t)((uintptr_t)(sp) + \
     sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */
 
 static void	trap_fatal(struct trapframe *frame);
 static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
 		    int user);
 static int	trap_pfault(struct trapframe *frame, int user);
 static int	fix_unaligned(struct thread *td, struct trapframe *frame);
 static int	handle_onfault(struct trapframe *frame);
 static void	syscall(struct trapframe *frame);
 
 #if defined(__powerpc64__) && defined(AIM)
        void	handle_kernel_slb_spill(int, register_t, register_t);
 static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
 extern int	n_slbs;
 #endif
 
 extern vm_offset_t __startkernel;
 
 #ifdef KDB
 int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */
 #endif
 
 struct powerpc_exception {
 	u_int	vector;
 	char	*name;
 };
 
 #ifdef KDTRACE_HOOKS
 #include <sys/dtrace_bsd.h>
 
 int (*dtrace_invop_jump_addr)(struct trapframe *);
 #endif
 
 static struct powerpc_exception powerpc_exceptions[] = {
 	{ EXC_CRIT,	"critical input" },
 	{ EXC_RST,	"system reset" },
 	{ EXC_MCHK,	"machine check" },
 	{ EXC_DSI,	"data storage interrupt" },
 	{ EXC_DSE,	"data segment exception" },
 	{ EXC_ISI,	"instruction storage interrupt" },
 	{ EXC_ISE,	"instruction segment exception" },
 	{ EXC_EXI,	"external interrupt" },
 	{ EXC_ALI,	"alignment" },
 	{ EXC_PGM,	"program" },
 	{ EXC_HEA,	"hypervisor emulation assistance" },
 	{ EXC_FPU,	"floating-point unavailable" },
 	{ EXC_APU,	"auxiliary proc unavailable" },
 	{ EXC_DECR,	"decrementer" },
 	{ EXC_FIT,	"fixed-interval timer" },
 	{ EXC_WDOG,	"watchdog timer" },
 	{ EXC_SC,	"system call" },
 	{ EXC_TRC,	"trace" },
 	{ EXC_FPA,	"floating-point assist" },
 	{ EXC_DEBUG,	"debug" },
 	{ EXC_PERF,	"performance monitoring" },
 	{ EXC_VEC,	"altivec unavailable" },
 	{ EXC_VSX,	"vsx unavailable" },
 	{ EXC_FAC,	"facility unavailable" },
 	{ EXC_ITMISS,	"instruction tlb miss" },
 	{ EXC_DLMISS,	"data load tlb miss" },
 	{ EXC_DSMISS,	"data store tlb miss" },
 	{ EXC_BPT,	"instruction breakpoint" },
 	{ EXC_SMI,	"system management" },
 	{ EXC_VECAST_G4,	"altivec assist" },
 	{ EXC_THRM,	"thermal management" },
 	{ EXC_RUNMODETRC,	"run mode/trace" },
 	{ EXC_LAST,	NULL }
 };
 
 #define ESR_BITMASK							\
     "\20"								\
     "\040b0\037b1\036b2\035b3\034PIL\033PRR\032PTR\031FP"		\
     "\030ST\027b9\026DLK\025ILK\024b12\023b13\022BO\021PIE"		\
     "\020b16\017b17\016b18\015b19\014b20\013b21\012b22\011b23"		\
     "\010SPE\007EPID\006b26\005b27\004b28\003b29\002b30\001b31"
 #define	MCSR_BITMASK							\
     "\20"								\
     "\040MCP\037ICERR\036DCERR\035TLBPERR\034L2MMU_MHIT\033b5\032b6\031b7"	\
     "\030b8\027b9\026b10\025NMI\024MAV\023MEA\022b14\021IF"		\
     "\020LD\017ST\016LDG\015b19\014b20\013b21\012b22\011b23"		\
     "\010b24\007b25\006b26\005b27\004b28\003b29\002TLBSYNC\001BSL2_ERR"
 #define	MSSSR_BITMASK							\
     "\20"								\
     "\040b0\037b1\036b2\035b3\034b4\033b5\032b6\031b7"			\
     "\030b8\027b9\026b10\025b11\024b12\023L2TAG\022L2DAT\021L3TAG"	\
     "\020L3DAT\017APE\016DPE\015TEA\014b20\013b21\012b22\011b23"	\
     "\010b24\007b25\006b26\005b27\004b28\003b29\002b30\001b31"
 
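A brief aside on how these bitmask strings are consumed (the rendered value is
approximate):

	/*
	 * The kernel printf(9) "%b" conversion reads the first byte as the
	 * numeric base ("\20" = 16, i.e. hexadecimal) and each following
	 * entry as a bit number (1 = least significant bit) followed by that
	 * bit's name.  With ESR_ST and ESR_PTR set, the printtrap() call
	 * below would print roughly:  esr = 0x2800000<PTR,ST>
	 */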
 
 static const char *
 trapname(u_int vector)
 {
 	struct	powerpc_exception *pe;
 
 	for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
 		if (pe->vector == vector)
 			return (pe->name);
 	}
 
 	return ("unknown");
 }
 
 static inline bool
 frame_is_trap_inst(struct trapframe *frame)
 {
 #ifdef AIM
 	return (frame->exc == EXC_PGM && frame->srr1 & EXC_PGM_TRAP);
 #else
 	return (frame->exc == EXC_DEBUG || frame->cpu.booke.esr & ESR_PTR);
 #endif
 }
 
 void
 trap(struct trapframe *frame)
 {
 	struct thread	*td;
 	struct proc	*p;
 #ifdef KDTRACE_HOOKS
 	uint32_t inst;
 #endif
 	int		sig, type, user;
 	u_int		ucode;
 	ksiginfo_t	ksi;
 
 	VM_CNT_INC(v_trap);
 
 	td = curthread;
 	p = td->td_proc;
 
 	type = ucode = frame->exc;
 	sig = 0;
 	user = frame->srr1 & PSL_PR;
 
 	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
 	    trapname(type), user ? "user" : "kernel");
 
 #ifdef KDTRACE_HOOKS
 	/*
 	 * A trap can occur while DTrace executes a probe. Before
 	 * executing the probe, DTrace blocks re-scheduling and sets
 	 * a flag in its per-cpu flags to indicate that it doesn't
 	 * want to fault. On returning from the probe, the no-fault
 	 * flag is cleared and finally re-scheduling is enabled.
 	 *
 	 * If the DTrace kernel module has registered a trap handler,
 	 * call it and if it returns non-zero, assume that it has
 	 * handled the trap and modified the trap frame so that this
 	 * function can return normally.
 	 */
 	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
 		return;
 #endif
 
 	if (user) {
 		td->td_pticks = 0;
 		td->td_frame = frame;
 		if (td->td_cowgen != p->p_cowgen)
 			thread_cow_update(td);
 
 		/* User Mode Traps */
 		switch (type) {
 		case EXC_RUNMODETRC:
 		case EXC_TRC:
 			frame->srr1 &= ~PSL_SE;
 			sig = SIGTRAP;
 			ucode = TRAP_TRACE;
 			break;
 
 #if defined(__powerpc64__) && defined(AIM)
 		case EXC_ISE:
 		case EXC_DSE:
 			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
 			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
 				sig = SIGSEGV;
 				ucode = SEGV_MAPERR;
 			}
 			break;
 #endif
 		case EXC_DSI:
 		case EXC_ISI:
 			sig = trap_pfault(frame, 1);
 			if (sig == SIGSEGV)
 				ucode = SEGV_MAPERR;
 			break;
 
 		case EXC_SC:
 			syscall(frame);
 			break;
 
 		case EXC_FPU:
 			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
 			    ("FPU already enabled for thread"));
 			enable_fpu(td);
 			break;
 
 		case EXC_VEC:
 			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
 			    ("Altivec already enabled for thread"));
 			enable_vec(td);
 			break;
 
 		case EXC_VSX:
 			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
 			    ("VSX already enabled for thread"));
 			if (!(td->td_pcb->pcb_flags & PCB_VEC))
 				enable_vec(td);
 			if (!(td->td_pcb->pcb_flags & PCB_FPU))
 				save_fpu(td);
 			td->td_pcb->pcb_flags |= PCB_VSX;
 			enable_fpu(td);
 			break;
 
 		case EXC_FAC:
 			sig = SIGILL;
 			ucode =	ILL_ILLOPC;
 			break;
 
 		case EXC_VECAST_E:
 		case EXC_VECAST_G4:
 		case EXC_VECAST_G5:
 			/*
 			 * We get a VPU assist exception for IEEE mode
 			 * vector operations on denormalized floats.
 			 * Emulating this is a giant pain, so for now,
 			 * just switch off IEEE mode and treat them as
 			 * zero.
 			 */
 
 			save_vec(td);
 			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
 			enable_vec(td);
 			break;
 
 		case EXC_ALI:
 			if (fix_unaligned(td, frame) != 0) {
 				sig = SIGBUS;
 				ucode = BUS_ADRALN;
 			}
 			else
 				frame->srr0 += 4;
 			break;
 
 		case EXC_DEBUG:	/* Single stepping */
 			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
 			frame->srr1 &= ~PSL_DE;
 			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
 			sig = SIGTRAP;
 			ucode = TRAP_TRACE;
 			break;
 
 		case EXC_PGM:
 			/* Identify the trap reason */
 			if (frame_is_trap_inst(frame)) {
 #ifdef KDTRACE_HOOKS
 				inst = fuword32((const void *)frame->srr0);
 				if (inst == 0x0FFFDDDD &&
 				    dtrace_pid_probe_ptr != NULL) {
 					(*dtrace_pid_probe_ptr)(frame);
 					break;
 				}
 #endif
  				sig = SIGTRAP;
 				ucode = TRAP_BRKPT;
 			} else {
 				sig = ppc_instr_emulate(frame, td->td_pcb);
 				if (sig == SIGILL) {
 					if (frame->srr1 & EXC_PGM_PRIV)
 						ucode = ILL_PRVOPC;
 					else if (frame->srr1 & EXC_PGM_ILLEGAL)
 						ucode = ILL_ILLOPC;
 				} else if (sig == SIGFPE)
 					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
 			}
 			break;
 
 		case EXC_MCHK:
 			/*
 			 * Note that this may not be recoverable for the user
 			 * process, depending on the type of machine check,
 			 * but it at least prevents the kernel from dying.
 			 */
 			sig = SIGBUS;
 			ucode = BUS_OBJERR;
 			break;
 
 		default:
 			trap_fatal(frame);
 		}
 	} else {
 		/* Kernel Mode Traps */
 
 		KASSERT(cold || td->td_ucred != NULL,
 		    ("kernel trap doesn't have ucred"));
 		switch (type) {
 		case EXC_PGM:
 #ifdef KDTRACE_HOOKS
 			if (frame_is_trap_inst(frame)) {
 				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
 					if (dtrace_invop_jump_addr != NULL) {
 						dtrace_invop_jump_addr(frame);
 						return;
 					}
 				}
 			}
 #endif
 #ifdef KDB
 			if (db_trap_glue(frame))
 				return;
 #endif
 			break;
 #if defined(__powerpc64__) && defined(AIM)
 		case EXC_DSE:
-			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
+			if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
+			    (frame->dar & SEGMENT_MASK) == USER_ADDR) {
 				__asm __volatile ("slbmte %0, %1" ::
 					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
 					"r"(USER_SLB_SLBE));
 				return;
 			}
 			break;
 #endif
 		case EXC_DSI:
 			if (trap_pfault(frame, 0) == 0)
  				return;
 			break;
 		case EXC_MCHK:
 			if (handle_onfault(frame))
  				return;
 			break;
 		default:
 			break;
 		}
 		trap_fatal(frame);
 	}
 
 	if (sig != 0) {
 		if (p->p_sysent->sv_transtrap != NULL)
 			sig = (p->p_sysent->sv_transtrap)(sig, type);
 		ksiginfo_init_trap(&ksi);
 		ksi.ksi_signo = sig;
 		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
 		/* ksi.ksi_addr = ? */
 		ksi.ksi_trapno = type;
 		trapsignal(td, &ksi);
 	}
 
 	userret(td, frame);
 }
 
 static void
 trap_fatal(struct trapframe *frame)
 {
 
 	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
 #ifdef KDB
 	if ((debugger_on_panic || kdb_active) &&
 	    kdb_trap(frame->exc, 0, frame))
 		return;
 #endif
 	panic("%s trap", trapname(frame->exc));
 }
 
 static void
 printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
 {
 	uint16_t ver;
 #ifdef BOOKE
 	vm_paddr_t pa;
 #endif
 
 	printf("\n");
 	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
 	    user ? "user" : "kernel");
 	printf("\n");
 	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
 	switch (vector) {
 	case EXC_DSE:
 	case EXC_DSI:
 	case EXC_DTMISS:
 		printf("   virtual address = 0x%" PRIxPTR "\n", frame->dar);
 #ifdef AIM
 		printf("   dsisr           = 0x%lx\n",
 		    (u_long)frame->cpu.aim.dsisr);
 #endif
 		break;
 	case EXC_ISE:
 	case EXC_ISI:
 	case EXC_ITMISS:
 		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
 		break;
 	case EXC_MCHK:
 		ver = mfpvr() >> 16;
 #if defined(AIM)
 		if (MPC745X_P(ver))
 			printf("    msssr0         = 0x%b\n",
 			    (int)mfspr(SPR_MSSSR0), MSSSR_BITMASK);
 #elif defined(BOOKE)
 		pa = mfspr(SPR_MCARU);
 		pa = (pa << 32) | (u_register_t)mfspr(SPR_MCAR);
 		printf("   mcsr            = 0x%b\n",
 		    (int)mfspr(SPR_MCSR), MCSR_BITMASK);
 		printf("   mcar            = 0x%jx\n", (uintmax_t)pa);
 #endif
 		break;
 	}
 #ifdef BOOKE
 	printf("   esr             = 0x%b\n",
 	    (int)frame->cpu.booke.esr, ESR_BITMASK);
 #endif
 	printf("   srr0            = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
 	    frame->srr0, frame->srr0 - (register_t)(__startkernel - KERNBASE));
 	printf("   srr1            = 0x%lx\n", (u_long)frame->srr1);
 	printf("   lr              = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
 	    frame->lr, frame->lr - (register_t)(__startkernel - KERNBASE));
 	printf("   curthread       = %p\n", curthread);
 	if (curthread != NULL)
 		printf("          pid = %d, comm = %s\n",
 		    curthread->td_proc->p_pid, curthread->td_name);
 	printf("\n");
 }
 
 /*
  * Handles a fatal fault when we have onfault state to recover.  Returns
  * non-zero if there was onfault recovery state available.
  */
 static int
 handle_onfault(struct trapframe *frame)
 {
 	struct		thread *td;
 	jmp_buf		*fb;
 
 	td = curthread;
 	fb = td->td_pcb->pcb_onfault;
 	if (fb != NULL) {
 		frame->srr0 = (*fb)->_jb[FAULTBUF_LR];
 		frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1];
 		frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2];
 		frame->fixreg[3] = 1;
 		frame->cr = (*fb)->_jb[FAULTBUF_CR];
 		bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14],
 		    18 * sizeof(register_t));
 		td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */
 		return (1);
 	}
 	return (0);
 }
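
For orientation, the producer side of this contract looks roughly like the
sketch below: a fault-tolerant routine points pcb_onfault at a jmp_buf so that
handle_onfault() can unwind a kernel fault back into it. The function name is
hypothetical, and the real copyin()/copyout() family uses a small assembler
helper rather than plain setjmp():

	/* Hypothetical sketch of arming the onfault recovery path. */
	static int
	fetch_user_word_sketch(struct thread *td, const int *uaddr, int *kval)
	{
		jmp_buf env;

		if (setjmp(env) != 0) {
			/* handle_onfault() unwound a fault back to here (r3 == 1). */
			td->td_pcb->pcb_onfault = NULL;
			return (EFAULT);
		}
		td->td_pcb->pcb_onfault = &env;		/* armed */
		*kval = *uaddr;				/* may fault */
		td->td_pcb->pcb_onfault = NULL;		/* disarmed */
		return (0);
	}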
 
 int
 cpu_fetch_syscall_args(struct thread *td)
 {
 	struct proc *p;
 	struct trapframe *frame;
 	struct syscall_args *sa;
 	caddr_t	params;
 	size_t argsz;
 	int error, n, i;
 
 	p = td->td_proc;
 	frame = td->td_frame;
 	sa = &td->td_sa;
 
 	sa->code = frame->fixreg[0];
 	params = (caddr_t)(frame->fixreg + FIRSTARG);
 	n = NARGREG;
 
 	if (sa->code == SYS_syscall) {
 		/*
 		 * code is first argument,
 		 * followed by actual args.
 		 */
 		sa->code = *(register_t *) params;
 		params += sizeof(register_t);
 		n -= 1;
 	} else if (sa->code == SYS___syscall) {
 		/*
 		 * Like syscall, but code is a quad,
 		 * so as to maintain quad alignment
 		 * for the rest of the args.
 		 */
 		if (SV_PROC_FLAG(p, SV_ILP32)) {
 			params += sizeof(register_t);
 			sa->code = *(register_t *) params;
 			params += sizeof(register_t);
 			n -= 2;
 		} else {
 			sa->code = *(register_t *) params;
 			params += sizeof(register_t);
 			n -= 1;
 		}
 	}
 
  	if (p->p_sysent->sv_mask)
 		sa->code &= p->p_sysent->sv_mask;
 	if (sa->code >= p->p_sysent->sv_size)
 		sa->callp = &p->p_sysent->sv_table[0];
 	else
 		sa->callp = &p->p_sysent->sv_table[sa->code];
 
 	sa->narg = sa->callp->sy_narg;
 
 	if (SV_PROC_FLAG(p, SV_ILP32)) {
 		argsz = sizeof(uint32_t);
 
 		for (i = 0; i < n; i++)
 			sa->args[i] = ((u_register_t *)(params))[i] &
 			    0xffffffff;
 	} else {
 		argsz = sizeof(uint64_t);
 
 		for (i = 0; i < n; i++)
 			sa->args[i] = ((u_register_t *)(params))[i];
 	}
 
 	if (sa->narg > n)
 		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
 			       (sa->narg - n) * argsz);
 	else
 		error = 0;
 
 #ifdef __powerpc64__
 	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
 		/* Expand the size of arguments copied from the stack */
 
 		for (i = sa->narg; i >= n; i--)
 			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
 	}
 #endif
 
 	if (error == 0) {
 		td->td_retval[0] = 0;
 		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
 	}
 	return (error);
 }
 
 #include "../../kern/subr_syscall.c"
 
 void
 syscall(struct trapframe *frame)
 {
 	struct thread *td;
 	int error;
 
 	td = curthread;
 	td->td_frame = frame;
 
 #if defined(__powerpc64__) && defined(AIM)
 	/*
 	 * Speculatively restore last user SLB segment, which we know is
 	 * invalid already, since we are likely to do copyin()/copyout().
 	 */
 	if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0)
 		__asm __volatile ("slbmte %0, %1; isync" ::
 		    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
 #endif
 
 	error = syscallenter(td);
 	syscallret(td, error);
 }
 
 #if defined(__powerpc64__) && defined(AIM)
 /* Handle kernel SLB faults -- runs in real mode, all seat belts off */
 void
 handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
 {
 	struct slb *slbcache;
 	uint64_t slbe, slbv;
 	uint64_t esid, addr;
 	int i;
 
 	addr = (type == EXC_ISE) ? srr0 : dar;
 	slbcache = PCPU_GET(slb);
 	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
 	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
 	
 	/* See if the hardware flushed this somehow (can happen in LPARs) */
 	for (i = 0; i < n_slbs; i++)
 		if (slbcache[i].slbe == (slbe | (uint64_t)i))
 			return;
 
 	/* Not in the map, needs to actually be added */
 	slbv = kernel_va_to_slbv(addr);
 	if (slbcache[USER_SLB_SLOT].slbe == 0) {
 		for (i = 0; i < n_slbs; i++) {
 			if (i == USER_SLB_SLOT)
 				continue;
 			if (!(slbcache[i].slbe & SLBE_VALID))
 				goto fillkernslb;
 		}
 
 		if (i == n_slbs)
 			slbcache[USER_SLB_SLOT].slbe = 1;
 	}
 
 	/* Sacrifice a random SLB entry that is not the user entry */
 	i = mftb() % n_slbs;
 	if (i == USER_SLB_SLOT)
 		i = (i+1) % n_slbs;
 
 fillkernslb:
 	/* Write new entry */
 	slbcache[i].slbv = slbv;
 	slbcache[i].slbe = slbe | (uint64_t)i;
 
 	/* Trap handler will restore from cache on exit */
 }
 
 static int 
 handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
 {
 	struct slb *user_entry;
 	uint64_t esid;
 	int i;
 
 	if (pm->pm_slb == NULL)
 		return (-1);
 
 	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
 
 	PMAP_LOCK(pm);
 	user_entry = user_va_to_slb_entry(pm, addr);
 
 	if (user_entry == NULL) {
 		/* allocate_vsid auto-spills it */
 		(void)allocate_user_vsid(pm, esid, 0);
 	} else {
 		/*
 		 * Check that another CPU has not already mapped this.
 		 * XXX: Per-thread SLB caches would be better.
 		 */
 		for (i = 0; i < pm->pm_slb_len; i++)
 			if (pm->pm_slb[i] == user_entry)
 				break;
 
 		if (i == pm->pm_slb_len)
 			slb_insert_user(pm, user_entry);
 	}
 	PMAP_UNLOCK(pm);
 
 	return (0);
 }
 #endif
 
 static int
 trap_pfault(struct trapframe *frame, int user)
 {
 	vm_offset_t	eva, va;
 	struct		thread *td;
 	struct		proc *p;
 	vm_map_t	map;
 	vm_prot_t	ftype;
-	int		rv;
-#ifdef AIM
-	register_t	user_sr;
-#endif
+	int		rv, is_user;
 
 	td = curthread;
 	p = td->td_proc;
 	if (frame->exc == EXC_ISI) {
 		eva = frame->srr0;
 		ftype = VM_PROT_EXECUTE;
 		if (frame->srr1 & SRR1_ISI_PFAULT)
 			ftype |= VM_PROT_READ;
 	} else {
 		eva = frame->dar;
 #ifdef BOOKE
 		if (frame->cpu.booke.esr & ESR_ST)
 #else
 		if (frame->cpu.aim.dsisr & DSISR_STORE)
 #endif
 			ftype = VM_PROT_WRITE;
 		else
 			ftype = VM_PROT_READ;
 	}
 
 	if (user) {
 		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
 		map = &p->p_vmspace->vm_map;
 	} else {
-#ifdef BOOKE
-		if (eva < VM_MAXUSER_ADDRESS) {
-#else
-		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
-#endif
-			map = &p->p_vmspace->vm_map;
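+		/*
+		 * A kernel-mode fault may be on a user address, e.g. from
+		 * inside copyin()/copyout(); let the pmap decode the
+		 * pointer and recover the user virtual address when that
+		 * is the case.
+		 */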
+		rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
+		if (rv != 0)
+			return (SIGSEGV);
 
-#ifdef AIM
-			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-			eva &= ADDR_PIDX | ADDR_POFF;
-			eva |= user_sr << ADDR_SR_SHFT;
-#endif
-		} else {
+		if (is_user)
+			map = &p->p_vmspace->vm_map;
+		else
 			map = kernel_map;
-		}
 	}
 	va = trunc_page(eva);
 
 	/* Fault in the page. */
 	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 	/*
 	 * XXXDTRACE: add dtrace_doubletrap_func here?
 	 */
 
 	if (rv == KERN_SUCCESS)
 		return (0);
 
 	if (!user && handle_onfault(frame))
 		return (0);
 
 	return (SIGSEGV);
 }
 
 /*
  * For now, this only deals with the particular unaligned access case
  * that gcc tends to generate.  Eventually it should handle all of the
  * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
  */
 
 static int
 fix_unaligned(struct thread *td, struct trapframe *frame)
 {
 	struct thread	*fputhread;
 #ifdef	__SPE__
 	uint32_t	inst;
 #endif
 	int		indicator, reg;
 	double		*fpr;
 
 #ifdef __SPE__
 	indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE));
 	if (indicator & ESR_SPE) {
 		if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
 			return (-1);
 		reg = EXC_ALI_SPE_REG(inst);
 		fpr = (double *)td->td_pcb->pcb_vec.vr[reg];
 		fputhread = PCPU_GET(vecthread);
 
 		/* Juggle the SPE to ensure that we've initialized
 		 * the registers, and that their current state is in
 		 * the PCB.
 		 */
 		if (fputhread != td) {
 			if (fputhread)
 				save_vec(fputhread);
 			enable_vec(td);
 		}
 		save_vec(td);
 
 		if (!(indicator & ESR_ST)) {
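 			/*
 			 * A load: copyin() the 64-bit value into the PCB
 			 * image of the register; enable_vec() reloads the
 			 * upper half into the SPE side, while the lower
 			 * half goes back into the GPR in the trap frame.
 			 */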
 			if (copyin((void *)frame->dar, fpr,
 			    sizeof(double)) != 0)
 				return (-1);
 			frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1];
 			enable_vec(td);
 		} else {
 			td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg];
 			if (copyout(fpr, (void *)frame->dar,
 			    sizeof(double)) != 0)
 				return (-1);
 		}
 		return (0);
 	}
 #else
 	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);
 
 	switch (indicator) {
 	case EXC_ALI_LFD:
 	case EXC_ALI_STFD:
 		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
 		fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
 		fputhread = PCPU_GET(fputhread);
 
 		/* Juggle the FPU to ensure that we've initialized
 		 * the FPRs, and that their current state is in
 		 * the PCB.
 		 */
 		if (fputhread != td) {
 			if (fputhread)
 				save_fpu(fputhread);
 			enable_fpu(td);
 		}
 		save_fpu(td);
 
 		if (indicator == EXC_ALI_LFD) {
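 			/*
 			 * A load: copyin() the doubleword into the PCB
 			 * image of the FPR, then let enable_fpu() reload
 			 * the live register file from the PCB.
 			 */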
 			if (copyin((void *)frame->dar, fpr,
 			    sizeof(double)) != 0)
 				return (-1);
 			enable_fpu(td);
 		} else {
 			if (copyout(fpr, (void *)frame->dar,
 			    sizeof(double)) != 0)
 				return (-1);
 		}
 		return (0);
 	}
 #endif
 
 	return (-1);
 }
 
 #ifdef KDB
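 /*
  * Decide whether a kernel-mode exception should enter the debugger:
  * trace, run-mode trace, breakpoint/trap-instruction and DSI exceptions
  * go to kdb_trap(), while DTrace-owned trap instructions are ignored.
  */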
 int
 db_trap_glue(struct trapframe *frame)
 {
 
 	if (!(frame->srr1 & PSL_PR)
 	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
 	    	|| frame_is_trap_inst(frame)
 		|| frame->exc == EXC_BPT
 		|| frame->exc == EXC_DSI)) {
 		int type = frame->exc;
 
 		/* Ignore DTrace traps. */
 		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
 			return (0);
 		if (frame_is_trap_inst(frame)) {
 			type = T_BREAKPOINT;
 		}
 		return (kdb_trap(type, 0, frame));
 	}
 
 	return (0);
 }
 #endif