Index: head/sys/powerpc/include/pmap.h
===================================================================
--- head/sys/powerpc/include/pmap.h	(revision 347077)
+++ head/sys/powerpc/include/pmap.h	(revision 347078)
@@ -1,300 +1,300 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
  *
  * Copyright (C) 2006 Semihalf, Marian Balakowicz
  * All rights reserved.
  *
  * Adapted for Freescale's e500 core CPUs.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
  */
 
 #ifndef _MACHINE_PMAP_H_
 #define _MACHINE_PMAP_H_
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 struct pmap;
 typedef struct pmap *pmap_t;
 
-#if defined(AIM)
-
 #if !defined(NPMAPS)
 #define NPMAPS		32768
 #endif /* !defined(NPMAPS) */
 
 struct slbtnode;
 
 struct pvo_entry {
 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
 #ifndef __powerpc64__
 	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
 #endif
 	RB_ENTRY(pvo_entry) pvo_plink;		/* Link to pmap entries */
 	struct {
 #ifndef __powerpc64__
 		/* 32-bit fields */
-		struct	pte pte;
+		pte_t	    pte;
 #endif
 		/* 64-bit fields */
 		uintptr_t   slot;
 		vm_paddr_t  pa;
 		vm_prot_t   prot;
 	} pvo_pte;
 	pmap_t		pvo_pmap;		/* Owning pmap */
 	vm_offset_t	pvo_vaddr;		/* VA of entry */
 	uint64_t	pvo_vpn;		/* Virtual page number */
 };
 LIST_HEAD(pvo_head, pvo_entry);
 RB_HEAD(pvo_tree, pvo_entry);
 int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
 RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
 
 /* Used by 32-bit PMAP */
 #define PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
 #define PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
 /* Used by 64-bit PMAP */
 #define PVO_HID			0x008UL		/* PVO entry in alternate hash*/
 /* Used by both */
 #define PVO_WIRED		0x010UL		/* PVO entry is wired */
 #define PVO_MANAGED		0x020UL		/* PVO entry is managed */
 #define PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during bootstrap */
 #define PVO_DEAD		0x100UL		/* waiting to be deleted */
 #define PVO_LARGE		0x200UL		/* large page */
 #define PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
 #define PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
 #define PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
 #define PVO_PTEGIDX_CLR(pvo)	\
 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
 #define PVO_PTEGIDX_SET(pvo, i)	\
 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 #define PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
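The PVO_* macros above reuse the low, page-offset bits of pvo_vaddr for bookkeeping: the entry's VA is page-aligned, so the bottom bits are free to hold the PTEG slot index and status flags, and PVO_VADDR() masks them back off. A minimal standalone sketch of that packing, with the constants copied here (ADDR_POFF comes from pte.h) and an arbitrary example address:

    /*
     * Illustration only: mirrors the PVO_* bit layout in a standalone
     * program.  The PTEG slot index and status flags live in the low,
     * otherwise-unused bits of the page-aligned pvo_vaddr.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_POFF          0x00000fffUL   /* page-offset bits (from pte.h) */
    #define PVO_PTEGIDX_MASK   0x007UL        /* which PTEG slot */
    #define PVO_PTEGIDX_VALID  0x008UL        /* slot is valid */
    #define PVO_WIRED          0x010UL
    #define PVO_MANAGED        0x020UL

    int
    main(void)
    {
    	uintptr_t pvo_vaddr = 0x7fff1000UL;	/* page-aligned VA of the entry */

    	/* PVO_PTEGIDX_SET(): remember that the PTE sits in PTEG slot 5. */
    	pvo_vaddr |= 5 | PVO_PTEGIDX_VALID;
    	pvo_vaddr |= PVO_WIRED | PVO_MANAGED;

    	/* PVO_VADDR() and PVO_PTEGIDX_GET() recover the two halves. */
    	printf("va = %#lx, slot = %lu, valid = %d\n",
    	    (unsigned long)(pvo_vaddr & ~ADDR_POFF),
    	    (unsigned long)(pvo_vaddr & PVO_PTEGIDX_MASK),
    	    (pvo_vaddr & PVO_PTEGIDX_VALID) != 0);
    	return (0);
    }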
 
 struct	pmap {
 	struct	pmap_statistics	pm_stats;
 	struct	mtx	pm_mtx;
-
-	#ifdef __powerpc64__
-	struct slbtnode	*pm_slb_tree_root;
-	struct slb	**pm_slb;
-	int		pm_slb_len;
-	#else
-	register_t	pm_sr[16];
-	#endif
 	cpuset_t	pm_active;
+	union {
+#ifdef AIM
+		struct {
+
+			#ifdef __powerpc64__
+			struct slbtnode	*pm_slb_tree_root;
+			struct slb	**pm_slb;
+			int		pm_slb_len;
+			#else
+			register_t	pm_sr[16];
+			#endif
-	struct pmap	*pmap_phys;
-	struct pvo_tree pmap_pvo;
+			struct pmap	*pmap_phys;
+			struct pvo_tree	pmap_pvo;
+		};
+#endif
+#ifdef BOOKE
+		struct {
+			/* TID to identify this pmap entries in TLB */
+			tlbtid_t	pm_tid[MAXCPU];
+
+#ifdef __powerpc64__
+			/*
+			 * Page table directory,
+			 * array of pointers to page directories.
+			 */
+			pte_t **pm_pp2d[PP2D_NENTRIES];
+
+			/* List of allocated pdir bufs (pdir kva regions). */
+			TAILQ_HEAD(, ptbl_buf)	pm_pdir_list;
+#else
+			/*
+			 * Page table directory,
+			 * array of pointers to page tables.
+			 */
+			pte_t		*pm_pdir[PDIR_NENTRIES];
+#endif
+
+			/* List of allocated ptbl bufs (ptbl kva regions). */
+			TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
+		};
+#endif
+	};
 };
 
+struct pv_entry {
+	pmap_t pv_pmap;
+	vm_offset_t pv_va;
+	TAILQ_ENTRY(pv_entry) pv_link;
+};
+typedef struct pv_entry *pv_entry_t;
+
 struct	md_page {
-	volatile int32_t mdpg_attrs;
-	vm_memattr_t	 mdpg_cache_attrs;
-	struct	pvo_head mdpg_pvoh;
+	union {
+		struct {
+			volatile int32_t mdpg_attrs;
+			vm_memattr_t	 mdpg_cache_attrs;
+			struct	pvo_head mdpg_pvoh;
+		};
+		struct {
+			TAILQ_HEAD(, pv_entry)	pv_list;
+			int			pv_tracked;
+		};
+	};
 };
 
+#ifdef AIM
 #define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
+#else
+#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
+#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
+#endif
 
 /*
  * Return the VSID corresponding to a given virtual address.
  * If no VSID is currently defined, it will allocate one, and add
  * it to a free slot if available.
  *
  * NB: The PMAP MUST be locked already.
  */
 uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
 
 /* Lock-free, non-allocating lookup routines */
 uint64_t kernel_va_to_slbv(vm_offset_t va);
 struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);
 
 uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
 void	free_vsid(pmap_t pm, uint64_t esid, int large);
 void	slb_insert_user(pmap_t pm, struct slb *slb);
 void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);
 
 struct slbtnode *slb_alloc_tree(void);
 void	slb_free_tree(pmap_t pm);
 struct slb **slb_alloc_user_cache(void);
 void	slb_free_user_cache(struct slb **);
-
-#elif defined(BOOKE)
-
-struct pmap {
-	struct pmap_statistics	pm_stats;	/* pmap statistics */
-	struct mtx		pm_mtx;		/* pmap mutex */
-	tlbtid_t		pm_tid[MAXCPU];	/* TID to identify this pmap entries in TLB */
-	cpuset_t		pm_active;	/* active on cpus */
-
-#ifdef __powerpc64__
-	/* Page table directory, array of pointers to page directories. */
-	pte_t **pm_pp2d[PP2D_NENTRIES];
-
-	/* List of allocated pdir bufs (pdir kva regions). */
-	TAILQ_HEAD(, ptbl_buf)	pm_pdir_list;
-#else
-	/* Page table directory, array of pointers to page tables. */
-	pte_t		*pm_pdir[PDIR_NENTRIES];
-#endif
-
-	/* List of allocated ptbl bufs (ptbl kva regions). */
-	TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
-};
-
-struct pv_entry {
-	pmap_t pv_pmap;
-	vm_offset_t pv_va;
-	TAILQ_ENTRY(pv_entry) pv_link;
-};
-typedef struct pv_entry *pv_entry_t;
-
-struct md_page {
-	TAILQ_HEAD(, pv_entry) pv_list;
-	int	pv_tracked;
-};
-
-#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
-
-#else
-/*
- * Common pmap members between AIM and BOOKE.
- * libkvm needs pm_stats at the same location between both, as it doesn't define
- * AIM nor BOOKE, and is expected to work across all.
- */
-struct pmap {
-	struct pmap_statistics	pm_stats;	/* pmap statistics */
-	struct mtx		pm_mtx;		/* pmap mutex */
-};
-#endif /* AIM */
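The reworked struct pmap keeps the members every PowerPC pmap shares (pm_stats, pm_mtx, pm_active) up front and overlays the AIM- and Book-E-specific members in an anonymous union, so the common fields land at the same offsets whichever MMU family a kernel, or libkvm (which defines neither AIM nor BOOKE), is built for; struct md_page gets the same union treatment. A standalone sketch of the pattern with placeholder member types rather than the real kernel ones:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the real kernel types; layout idea only. */
    struct pmap_statistics { long resident_count; long wired_count; };
    typedef unsigned long cpuset_t;

    struct pmap {
    	struct pmap_statistics pm_stats;	/* common: always first */
    	int		pm_mtx_placeholder;	/* common */
    	cpuset_t	pm_active;		/* common */
    	union {
    		struct {			/* AIM-only view */
    			void	*pm_slb_tree_root;
    			void	**pm_slb;
    			int	pm_slb_len;
    		};
    		struct {			/* Book-E-only view */
    			unsigned int pm_tid[4];
    			void	**pm_pdir;
    		};
    	};
    };

    int
    main(void)
    {
    	/*
    	 * The common members sit at fixed offsets no matter which half of
    	 * the union a given kernel uses, which is what lets a consumer that
    	 * defines neither AIM nor BOOKE still read pm_stats reliably.
    	 */
    	printf("pm_stats at %zu, MMU-specific union at %zu\n",
    	    offsetof(struct pmap, pm_stats),
    	    offsetof(struct pmap, pm_slb_tree_root));
    	return (0);
    }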
"kernelpmap" : \ "pmap", NULL, MTX_DEF) #define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx) #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) void pmap_bootstrap(vm_offset_t, vm_offset_t); void pmap_kenter(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t); void pmap_kremove(vm_offset_t); void *pmap_mapdev(vm_paddr_t, vm_size_t); void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); void pmap_unmapdev(vm_offset_t, vm_size_t); void pmap_page_set_memattr(vm_page_t, vm_memattr_t); int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t); int pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); int pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded_addr); void pmap_deactivate(struct thread *); vm_paddr_t pmap_kextract(vm_offset_t); int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t); boolean_t pmap_mmu_install(char *name, int prio); #define vtophys(va) pmap_kextract((vm_offset_t)(va)) #define PHYS_AVAIL_SZ 256 /* Allows up to 16GB Ram on pSeries with * logical memory block size of 64MB. * For more Ram increase the lmb or this value. */ extern vm_paddr_t phys_avail[PHYS_AVAIL_SZ]; extern vm_offset_t virtual_avail; extern vm_offset_t virtual_end; extern vm_offset_t msgbuf_phys; extern int pmap_bootstrapped; vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size); void pmap_early_io_unmap(vm_offset_t va, vm_size_t size); void pmap_track_page(pmap_t pmap, vm_offset_t va); static inline int pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused) { return (0); } #endif #endif /* !_MACHINE_PMAP_H_ */ Index: head/sys/powerpc/include/pte.h =================================================================== --- head/sys/powerpc/include/pte.h (revision 347077) +++ head/sys/powerpc/include/pte.h (revision 347078) @@ -1,424 +1,416 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
Index: head/sys/powerpc/include/pte.h
===================================================================
--- head/sys/powerpc/include/pte.h	(revision 347077)
+++ head/sys/powerpc/include/pte.h	(revision 347078)
@@ -1,424 +1,416 @@
 /*-
  * SPDX-License-Identifier: BSD-4-Clause
  *
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $NetBSD: pte.h,v 1.2 1998/08/31 14:43:40 tsubai Exp $
  * $FreeBSD$
  */
 
 #ifndef	_MACHINE_PTE_H_
 #define	_MACHINE_PTE_H_
 
 #if defined(AIM)
 
 /*
  * Page Table Entries
  */
 #ifndef	LOCORE
 
 /* 32-bit PTE */
 struct pte {
 	u_int32_t pte_hi;
 	u_int32_t pte_lo;
 };
 
 struct pteg {
 	struct	pte pt[8];
 };
 
 /* 64-bit (long) PTE */
 struct lpte {
 	u_int64_t pte_hi;
 	u_int64_t pte_lo;
 };
 
 struct lpteg {
 	struct lpte pt[8];
 };
 
 /* Partition table entry */
 struct pate {
 	u_int64_t pagetab;
 	u_int64_t proctab;
 };
 
+typedef	struct pte pte_t;
+typedef	struct lpte lpte_t;
 #endif	/* LOCORE */
 
 /* 32-bit PTE definitions */
 /* High word: */
 #define	PTE_VALID	0x80000000
 #define	PTE_VSID_SHFT	7
 #define	PTE_HID		0x00000040
 #define	PTE_API		0x0000003f
 /* Low word: */
 #define	PTE_RPGN	0xfffff000
 #define	PTE_REF		0x00000100
 #define	PTE_CHG		0x00000080
 #define	PTE_WIMG	0x00000078
 #define	PTE_W		0x00000040
 #define	PTE_I		0x00000020
 #define	PTE_M		0x00000010
 #define	PTE_G		0x00000008
 #define	PTE_PP		0x00000003
 #define	PTE_SO		0x00000000	/* Super. Only       (U: XX, S: RW) */
 #define	PTE_SW		0x00000001	/* Super. Write-Only (U: RO, S: RW) */
 #define	PTE_BW		0x00000002	/* Supervisor        (U: RW, S: RW) */
 #define	PTE_BR		0x00000003	/* Both Read Only    (U: RO, S: RO) */
 #define	PTE_RW		PTE_BW
 #define	PTE_RO		PTE_BR
 
 #define	PTE_EXEC	0x00000200	/* pseudo bit in attrs; page is exec */
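A classic 32-bit AIM translation is two words built from the bits above: pte_hi carries the valid bit, the VSID shifted by PTE_VSID_SHFT and the abbreviated page index (API) taken from the effective address, while pte_lo carries the physical page number plus WIMG and protection bits. A standalone sketch of that composition with the constants copied locally; the VSID, addresses and flag choices are made up for illustration and this is not code from the pmap itself:

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_VALID	0x80000000u
    #define PTE_VSID_SHFT	7
    #define PTE_API	0x0000003fu
    #define PTE_RPGN	0xfffff000u
    #define PTE_M		0x00000010u
    #define PTE_RW	0x00000002u
    #define ADDR_PIDX	0x0ffff000u	/* from the "Extract bits" block below */
    #define ADDR_API_SHFT	22

    int
    main(void)
    {
    	uint32_t vsid = 0x123456;	/* from the segment register / SLB */
    	uint32_t va   = 0x0c345000;	/* effective address, page aligned */
    	uint32_t pa   = 0x01f40000;	/* physical page to map */
    	uint32_t pte_hi, pte_lo;

    	/* High word: valid bit, VSID, abbreviated page index of the EA. */
    	pte_hi = PTE_VALID | (vsid << PTE_VSID_SHFT) |
    	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
    	/* Low word: physical page number, memory coherence, read/write. */
    	pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_RW;

    	printf("pte_hi=%08x pte_lo=%08x rpn=%05x\n",
    	    pte_hi, pte_lo, (pte_lo & PTE_RPGN) >> 12);
    	return (0);
    }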
 
 /* 64-bit PTE definitions */
 /* High quadword: */
 #define LPTE_VSID_SHIFT		12
 #define LPTE_AVPN_MASK		0xFFFFFFFFFFFFFF80ULL
 #define LPTE_API		0x0000000000000F80ULL
 #define LPTE_SWBITS		0x0000000000000078ULL
 #define LPTE_WIRED		0x0000000000000010ULL
 #define LPTE_LOCKED		0x0000000000000008ULL
 #define LPTE_BIG		0x0000000000000004ULL	/* 4kb/16Mb page */
 #define LPTE_HID		0x0000000000000002ULL
 #define LPTE_VALID		0x0000000000000001ULL
 /* Low quadword: */
 #define	EXTEND_PTE(x)	UINT64_C(x)	/* make constants 64-bit */
 #define LPTE_RPGN	0xfffffffffffff000ULL
 #define LPTE_REF	EXTEND_PTE( PTE_REF )
 #define LPTE_CHG	EXTEND_PTE( PTE_CHG )
 #define LPTE_WIMG	EXTEND_PTE( PTE_WIMG )
 #define LPTE_W		EXTEND_PTE( PTE_W )
 #define LPTE_I		EXTEND_PTE( PTE_I )
 #define LPTE_M		EXTEND_PTE( PTE_M )
 #define LPTE_G		EXTEND_PTE( PTE_G )
 #define LPTE_NOEXEC	0x0000000000000004ULL
 #define LPTE_PP		EXTEND_PTE( PTE_PP )
 #define LPTE_SO		EXTEND_PTE( PTE_SO )	/* Super. Only */
 #define LPTE_SW		EXTEND_PTE( PTE_SW )	/* Super. Write-Only */
 #define LPTE_BW		EXTEND_PTE( PTE_BW )	/* Supervisor */
 #define LPTE_BR		EXTEND_PTE( PTE_BR )	/* Both Read Only */
 #define LPTE_RW		LPTE_BW
 #define LPTE_RO		LPTE_BR
 
 /* POWER ISA 3.0 Radix Table Definitions */
 #define	RPTE_VALID		0x8000000000000000ULL
 #define	RPTE_LEAF		0x4000000000000000ULL	/* is a PTE: always 1 */
 #define	RPTE_SW0		0x2000000000000000ULL
 #define	RPTE_RPN_MASK		0x00FFFFFFFFFFF000ULL
 #define	RPTE_RPN_SHIFT		12
 #define	RPTE_SW1		0x0000000000000800ULL
 #define	RPTE_SW2		0x0000000000000400ULL
 #define	RPTE_SW3		0x0000000000000200ULL
 #define	RPTE_R			0x0000000000000100ULL
 #define	RPTE_C			0x0000000000000080ULL
 #define	RPTE_ATTR_MASK		0x0000000000000030ULL
 #define	RPTE_ATTR_MEM		0x0000000000000000ULL	/* PTE M */
 #define	RPTE_ATTR_SAO		0x0000000000000010ULL	/* PTE WIM */
 #define	RPTE_ATTR_GUARDEDIO	0x0000000000000020ULL	/* PTE IMG */
 #define	RPTE_ATTR_UNGUARDEDIO	0x0000000000000030ULL	/* PTE IM */
 #define	RPTE_EAA_MASK		0x000000000000000FULL
 #define	RPTE_EAA_P		0x0000000000000008ULL	/* Supervisor only */
 #define	RPTE_EAA_R		0x0000000000000004ULL	/* Read allowed */
 #define	RPTE_EAA_W		0x0000000000000002ULL	/* Write (+read) */
 #define	RPTE_EAA_X		0x0000000000000001ULL	/* Execute allowed */
 
 #define	RPDE_VALID		RPTE_VALID
 #define	RPDE_LEAF		RPTE_LEAF		/* is a PTE: always 0 */
 #define	RPDE_NLB_MASK		0x0FFFFFFFFFFFFF00ULL
 #define	RPDE_NLB_SHIFT		8
 #define	RPDE_NLS_MASK		0x000000000000001FULL
-
-#ifndef	LOCORE
-typedef	struct pte pte_t;
-typedef	struct lpte lpte_t;
-#endif	/* LOCORE */
-
 /*
  * Extract bits from address
  */
 #define	ADDR_SR_SHFT	28
 #define	ADDR_PIDX	0x0ffff000UL
 #define	ADDR_PIDX_SHFT	12
 #define	ADDR_API_SHFT	22
 #define	ADDR_API_SHFT64	16
 #define	ADDR_POFF	0x00000fffUL
 
 /*
  * Bits in DSISR:
  */
 #define	DSISR_DIRECT	0x80000000
 #define	DSISR_NOTFOUND	0x40000000
 #define	DSISR_PROTECT	0x08000000
 #define	DSISR_INVRX	0x04000000
 #define	DSISR_STORE	0x02000000
 #define	DSISR_DABR	0x00400000
 #define	DSISR_SEGMENT	0x00200000
 #define	DSISR_EAR	0x00100000
 
 /*
  * Bits in SRR1 on ISI:
  */
 #define	ISSRR1_NOTFOUND	0x40000000
 #define	ISSRR1_DIRECT	0x10000000
 #define	ISSRR1_PROTECT	0x08000000
 #define	ISSRR1_SEGMENT	0x00200000
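The RPTE_*/RPDE_* constants above describe POWER ISA 3.0 radix entries: a valid leaf carries the real page number under RPTE_RPN_MASK plus reference/change and EAA permission bits, while a non-leaf directory entry carries the next-level base and size under RPDE_NLB_MASK and RPDE_NLS_MASK. A standalone sketch that decodes one 64-bit entry with those masks (constants copied here; the sample values are invented):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RPTE_VALID	0x8000000000000000ULL
    #define RPTE_LEAF	0x4000000000000000ULL
    #define RPTE_RPN_MASK	0x00FFFFFFFFFFF000ULL
    #define RPTE_EAA_R	0x0000000000000004ULL
    #define RPTE_EAA_W	0x0000000000000002ULL
    #define RPTE_EAA_X	0x0000000000000001ULL
    #define RPDE_NLB_MASK	0x0FFFFFFFFFFFFF00ULL
    #define RPDE_NLS_MASK	0x000000000000001FULL

    static void
    decode(uint64_t e)
    {
    	if (!(e & RPTE_VALID)) {
    		printf("invalid\n");
    	} else if (e & RPTE_LEAF) {
    		/* Leaf: physical page plus read/write/execute permission. */
    		printf("leaf: pa=%#" PRIx64 "%s%s%s\n", e & RPTE_RPN_MASK,
    		    (e & RPTE_EAA_R) ? " r" : "",
    		    (e & RPTE_EAA_W) ? " w" : "",
    		    (e & RPTE_EAA_X) ? " x" : "");
    	} else {
    		/* Directory: base and index-bit count of the next level. */
    		printf("directory: next level at %#" PRIx64 ", %" PRIu64
    		    " index bits\n", e & RPDE_NLB_MASK, e & RPDE_NLS_MASK);
    	}
    }

    int
    main(void)
    {
    	decode(0xc000000001f43007ULL);	/* made-up leaf: valid, r/w/x */
    	decode(0x8000000002a4b109ULL);	/* made-up directory, NLS = 9 */
    	return (0);
    }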
 
 #else /* BOOKE */
 
 #include
 
+/*
+ * Flags for pte_remove() routine.
+ */
+#define	PTBL_HOLD	0x00000001	/* do not unhold ptbl pages */
+#define	PTBL_UNHOLD	0x00000002	/* unhold and attempt to free ptbl pages */
+
+#define	PTBL_HOLD_FLAG(pmap)	(((pmap) == kernel_pmap) ? PTBL_HOLD : PTBL_UNHOLD)
+
+/*
+ * Page Table Entry definitions and macros.
+ *
+ * RPN need only be 32-bit because Book-E has 36-bit addresses, and the smallest
+ * page size is 4k (12-bit mask), so RPN can really fit into 24 bits.
+ */
+#ifndef	LOCORE
+typedef uint64_t pte_t;
+#endif
+
+/* RPN mask, TLB0 4K pages */
+#define	PTE_PA_MASK	PAGE_MASK
+
+#if defined(BOOKE_E500)
+
+/* PTE bits assigned to MAS2, MAS3 flags */
+#define	PTE_MAS2_SHIFT	19
+#define	PTE_W		(MAS2_W << PTE_MAS2_SHIFT)
+#define	PTE_I		(MAS2_I << PTE_MAS2_SHIFT)
+#define	PTE_M		(MAS2_M << PTE_MAS2_SHIFT)
+#define	PTE_G		(MAS2_G << PTE_MAS2_SHIFT)
+#define	PTE_MAS2_MASK	(MAS2_G | MAS2_M | MAS2_I | MAS2_W)
+
+#define	PTE_MAS3_SHIFT	2
+#define	PTE_UX		(MAS3_UX << PTE_MAS3_SHIFT)
+#define	PTE_SX		(MAS3_SX << PTE_MAS3_SHIFT)
+#define	PTE_UW		(MAS3_UW << PTE_MAS3_SHIFT)
+#define	PTE_SW		(MAS3_SW << PTE_MAS3_SHIFT)
+#define	PTE_UR		(MAS3_UR << PTE_MAS3_SHIFT)
+#define	PTE_SR		(MAS3_SR << PTE_MAS3_SHIFT)
+#define	PTE_MAS3_MASK	((MAS3_UX | MAS3_SX | MAS3_UW \
+			| MAS3_SW | MAS3_UR | MAS3_SR) << PTE_MAS3_SHIFT)
+
+#define	PTE_PS_SHIFT	8
+#define	PTE_PS_4KB	(2 << PTE_PS_SHIFT)
+
+#elif defined(BOOKE_PPC4XX)
+
+#define	PTE_WL1		TLB_WL1
+#define	PTE_IL2I	TLB_IL2I
+#define	PTE_IL2D	TLB_IL2D
+
+#define	PTE_W		TLB_W
+#define	PTE_I		TLB_I
+#define	PTE_M		TLB_M
+#define	PTE_G		TLB_G
+
+#define	PTE_UX		TLB_UX
+#define	PTE_SX		TLB_SX
+#define	PTE_UW		TLB_UW
+#define	PTE_SW		TLB_SW
+#define	PTE_UR		TLB_UR
+#define	PTE_SR		TLB_SR
+
+#endif
+
+/* Other PTE flags */
+#define	PTE_VALID	0x00000001	/* Valid */
+#define	PTE_MODIFIED	0x00001000	/* Modified */
+#define	PTE_WIRED	0x00002000	/* Wired */
+#define	PTE_MANAGED	0x00000002	/* Managed */
+#define	PTE_REFERENCED	0x00040000	/* Referenced */
+
+/*
+ * Page Table Entry definitions and macros.
+ *
+ * We use the hardware page table entry format:
+ *
+ * 63       24 23 19 18 17 14  13 12 11  8  7  6  5  4  3  2  1  0
+ * ---------------------------------------------------------------
+ * ARPN(12:51) WIMGE  R U0:U3 SW0 C  PSIZE UX SX UW SW UR SR SW1 V
+ * ---------------------------------------------------------------
+ */
+
+/* PTE fields. */
+#define	PTE_TSIZE_SHIFT		(63-54)
+#define	PTE_TSIZE_MASK		0x7
+#define	PTE_TSIZE_SHIFT_DIRECT	(63-55)
+#define	PTE_TSIZE_MASK_DIRECT	0xf
+#define	PTE_PS_DIRECT(ps)	(ps<<PTE_TSIZE_SHIFT_DIRECT)
+#define	PTE_PS(ps)		(ps<<PTE_TSIZE_SHIFT)
+#define	PTE_TSIZE(pte)		(int)((*pte >> PTE_TSIZE_SHIFT) & PTE_TSIZE_MASK)
+#define	PTE_TSIZE_DIRECT(pte)	(int)((*pte >> PTE_TSIZE_SHIFT_DIRECT) & PTE_TSIZE_MASK_DIRECT)
+
+/* Macro argument must of pte_t type. */
+#define	PTE_ARPN_SHIFT		12
+#define	PTE_FLAGS_MASK		0x00ffffff
+#define	PTE_RPN_FROM_PA(pa)	(((pa) & ~PAGE_MASK) << PTE_ARPN_SHIFT)
+#define	PTE_PA(pte)		((vm_paddr_t)(*pte >> PTE_ARPN_SHIFT) & ~PAGE_MASK)
+#define	PTE_ISVALID(pte)	((*pte) & PTE_VALID)
+#define	PTE_ISWIRED(pte)	((*pte) & PTE_WIRED)
+#define	PTE_ISMANAGED(pte)	((*pte) & PTE_MANAGED)
+#define	PTE_ISMODIFIED(pte)	((*pte) & PTE_MODIFIED)
+#define	PTE_ISREFERENCED(pte)	((*pte) & PTE_REFERENCED)
+
+#endif /* BOOKE */
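On Book-E a pte_t is a single 64-bit word: PTE_RPN_FROM_PA() parks the physical page number above the low flag bits by shifting it up by PTE_ARPN_SHIFT, PTE_PA() shifts it back down, and the PTE_IS*() macros test the software-maintained flags. A standalone userland sketch that copies the relevant macros so it compiles on its own, assuming 4 KiB pages (PAGE_MASK 0xfff):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pte_t;
    typedef uint64_t vm_paddr_t;

    #define PAGE_MASK		0xfffUL
    #define PTE_ARPN_SHIFT	12
    #define PTE_VALID		0x00000001
    #define PTE_MANAGED		0x00000002
    #define PTE_MODIFIED	0x00001000
    #define PTE_REFERENCED	0x00040000

    #define PTE_RPN_FROM_PA(pa)	(((pa) & ~PAGE_MASK) << PTE_ARPN_SHIFT)
    #define PTE_PA(pte)		((vm_paddr_t)(*(pte) >> PTE_ARPN_SHIFT) & ~PAGE_MASK)
    #define PTE_ISVALID(pte)	((*(pte)) & PTE_VALID)
    #define PTE_ISMODIFIED(pte)	((*(pte)) & PTE_MODIFIED)

    int
    main(void)
    {
    	vm_paddr_t pa = 0x2f5000;
    	pte_t pte;

    	/* Compose: physical page number above the flag bits, plus flags. */
    	pte = PTE_RPN_FROM_PA(pa) | PTE_VALID | PTE_MANAGED | PTE_REFERENCED;

    	/* Decode: the flag bits fall away when the RPN is shifted back down. */
    	printf("pa=%#" PRIx64 " valid=%d modified=%d\n",
    	    PTE_PA(&pte), PTE_ISVALID(&pte) != 0, PTE_ISMODIFIED(&pte) != 0);
    	return (0);
    }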
+
+/* Book-E page table format, broken out for the generic pmap.h. */
 #ifdef __powerpc64__
 
 #include
 
 /*
  * The virtual address is:
  *
  * 4K page size
  *   +-----+-----+-----+-------+-------------+-------------+----------------+
  *   |  -  |p2d#h|  -  | p2d#l |     dir#    |     pte#    | off in 4K page |
  *   +-----+-----+-----+-------+-------------+-------------+----------------+
  *   63  62 61  60 59    40 39    30 29    ^ 21 20       ^ 12 11           0
  *                                         |             |
  *                               index in 1 page of pointers
  *
  * 1st level - pointers to page table directory (pp2d)
  *
  * pp2d consists of PP2D_NENTRIES entries, each being a pointer to
  * second level entity, i.e. the page table directory (pdir).
  */
-#define HARDWARE_WALKER
 #define PP2D_H_H	61
 #define PP2D_H_L	60
 #define PP2D_L_H	39
 #define PP2D_L_L	30	/* >30 would work with no page table pool */
-#ifndef LOCORE
-#define PP2D_SIZE	(1UL << PP2D_L_L)	/* va range mapped by pp2d */
-#else
 #define PP2D_SIZE	(1 << PP2D_L_L)	/* va range mapped by pp2d */
-#endif
 #define PP2D_L_SHIFT	PP2D_L_L
 #define PP2D_L_NUM	(PP2D_L_H-PP2D_L_L+1)
 #define PP2D_L_MASK	((1<<PP2D_L_NUM)-1)
 #define PP2D_H_SHIFT	(PP2D_H_L-PP2D_L_NUM)
 #define PP2D_H_NUM	(PP2D_H_H-PP2D_H_L+1)
 #define PP2D_H_MASK	(((1<<PP2D_H_NUM)-1)<<PP2D_L_NUM)
 #define PP2D_IDX(va)	(((va >> PP2D_H_SHIFT) & PP2D_H_MASK) | ((va >> PP2D_L_SHIFT) & PP2D_L_MASK))
 
 #define PP2D_NENTRIES	(1<<(PP2D_L_NUM+PP2D_H_NUM))
 #define PP2D_ENTRY_SHIFT 3	/* log2 (sizeof(struct pte_entry **)) */
 
 /*
  * 2nd level - page table directory (pdir)
  *
  * pdir consists of PDIR_NENTRIES entries, each being a pointer to
  * second level entity, i.e. the actual page table (ptbl).
  */
 #define PDIR_H		(PP2D_L_L-1)
 #define PDIR_L		21
 #define PDIR_NUM	(PDIR_H-PDIR_L+1)
 #define PDIR_SIZE	(1 << PDIR_L)	/* va range mapped by pdir */
 #define PDIR_MASK	((1<<PDIR_NUM)-1)
 #define PDIR_SHIFT	PDIR_L
 #define PDIR_NENTRIES	(1<<PDIR_NUM)
 #define PDIR_IDX(va)	(((va) >> PDIR_SHIFT) & PDIR_MASK)
 #define PDIR_ENTRY_SHIFT 3	/* log2 (sizeof(struct pte_entry *)) */
 #define PDIR_PAGES	((PDIR_NENTRIES * (1<<PDIR_ENTRY_SHIFT)) / PAGE_SIZE)
 
 /*
  * 3rd level - page table (ptbl)
  */
 #define PTBL_H		(PDIR_L-1)
 #define PTBL_L		PAGE_SHIFT
 #define PTBL_NUM	(PTBL_H-PTBL_L+1)
 #define PTBL_SIZE	(1 << PTBL_L)	/* va range mapped by ptbl entry */
 #define PTBL_MASK	((1<<PTBL_NUM)-1)
 #define PTBL_SHIFT	PTBL_L
 #define PTBL_NENTRIES	(1<<PTBL_NUM)
 #define PTBL_IDX(va)	((va >> PTBL_SHIFT) & PTBL_MASK)
 #define PTBL_ENTRY_SHIFT 3	/* log2 (sizeof (struct pte_entry)) */
 #define PTBL_PAGES	((PTBL_NENTRIES * (1<<PTBL_ENTRY_SHIFT)) / PAGE_SIZE)
 
 #else
 /*
  * 1st level - page table directory (pdir)
  *
  * pdir consists of 1024 entries, each being a pointer to
  * second level entity, i.e. the actual page table (ptbl).
  */
 #define PDIR_SHIFT	22
 #define PDIR_SIZE	(1 << PDIR_SHIFT)	/* va range mapped by pdir */
 #define PDIR_MASK	(~(PDIR_SIZE - 1))
 #define PDIR_NENTRIES	1024			/* number of page tables in pdir */
 
 /* Returns pdir entry number for given va */
 #define PDIR_IDX(va)	((va) >> PDIR_SHIFT)
 #define PDIR_ENTRY_SHIFT 2	/* entry size is 2^2 = 4 bytes */
 
 /*
  * 2nd level - page table (ptbl)
  *
  * Page table covers 1024 page table entries. Page
  * table entry (pte) is 32 bit wide and defines mapping
  * for a single page.
  */
 #define PTBL_SHIFT	PAGE_SHIFT
 #define PTBL_SIZE	PAGE_SIZE	/* va range mapped by ptbl entry */
 #define PTBL_MASK	((PDIR_SIZE - 1) & ~((1 << PAGE_SHIFT) - 1))
 #define PTBL_NENTRIES	1024		/* number of pages mapped by ptbl */
 
 /* Returns ptbl entry number for given va */
 #define PTBL_IDX(va)	(((va) & PTBL_MASK) >> PTBL_SHIFT)
 
 /* Size of ptbl in pages, 1024 entries, each sizeof(struct pte_entry). */
 #define PTBL_PAGES	2
 #define PTBL_ENTRY_SHIFT 3	/* entry size is 2^3 = 8 bytes */
 
 #endif
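With the 64-bit Book-E geometry above, a virtual address resolves in three steps: PP2D_IDX() selects a slot in the root array of pdir pointers (its index is split into a high and a low field of the VA), PDIR_IDX() selects a page table within that pdir, and PTBL_IDX() selects the final pte_t. A standalone sketch that mirrors those index macros and prints the walk for one arbitrary address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Geometry mirrored from the definitions above (64-bit Book-E, 4K pages). */
    #define PAGE_SHIFT	12
    #define PP2D_H_H	61
    #define PP2D_H_L	60
    #define PP2D_L_H	39
    #define PP2D_L_L	30
    #define PP2D_L_SHIFT	PP2D_L_L
    #define PP2D_L_NUM	(PP2D_L_H - PP2D_L_L + 1)
    #define PP2D_L_MASK	((1 << PP2D_L_NUM) - 1)
    #define PP2D_H_SHIFT	(PP2D_H_L - PP2D_L_NUM)
    #define PP2D_H_NUM	(PP2D_H_H - PP2D_H_L + 1)
    #define PP2D_H_MASK	(((1 << PP2D_H_NUM) - 1) << PP2D_L_NUM)
    #define PP2D_IDX(va)	(((va >> PP2D_H_SHIFT) & PP2D_H_MASK) | \
    			 ((va >> PP2D_L_SHIFT) & PP2D_L_MASK))
    #define PDIR_L	21
    #define PDIR_NUM	(PP2D_L_L - 1 - PDIR_L + 1)
    #define PDIR_IDX(va)	((va >> PDIR_L) & ((1 << PDIR_NUM) - 1))
    #define PTBL_NUM	(PDIR_L - 1 - PAGE_SHIFT + 1)
    #define PTBL_IDX(va)	((va >> PAGE_SHIFT) & ((1 << PTBL_NUM) - 1))

    int
    main(void)
    {
    	/* Arbitrary VA: root array -> pdir -> ptbl -> page offset. */
    	uint64_t va = 0x3001234567000ULL;

    	printf("pp2d[%" PRIu64 "] -> pdir[%" PRIu64 "] -> ptbl[%" PRIu64 "], "
    	    "offset %#" PRIx64 "\n",
    	    (uint64_t)PP2D_IDX(va), (uint64_t)PDIR_IDX(va),
    	    (uint64_t)PTBL_IDX(va), va & ((1ULL << PAGE_SHIFT) - 1));
    	return (0);
    }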
-
-/*
- * Flags for pte_remove() routine.
- */
-#define	PTBL_HOLD	0x00000001	/* do not unhold ptbl pages */
-#define	PTBL_UNHOLD	0x00000002	/* unhold and attempt to free ptbl pages */
-
-#define	PTBL_HOLD_FLAG(pmap)	(((pmap) == kernel_pmap) ? PTBL_HOLD : PTBL_UNHOLD)
-
-/*
- * Page Table Entry definitions and macros.
- *
- * RPN need only be 32-bit because Book-E has 36-bit addresses, and the smallest
- * page size is 4k (12-bit mask), so RPN can really fit into 24 bits.
- */
-#ifndef	LOCORE
-typedef uint64_t pte_t;
-#endif
-
-/* RPN mask, TLB0 4K pages */
-#define	PTE_PA_MASK	PAGE_MASK
-
-#if defined(BOOKE_E500)
-
-/* PTE bits assigned to MAS2, MAS3 flags */
-#define	PTE_MAS2_SHIFT	19
-#define	PTE_W		(MAS2_W << PTE_MAS2_SHIFT)
-#define	PTE_I		(MAS2_I << PTE_MAS2_SHIFT)
-#define	PTE_M		(MAS2_M << PTE_MAS2_SHIFT)
-#define	PTE_G		(MAS2_G << PTE_MAS2_SHIFT)
-#define	PTE_MAS2_MASK	(MAS2_G | MAS2_M | MAS2_I | MAS2_W)
-
-#define	PTE_MAS3_SHIFT	2
-#define	PTE_UX		(MAS3_UX << PTE_MAS3_SHIFT)
-#define	PTE_SX		(MAS3_SX << PTE_MAS3_SHIFT)
-#define	PTE_UW		(MAS3_UW << PTE_MAS3_SHIFT)
-#define	PTE_SW		(MAS3_SW << PTE_MAS3_SHIFT)
-#define	PTE_UR		(MAS3_UR << PTE_MAS3_SHIFT)
-#define	PTE_SR		(MAS3_SR << PTE_MAS3_SHIFT)
-#define	PTE_MAS3_MASK	((MAS3_UX | MAS3_SX | MAS3_UW \
-			| MAS3_SW | MAS3_UR | MAS3_SR) << PTE_MAS3_SHIFT)
-
-#define	PTE_PS_SHIFT	8
-#define	PTE_PS_4KB	(2 << PTE_PS_SHIFT)
-
-#elif defined(BOOKE_PPC4XX)
-
-#define	PTE_WL1		TLB_WL1
-#define	PTE_IL2I	TLB_IL2I
-#define	PTE_IL2D	TLB_IL2D
-
-#define	PTE_W		TLB_W
-#define	PTE_I		TLB_I
-#define	PTE_M		TLB_M
-#define	PTE_G		TLB_G
-
-#define	PTE_UX		TLB_UX
-#define	PTE_SX		TLB_SX
-#define	PTE_UW		TLB_UW
-#define	PTE_SW		TLB_SW
-#define	PTE_UR		TLB_UR
-#define	PTE_SR		TLB_SR
-
-#endif
-
-/* Other PTE flags */
-#define	PTE_VALID	0x00000001	/* Valid */
-#define	PTE_MODIFIED	0x00001000	/* Modified */
-#define	PTE_WIRED	0x00002000	/* Wired */
-#define	PTE_MANAGED	0x00000002	/* Managed */
-#define	PTE_REFERENCED	0x00040000	/* Referenced */
-
-/*
- * Page Table Entry definitions and macros.
- *
- * We use the hardware page table entry format:
- *
- * 63       24 23 19 18 17 14  13 12 11  8  7  6  5  4  3  2  1  0
- * ---------------------------------------------------------------
- * ARPN(12:51) WIMGE  R U0:U3 SW0 C  PSIZE UX SX UW SW UR SR SW1 V
- * ---------------------------------------------------------------
- */
-
-/* PTE fields. */
-#define	PTE_TSIZE_SHIFT		(63-54)
-#define	PTE_TSIZE_MASK		0x7
-#define	PTE_TSIZE_SHIFT_DIRECT	(63-55)
-#define	PTE_TSIZE_MASK_DIRECT	0xf
-#define	PTE_PS_DIRECT(ps)	(ps<<PTE_TSIZE_SHIFT_DIRECT)
-#define	PTE_PS(ps)		(ps<<PTE_TSIZE_SHIFT)
-#define	PTE_TSIZE(pte)		(int)((*pte >> PTE_TSIZE_SHIFT) & PTE_TSIZE_MASK)
-#define	PTE_TSIZE_DIRECT(pte)	(int)((*pte >> PTE_TSIZE_SHIFT_DIRECT) & PTE_TSIZE_MASK_DIRECT)
-
-/* Macro argument must of pte_t type. */
-#define	PTE_ARPN_SHIFT		12
-#define	PTE_FLAGS_MASK		0x00ffffff
-#define	PTE_RPN_FROM_PA(pa)	(((pa) & ~PAGE_MASK) << PTE_ARPN_SHIFT)
-#define	PTE_PA(pte)		((vm_paddr_t)(*pte >> PTE_ARPN_SHIFT) & ~PAGE_MASK)
-#define	PTE_ISVALID(pte)	((*pte) & PTE_VALID)
-#define	PTE_ISWIRED(pte)	((*pte) & PTE_WIRED)
-#define	PTE_ISMANAGED(pte)	((*pte) & PTE_MANAGED)
-#define	PTE_ISMODIFIED(pte)	((*pte) & PTE_MODIFIED)
-#define	PTE_ISREFERENCED(pte)	((*pte) & PTE_REFERENCED)
-
-#endif /* BOOKE */
 
 #endif /* _MACHINE_PTE_H_ */