Index: head/lib/libkvm/kvm_arm.c
===================================================================
--- head/lib/libkvm/kvm_arm.c	(revision 295751)
+++ head/lib/libkvm/kvm_arm.c	(revision 295752)
@@ -1,270 +1,270 @@
/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#ifdef __arm__
#include
#endif

#include "kvm_private.h"
#include "kvm_arm.h"

struct vmstate {
	arm_pd_entry_t *l1pt;
	size_t phnum;
	GElf_Phdr *phdr;
};

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	p = vm->phdr;
	n = vm->phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

static void
_arm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

static int
_arm_probe(kvm_t *kd)
{
	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
	    !_kvm_is_minidump(kd));
}

static int
_arm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct kvm_nlist nl[2];
	kvaddr_t kernbase;
	arm_physaddr_t physaddr, pa;
	arm_pd_entry_t *l1pt;
	size_t i;
	int found;

	if (kd->rawdump) {
		_kvm_err(kd, kd->program, "raw dumps not supported on arm");
		return (-1);
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;

	if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
		return (-1);

	found = 0;
	for (i = 0; i < vm->phnum; i++) {
		if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
			kernbase = vm->phdr[i].p_vaddr;
			physaddr = vm->phdr[i].p_paddr;
			found = 1;
			break;
		}
	}

	nl[1].n_name = NULL;
	if (!found) {
		nl[0].n_name = "kernbase";
		if (kvm_nlist2(kd, nl) != 0) {
#ifdef __arm__
			kernbase = KERNBASE;
#else
			_kvm_err(kd, kd->program, "cannot resolve kernbase");
			return (-1);
#endif
		} else
			kernbase = nl[0].n_value;

		nl[0].n_name = "physaddr";
		if (kvm_nlist2(kd, nl) != 0) {
			_kvm_err(kd, kd->program, "couldn't get phys addr");
			return (-1);
		}
		physaddr = nl[0].n_value;
	}
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
	if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return 0;
}

/* from arm/pmap.c */
#define	ARM_L1_IDX(va)		((va) >> ARM_L1_S_SHIFT)
#define	l1pte_section_p(pde)	(((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
-#define	l2pte_index(v)		(((v) & ARM_L2_ADDR_BITS) >> ARM_L2_S_SHIFT)
+#define	l2pte_index(v)		(((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT)

static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/*
		 * 1MB section mapping.
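		 *
		 * Illustrative note, not part of the original comment: a
		 * section descriptor supplies the top 12 physical address
		 * bits while the low 20 bits of the virtual address pass
		 * through unchanged, e.g. for va = 0xc0123456 the result is
		 * (pd & 0xfff00000) + 0x00123456.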
		 */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return 0;
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
#ifdef FBSD_NOT_YET
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
#endif

int
_arm_native(kvm_t *kd)
{

#ifdef __arm__
#if _BYTE_ORDER == _LITTLE_ENDIAN
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
	return (0);
#endif
}

struct kvm_arch kvm_arm = {
	.ka_probe = _arm_probe,
	.ka_initvtop = _arm_initvtop,
	.ka_freevtop = _arm_freevtop,
	.ka_kvatop = _arm_kvatop,
	.ka_native = _arm_native,
};

KVM_ARCH(kvm_arm);

Index: head/lib/libkvm/kvm_arm.h
===================================================================
--- head/lib/libkvm/kvm_arm.h	(revision 295751)
+++ head/lib/libkvm/kvm_arm.h	(revision 295752)
@@ -1,114 +1,111 @@
/*-
 * Copyright (c) 2015 John H. Baldwin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __KVM_ARM_H__
#define __KVM_ARM_H__

#ifdef __arm__
#include
#endif

typedef uint32_t arm_physaddr_t;
typedef uint32_t arm_pd_entry_t;
typedef uint32_t arm_pt_entry_t;

#define	ARM_PAGE_SHIFT	12
#define	ARM_PAGE_SIZE	(1 << ARM_PAGE_SHIFT)	/* Page size */
#define	ARM_PAGE_MASK	(ARM_PAGE_SIZE - 1)

#define	ARM_L1_TABLE_SIZE	0x4000		/* 16K */

#define	ARM_L1_S_SIZE	0x00100000	/* 1M */
#define	ARM_L1_S_OFFSET	(ARM_L1_S_SIZE - 1)
#define	ARM_L1_S_FRAME	(~ARM_L1_S_OFFSET)
#define	ARM_L1_S_SHIFT	20

#define	ARM_L2_L_SIZE	0x00010000	/* 64K */
#define	ARM_L2_L_OFFSET	(ARM_L2_L_SIZE - 1)
#define	ARM_L2_L_FRAME	(~ARM_L2_L_OFFSET)
#define	ARM_L2_L_SHIFT	16

#define	ARM_L2_S_SIZE	0x00001000	/* 4K */
#define	ARM_L2_S_OFFSET	(ARM_L2_S_SIZE - 1)
#define	ARM_L2_S_FRAME	(~ARM_L2_S_OFFSET)
#define	ARM_L2_S_SHIFT	12

#define	ARM_L1_TYPE_INV	0x00	/* Invalid (fault) */
#define	ARM_L1_TYPE_C	0x01	/* Coarse L2 */
#define	ARM_L1_TYPE_S	0x02	/* Section */
#define	ARM_L1_TYPE_MASK	0x03	/* Mask of type bits */

#define	ARM_L1_S_ADDR_MASK	0xfff00000	/* phys address of section */
#define	ARM_L1_C_ADDR_MASK	0xfffffc00	/* phys address of L2 Table */

#define	ARM_L2_TYPE_INV	0x00	/* Invalid (fault) */
#define	ARM_L2_TYPE_L	0x01	/* Large Page - 64k */
#define	ARM_L2_TYPE_S	0x02	/* Small Page - 4k */
#define	ARM_L2_TYPE_T	0x03	/* Tiny Page - 1k - not used */
#define	ARM_L2_TYPE_MASK	0x03

-#define	ARM_L2_ADDR_BITS	0x000ff000	/* L2 PTE address bits */
-
#ifdef __arm__
#include

_Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch");
_Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch");
_Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch");
_Static_assert(L1_TABLE_SIZE == ARM_L1_TABLE_SIZE, "L1_TABLE_SIZE mismatch");
_Static_assert(L1_S_SIZE == ARM_L1_S_SIZE, "L1_S_SIZE mismatch");
_Static_assert(L1_S_OFFSET == ARM_L1_S_OFFSET, "L1_S_OFFSET mismatch");
_Static_assert(L1_S_FRAME == ARM_L1_S_FRAME, "L1_S_FRAME mismatch");
_Static_assert(L1_S_SHIFT == ARM_L1_S_SHIFT, "L1_S_SHIFT mismatch");
_Static_assert(L2_L_SIZE == ARM_L2_L_SIZE, "L2_L_SIZE mismatch");
_Static_assert(L2_L_OFFSET == ARM_L2_L_OFFSET, "L2_L_OFFSET mismatch");
_Static_assert(L2_L_FRAME == ARM_L2_L_FRAME, "L2_L_FRAME mismatch");
_Static_assert(L2_L_SHIFT == ARM_L2_L_SHIFT, "L2_L_SHIFT mismatch");
_Static_assert(L2_S_SIZE == ARM_L2_S_SIZE, "L2_S_SIZE mismatch");
_Static_assert(L2_S_OFFSET == ARM_L2_S_OFFSET, "L2_S_OFFSET mismatch");
_Static_assert(L2_S_FRAME == ARM_L2_S_FRAME, "L2_S_FRAME mismatch");
_Static_assert(L2_S_SHIFT == ARM_L2_S_SHIFT, "L2_S_SHIFT mismatch");
_Static_assert(L1_TYPE_INV == ARM_L1_TYPE_INV, "L1_TYPE_INV mismatch");
_Static_assert(L1_TYPE_C == ARM_L1_TYPE_C, "L1_TYPE_C mismatch");
_Static_assert(L1_TYPE_S == ARM_L1_TYPE_S, "L1_TYPE_S mismatch");
_Static_assert(L1_TYPE_MASK == ARM_L1_TYPE_MASK, "L1_TYPE_MASK mismatch");
_Static_assert(L1_S_ADDR_MASK == ARM_L1_S_ADDR_MASK, "L1_S_ADDR_MASK mismatch");
_Static_assert(L1_C_ADDR_MASK == ARM_L1_C_ADDR_MASK, "L1_C_ADDR_MASK mismatch");
_Static_assert(L2_TYPE_INV == ARM_L2_TYPE_INV, "L2_TYPE_INV mismatch");
_Static_assert(L2_TYPE_L == ARM_L2_TYPE_L, "L2_TYPE_L mismatch");
_Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch");
#if __ARM_ARCH < 6
_Static_assert(L2_TYPE_T == ARM_L2_TYPE_T, "L2_TYPE_T mismatch");
#endif
_Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch");
-_Static_assert(L2_ADDR_BITS == ARM_L2_ADDR_BITS, "L2_ADDR_BITS mismatch");

#endif

int	_arm_native(kvm_t *);

#endif /* !__KVM_ARM_H__ */

Index:
head/sys/arm/include/pmap.h =================================================================== --- head/sys/arm/include/pmap.h (revision 295751) +++ head/sys/arm/include/pmap.h (revision 295752) @@ -1,547 +1,547 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from hp300 version by Mike Hibler, this version by William * Jolitz uses a recursive map [a pde points to the page directory] to * map the page tables using the pagetables themselves. This is done to * reduce the impact on kernel virtual memory for lots of sparse address * space, and to reduce the cost of memory to each process. 
* * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 * * $FreeBSD$ */ #include #if __ARM_ARCH >= 6 #include #else /* __ARM_ARCH >= 6 */ #ifndef _MACHINE_PMAP_H_ #define _MACHINE_PMAP_H_ #include #include /* * Pte related macros */ #define PTE_NOCACHE 1 #define PTE_CACHE 2 #define PTE_DEVICE PTE_NOCACHE #define PTE_PAGETABLE 3 enum mem_type { STRONG_ORD = 0, DEVICE_NOSHARE, DEVICE_SHARE, NRML_NOCACHE, NRML_IWT_OWT, NRML_IWB_OWB, NRML_IWBA_OWBA }; #ifndef LOCORE #include #include #include #include #define PDESIZE sizeof(pd_entry_t) /* for assembly files */ #define PTESIZE sizeof(pt_entry_t) /* for assembly files */ #ifdef _KERNEL #define vtophys(va) pmap_kextract((vm_offset_t)(va)) #endif #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); /* * Pmap stuff */ /* * This structure is used to hold a virtual<->physical address * association and is used mostly by bootstrap code */ struct pv_addr { SLIST_ENTRY(pv_addr) pv_list; vm_offset_t pv_va; vm_paddr_t pv_pa; }; struct pv_entry; struct pv_chunk; struct md_page { int pvh_attrs; vm_memattr_t pv_memattr; vm_offset_t pv_kva; /* first kernel VA mapping */ TAILQ_HEAD(,pv_entry) pv_list; }; struct l1_ttable; struct l2_dtable; /* * The number of L2 descriptor tables which can be tracked by an l2_dtable. * A bucket size of 16 provides for 16MB of contiguous virtual address * space per l2_dtable. Most processes will, therefore, require only two or * three of these to map their whole working set. */ #define L2_BUCKET_LOG2 4 #define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) /* * Given the above "L2-descriptors-per-l2_dtable" constant, the number * of l2_dtable structures required to track all possible page descriptors * mappable by an L1 translation table is given by the following constants: */ #define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) #define L2_SIZE (1 << L2_LOG2) struct pmap { struct mtx pm_mtx; u_int8_t pm_domain; struct l1_ttable *pm_l1; struct l2_dtable *pm_l2[L2_SIZE]; cpuset_t pm_active; /* active on cpus */ struct pmap_statistics pm_stats; /* pmap statictics */ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ }; typedef struct pmap *pmap_t; #ifdef _KERNEL extern struct pmap kernel_pmap_store; #define kernel_pmap (&kernel_pmap_store) #define PMAP_ASSERT_LOCKED(pmap) \ mtx_assert(&(pmap)->pm_mtx, MA_OWNED) #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ NULL, MTX_DEF | MTX_DUPOK) #define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) #endif /* * For each vm_page_t, there is a list of all currently valid virtual * mappings of that page. An entry is a pv_entry_t, the list is pv_list. */ typedef struct pv_entry { vm_offset_t pv_va; /* virtual address for mapping */ TAILQ_ENTRY(pv_entry) pv_list; int pv_flags; /* flags (wired, etc...) */ pmap_t pv_pmap; /* pmap where mapping lies */ TAILQ_ENTRY(pv_entry) pv_plist; } *pv_entry_t; /* * pv_entries are allocated in chunks per-process. This avoids the * need to track per-pmap assignments. 
*/ #define _NPCM 8 #define _NPCPV 252 struct pv_chunk { pmap_t pc_pmap; TAILQ_ENTRY(pv_chunk) pc_list; uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ TAILQ_ENTRY(pv_chunk) pc_lru; struct pv_entry pc_pventry[_NPCPV]; }; #ifdef _KERNEL boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); /* * virtual address to page table entry and * to physical address. Likewise for alternate address space. * Note: these work recursively, thus vtopte of a pte will give * the corresponding pde that in turn maps it. */ /* * The current top of kernel VM. */ extern vm_offset_t pmap_curmaxkvaddr; struct pcb; void pmap_set_pcb_pagedir(pmap_t, struct pcb *); /* Virtual address to page table entry */ static __inline pt_entry_t * vtopte(vm_offset_t va) { pd_entry_t *pdep; pt_entry_t *ptep; if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) return (NULL); return (ptep); } extern vm_paddr_t phys_avail[]; extern vm_offset_t virtual_avail; extern vm_offset_t virtual_end; void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); int pmap_change_attr(vm_offset_t, vm_size_t, int); void pmap_kenter(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); void pmap_kremove_device(vm_offset_t, vm_size_t); void *pmap_kenter_temporary(vm_paddr_t pa, int i); void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); vm_paddr_t pmap_kextract(vm_offset_t va); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); void pmap_kremove(vm_offset_t); void *pmap_mapdev(vm_offset_t, vm_size_t); void pmap_unmapdev(vm_offset_t, vm_size_t); vm_page_t pmap_use_pt(pmap_t, vm_offset_t); void pmap_debug(int); void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); void pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, int cache); int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); /* * Definitions for MMU domains */ #define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ #define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ /* * The new pmap ensures that page-tables are always mapping Write-Thru. * Thus, on some platforms we can run fast and loose and avoid syncing PTEs * on every change. * * Unfortunately, not all CPUs have a write-through cache mode. So we * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, * and if there is the chance for PTE syncs to be needed, we define * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) * the code. */ extern int pmap_needs_pte_sync; /* * These macros define the various bit masks in the PTE. * * We use these macros since we use different bits on different processor * models. 
*/ #define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) #define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ L1_S_XSCALE_TEX(TEX_XSCALE_T)) #define L2_L_CACHE_MASK_generic (L2_B|L2_C) #define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ L2_XSCALE_L_TEX(TEX_XSCALE_T)) #define L2_S_PROT_U_generic (L2_AP(AP_U)) #define L2_S_PROT_W_generic (L2_AP(AP_W)) #define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) #define L2_S_PROT_U_xscale (L2_AP0(AP_U)) #define L2_S_PROT_W_xscale (L2_AP0(AP_W)) #define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) #define L2_S_CACHE_MASK_generic (L2_B|L2_C) #define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ L2_XSCALE_T_TEX(TEX_XSCALE_X)) #define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) #define L1_S_PROTO_xscale (L1_TYPE_S) #define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) #define L1_C_PROTO_xscale (L1_TYPE_C) #define L2_L_PROTO (L2_TYPE_L) #define L2_S_PROTO_generic (L2_TYPE_S) #define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) /* * User-visible names for the ones that vary with MMU class. */ #define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) #if ARM_NMMUS > 1 /* More than one MMU class configured; use variables. */ #define L2_S_PROT_U pte_l2_s_prot_u #define L2_S_PROT_W pte_l2_s_prot_w #define L2_S_PROT_MASK pte_l2_s_prot_mask #define L1_S_CACHE_MASK pte_l1_s_cache_mask #define L2_L_CACHE_MASK pte_l2_l_cache_mask #define L2_S_CACHE_MASK pte_l2_s_cache_mask #define L1_S_PROTO pte_l1_s_proto #define L1_C_PROTO pte_l1_c_proto #define L2_S_PROTO pte_l2_s_proto #elif ARM_MMU_GENERIC != 0 #define L2_S_PROT_U L2_S_PROT_U_generic #define L2_S_PROT_W L2_S_PROT_W_generic #define L2_S_PROT_MASK L2_S_PROT_MASK_generic #define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic #define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic #define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic #define L1_S_PROTO L1_S_PROTO_generic #define L1_C_PROTO L1_C_PROTO_generic #define L2_S_PROTO L2_S_PROTO_generic #elif ARM_MMU_XSCALE == 1 #define L2_S_PROT_U L2_S_PROT_U_xscale #define L2_S_PROT_W L2_S_PROT_W_xscale #define L2_S_PROT_MASK L2_S_PROT_MASK_xscale #define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale #define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale #define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale #define L1_S_PROTO L1_S_PROTO_xscale #define L1_C_PROTO L1_C_PROTO_xscale #define L2_S_PROTO L2_S_PROTO_xscale #endif /* ARM_NMMUS > 1 */ #if defined(CPU_XSCALE_81342) #define PMAP_NEEDS_PTE_SYNC 1 #define PMAP_INCLUDE_PTE_SYNC #else #define PMAP_NEEDS_PTE_SYNC 0 #endif /* * These macros return various bits based on kernel/user and protection. * Note that the compiler will usually fold these at compile time. */ #define L1_S_PROT_U (L1_S_AP(AP_U)) #define L1_S_PROT_W (L1_S_AP(AP_W)) #define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) #define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) #define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) #define L2_L_PROT_U (L2_AP(AP_U)) #define L2_L_PROT_W (L2_AP(AP_W)) #define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) #define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) #define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) /* * Macros to test if a mapping is mappable with an L1 Section mapping * or an L2 Large Page mapping. 
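 *
 * Illustrative example, not part of the original comment: a 2 MB mapping
 * with va == pa == 0x00100000 satisfies L1_S_MAPPABLE_P, since both
 * addresses are 1 MB aligned and the size is at least L1_S_SIZE, so it can
 * be entered as two section descriptors instead of 512 small-page PTEs.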
*/ #define L1_S_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) #define L2_L_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) /* * Provide a fallback in case we were not able to determine it at * compile-time. */ #ifndef PMAP_NEEDS_PTE_SYNC #define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync #define PMAP_INCLUDE_PTE_SYNC #endif #ifdef ARM_L2_PIPT #define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) #else #define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) #endif #define PTE_SYNC(pte) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) #define PTE_SYNC_RANGE(pte, cnt) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) extern pt_entry_t pte_l1_s_cache_mode; extern pt_entry_t pte_l1_s_cache_mask; extern pt_entry_t pte_l2_l_cache_mode; extern pt_entry_t pte_l2_l_cache_mask; extern pt_entry_t pte_l2_s_cache_mode; extern pt_entry_t pte_l2_s_cache_mask; extern pt_entry_t pte_l1_s_cache_mode_pt; extern pt_entry_t pte_l2_l_cache_mode_pt; extern pt_entry_t pte_l2_s_cache_mode_pt; extern pt_entry_t pte_l2_s_prot_u; extern pt_entry_t pte_l2_s_prot_w; extern pt_entry_t pte_l2_s_prot_mask; extern pt_entry_t pte_l1_s_proto; extern pt_entry_t pte_l1_c_proto; extern pt_entry_t pte_l2_s_proto; extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); #if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); void pmap_zero_page_generic(vm_paddr_t, int, int); void pmap_pte_init_generic(void); #endif /* ARM_MMU_GENERIC != 0 */ #if ARM_MMU_XSCALE == 1 void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); void pmap_zero_page_xscale(vm_paddr_t, int, int); void pmap_pte_init_xscale(void); void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); void pmap_use_minicache(vm_offset_t, vm_size_t); #endif /* ARM_MMU_XSCALE == 1 */ #if defined(CPU_XSCALE_81342) #define ARM_HAVE_SUPERSECTIONS #endif #define PTE_KERNEL 0 #define PTE_USER 1 #define l1pte_valid(pde) ((pde) != 0) #define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) -#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) +#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) #define l2pte_valid(pte) ((pte) != 0) #define l2pte_pa(pte) ((pte) & L2_S_FRAME) #define l2pte_minidata(pte) (((pte) & \ (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) /* L1 and L2 page table macros */ #define pmap_pde_v(pde) l1pte_valid(*(pde)) #define pmap_pde_section(pde) l1pte_section_p(*(pde)) #define pmap_pde_page(pde) l1pte_page_p(*(pde)) #define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) #define pmap_pte_v(pte) l2pte_valid(*(pte)) #define pmap_pte_pa(pte) l2pte_pa(*(pte)) /* * Flags that indicate attributes of pages or mappings of pages. 
* * The PVF_MOD and PVF_REF flags are stored in the mdpage for each * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual * pv_entry's for each page. They live in the same "namespace" so * that we can clear multiple attributes at a time. * * Note the "non-cacheable" flag generally means the page has * multiple mappings in a given address space. */ #define PVF_MOD 0x01 /* page is modified */ #define PVF_REF 0x02 /* page is referenced */ #define PVF_WIRED 0x04 /* mapping is wired */ #define PVF_WRITE 0x08 /* mapping is writable */ #define PVF_EXEC 0x10 /* mapping is executable */ #define PVF_NC 0x20 /* mapping is non-cacheable */ #define PVF_MWC 0x40 /* mapping is used multiple times in userland */ #define PVF_UNMAN 0x80 /* mapping is unmanaged */ void vector_page_setprot(int); #define SECTION_CACHE 0x1 #define SECTION_PT 0x2 void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); #ifdef ARM_HAVE_SUPERSECTIONS void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); #endif extern char *_tmppt; void pmap_postinit(void); extern vm_paddr_t dump_avail[]; #endif /* _KERNEL */ #endif /* !LOCORE */ #endif /* !_MACHINE_PMAP_H_ */ #endif /* __ARM_ARCH >= 6 */ Index: head/sys/arm/include/pte-v6.h =================================================================== --- head/sys/arm/include/pte-v6.h (revision 295751) +++ head/sys/arm/include/pte-v6.h (revision 295752) @@ -1,306 +1,301 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PTE_H_ #define _MACHINE_PTE_H_ /* * Domain Types for the Domain Access Control Register. 
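 *
 * Illustrative note (an assumption, not in the original comment): the DACR
 * holds a two-bit field per domain, so a value such as
 * (DOMAIN_CLIENT << (2 * n)) grants client access to domain n while leaving
 * the other domains in the no-access (DOMAIN_FAULT) state.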
*/ #define DOMAIN_FAULT 0x00 /* no access */ #define DOMAIN_CLIENT 0x01 /* client */ #define DOMAIN_RESERVED 0x02 /* reserved */ #define DOMAIN_MANAGER 0x03 /* manager */ /* * TEX remap registers attributes */ #define PRRR_SO 0 /* Strongly ordered memory */ #define PRRR_DEV 1 /* Device memory */ #define PRRR_MEM 2 /* Normal memory */ #define PRRR_DS0 (1 << 16) /* Shared bit for Device, S = 0 */ #define PRRR_DS1 (1 << 17) /* Shared bit for Device, S = 1 */ #define PRRR_NS0 (1 << 18) /* Shared bit for Normal, S = 0 */ #define PRRR_NS1 (1 << 19) /* Shared bit for Normal, S = 1 */ #define PRRR_NOS_SHIFT 24 /* base shif for Not Outer Shared bits */ #define NMRR_NC 0 /* Noncachable*/ #define NMRR_WB_WA 1 /* Write Back, Write Allocate */ #define NMRR_WT 2 /* Write Through, Non-Write Allocate */ #define NMRR_WB 3 /* Write Back, Non-Write Allocate */ /* * * The ARM MMU is capable of mapping memory in the following chunks: * * 16M Supersections (L1 table) * * 1M Sections (L1 table) * * 64K Large Pages (L2 table) * * 4K Small Pages (L2 table) * * * Coarse Tables can map Large and Small Pages. * Coarse Tables are 1K in length. * * The Translation Table Base register holds the pointer to the * L1 Table. The L1 Table is a 16K contiguous chunk of memory * aligned to a 16K boundary. Each entry in the L1 Table maps * 1M of virtual address space, either via a Section mapping or * via an L2 Table. * */ #define L1_TABLE_SIZE 0x4000 /* 16K */ #define L1_ENTRIES 0x1000 /* 4K */ #define L2_TABLE_SIZE 0x0400 /* 1K */ #define L2_ENTRIES 0x0100 /* 256 */ /* ARMv6 super-sections. */ #define L1_SUP_SIZE 0x01000000 /* 16M */ #define L1_SUP_OFFSET (L1_SUP_SIZE - 1) #define L1_SUP_FRAME (~L1_SUP_OFFSET) #define L1_SUP_SHIFT 24 #define L1_S_SIZE 0x00100000 /* 1M */ #define L1_S_OFFSET (L1_S_SIZE - 1) #define L1_S_FRAME (~L1_S_OFFSET) #define L1_S_SHIFT 20 #define L2_L_SIZE 0x00010000 /* 64K */ #define L2_L_OFFSET (L2_L_SIZE - 1) #define L2_L_FRAME (~L2_L_OFFSET) #define L2_L_SHIFT 16 #define L2_S_SIZE 0x00001000 /* 4K */ #define L2_S_OFFSET (L2_S_SIZE - 1) #define L2_S_FRAME (~L2_S_OFFSET) #define L2_S_SHIFT 12 /* * ARM MMU L1 Descriptors */ #define L1_TYPE_INV 0x00 /* Invalid (fault) */ #define L1_TYPE_C 0x01 /* Coarse L2 */ #define L1_TYPE_S 0x02 /* Section */ #define L1_TYPE_MASK 0x03 /* Mask of type bits */ /* L1 Section Descriptor */ #define L1_S_B 0x00000004 /* bufferable Section */ #define L1_S_C 0x00000008 /* cacheable Section */ #define L1_S_NX 0x00000010 /* not executeable */ #define L1_S_DOM(x) ((x) << 5) /* domain */ #define L1_S_DOM_MASK L1_S_DOM(0xf) #define L1_S_P 0x00000200 /* ECC enable for this section */ #define L1_S_AP(x) ((x) << 10) /* access permissions */ #define L1_S_AP0 0x00000400 /* access permissions bit 0 */ #define L1_S_AP1 0x00000800 /* access permissions bit 1 */ #define L1_S_TEX(x) ((x) << 12) /* type extension */ #define L1_S_TEX0 0x00001000 /* type extension bit 0 */ #define L1_S_TEX1 0x00002000 /* type extension bit 1 */ #define L1_S_TEX2 0x00004000 /* type extension bit 2 */ #define L1_S_AP2 0x00008000 /* access permissions bit 2 */ #define L1_S_SHARED 0x00010000 /* shared */ #define L1_S_NG 0x00020000 /* not global */ #define L1_S_SUPERSEC 0x00040000 /* Section is a super-section. 
*/ #define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ /* L1 Coarse Descriptor */ #define L1_C_DOM(x) ((x) << 5) /* domain */ #define L1_C_DOM_MASK L1_C_DOM(0xf) #define L1_C_P 0x00000200 /* ECC enable for this section */ #define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ /* * ARM MMU L2 Descriptors */ #define L2_TYPE_INV 0x00 /* Invalid (fault) */ #define L2_TYPE_L 0x01 /* Large Page - 64k - not used yet*/ #define L2_TYPE_S 0x02 /* Small Page - 4 */ #define L2_TYPE_MASK 0x03 #define L2_NX 0x00000001 /* Not executable */ #define L2_B 0x00000004 /* Bufferable page */ #define L2_C 0x00000008 /* Cacheable page */ #define L2_AP(x) ((x) << 4) #define L2_AP0 0x00000010 /* access permissions bit 0*/ #define L2_AP1 0x00000020 /* access permissions bit 1*/ #define L2_TEX(x) ((x) << 6) /* type extension */ #define L2_TEX0 0x00000040 /* type extension bit 0 */ #define L2_TEX1 0x00000080 /* type extension bit 1 */ #define L2_TEX2 0x00000100 /* type extension bit 2 */ #define L2_AP2 0x00000200 /* access permissions bit 2*/ #define L2_SHARED 0x00000400 /* shared */ #define L2_NG 0x00000800 /* not global */ /* * TEX classes encoding */ #define TEX1_CLASS_0 ( 0) #define TEX1_CLASS_1 ( L1_S_B) #define TEX1_CLASS_2 ( L1_S_C ) #define TEX1_CLASS_3 ( L1_S_C | L1_S_B) #define TEX1_CLASS_4 (L1_S_TEX0 ) #define TEX1_CLASS_5 (L1_S_TEX0 | L1_S_B) #define TEX1_CLASS_6 (L1_S_TEX0 | L1_S_C ) /* Reserved for ARM11 */ #define TEX1_CLASS_7 (L1_S_TEX0 | L1_S_C | L1_S_B) #define TEX2_CLASS_0 ( 0) #define TEX2_CLASS_1 ( L2_B) #define TEX2_CLASS_2 ( L2_C ) #define TEX2_CLASS_3 ( L2_C | L2_B) #define TEX2_CLASS_4 (L2_TEX0 ) #define TEX2_CLASS_5 (L2_TEX0 | L2_B) #define TEX2_CLASS_6 (L2_TEX0 | L2_C ) /* Reserved for ARM11 */ #define TEX2_CLASS_7 (L2_TEX0 | L2_C | L2_B) /* L1 table definitions. */ #define NB_IN_PT1 L1_TABLE_SIZE #define NPTE1_IN_PT1 L1_ENTRIES /* L2 table definitions. */ #define NB_IN_PT2 L2_TABLE_SIZE #define NPTE2_IN_PT2 L2_ENTRIES /* * Map memory attributes to TEX classes */ #define PTE2_ATTR_WB_WA TEX2_CLASS_0 #define PTE2_ATTR_NOCACHE TEX2_CLASS_1 #define PTE2_ATTR_DEVICE TEX2_CLASS_2 #define PTE2_ATTR_SO TEX2_CLASS_3 #define PTE2_ATTR_WT TEX2_CLASS_4 /* * Software defined bits for L1 descriptors * - L1_AP0 is used as page accessed bit * - L1_AP2 (RO / not RW) is used as page not modified bit * - L1_TEX0 is used as software emulated RO bit */ #define PTE1_V L1_TYPE_S /* Valid bit */ #define PTE1_A L1_S_AP0 /* Accessed - software emulated */ #define PTE1_NM L1_S_AP2 /* not modified bit - software emulated * used as real write enable bit */ #define PTE1_M 0 /* Modified (dummy) */ #define PTE1_S L1_S_SHARED /* Shared */ #define PTE1_NG L1_S_NG /* Not global */ #define PTE1_G 0 /* Global (dummy) */ #define PTE1_NX L1_S_NX /* Not executable */ #define PTE1_X 0 /* Executable (dummy) */ #define PTE1_RO L1_S_TEX1 /* Read Only */ #define PTE1_RW 0 /* Read-Write (dummy) */ #define PTE1_U L1_S_AP1 /* User */ #define PTE1_NU 0 /* Not user (kernel only) (dummy) */ #define PTE1_W L1_S_TEX2 /* Wired */ #define PTE1_SHIFT L1_S_SHIFT #define PTE1_SIZE L1_S_SIZE #define PTE1_OFFSET L1_S_OFFSET #define PTE1_FRAME L1_S_FRAME #define PTE1_ATTR_MASK (L1_S_TEX0 | L1_S_C | L1_S_B) #define PTE1_AP_KR (PTE1_RO | PTE1_NM) #define PTE1_AP_KRW 0 #define PTE1_AP_KRUR (PTE1_RO | PTE1_NM | PTE1_U) #define PTE1_AP_KRWURW PTE1_U /* * PTE1 descriptors creation macros. 
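 *
 * Illustrative expansion, not part of the original comment:
 * PTE1_KERN(pa, ap, attr) evaluates to
 *   (pa & PTE1_FRAME) | (ap) | PTE1_A | PTE1_G | (attr) | PTE1_V | PTE1_S,
 * i.e. a valid, shared, accessed, global 1 MB section descriptor.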
*/ #define PTE1_PA(pa) ((pa) & PTE1_FRAME) #define PTE1_AP_COMMON (PTE1_V | PTE1_S) #define PTE1(pa, ap, attr) (PTE1_PA(pa) | (ap) | (attr) | PTE1_AP_COMMON) #define PTE1_KERN(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_G, attr) #define PTE1_KERN_NG(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_NG, attr) #define PTE1_LINK(pa) (((pa) & L1_C_ADDR_MASK) | L1_TYPE_C) /* * Software defined bits for L2 descriptors * - L2_AP0 is used as page accessed bit * - L2_AP2 (RO / not RW) is used as page not modified bit * - L2_TEX0 is used as software emulated RO bit */ #define PTE2_V L2_TYPE_S /* Valid bit */ #define PTE2_A L2_AP0 /* Accessed - software emulated */ #define PTE2_NM L2_AP2 /* not modified bit - software emulated * used as real write enable bit */ #define PTE2_M 0 /* Modified (dummy) */ #define PTE2_S L2_SHARED /* Shared */ #define PTE2_NG L2_NG /* Not global */ #define PTE2_G 0 /* Global (dummy) */ #define PTE2_NX L2_NX /* Not executable */ #define PTE2_X 0 /* Not executable (dummy) */ #define PTE2_RO L2_TEX1 /* Read Only */ #define PTE2_U L2_AP1 /* User */ #define PTE2_NU 0 /* Not user (kernel only) (dummy) */ #define PTE2_W L2_TEX2 /* Wired */ #define PTE2_SHIFT L2_S_SHIFT #define PTE2_SIZE L2_S_SIZE #define PTE2_OFFSET L2_S_OFFSET #define PTE2_FRAME L2_S_FRAME #define PTE2_ATTR_MASK (L2_TEX0 | L2_C | L2_B) #define PTE2_AP_KR (PTE2_RO | PTE2_NM) #define PTE2_AP_KRW 0 #define PTE2_AP_KRUR (PTE2_RO | PTE2_NM | PTE2_U) #define PTE2_AP_KRWURW PTE2_U /* * PTE2 descriptors creation macros. */ #define PTE2_PA(pa) ((pa) & PTE2_FRAME) #define PTE2_AP_COMMON (PTE2_V | PTE2_S) #define PTE2(pa, ap, attr) (PTE2_PA(pa) | (ap) | (attr) | PTE2_AP_COMMON) #define PTE2_KERN(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_G, attr) #define PTE2_KERN_NG(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_NG, attr) // ----------------- TO BE DELETED --------------------------------------------- /* * sys/arm/arm/elf_trampoline.c */ #define AP_KRW 0x01 /* kernel read/write */ -/* - * lib/libkvm/kvm_arm.c - */ -#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ - // ----------------------------------------------------------------------------- #endif /* !_MACHINE_PTE_H_ */ Index: head/sys/arm/include/pte.h =================================================================== --- head/sys/arm/include/pte.h (revision 295751) +++ head/sys/arm/include/pte.h (revision 295752) @@ -1,358 +1,356 @@ /* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ /*- * Copyright (c) 1994 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the RiscBSD team. * 4. The name "RiscBSD" nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #if __ARM_ARCH >= 6 #include #else /* __ARM_ARCH >= 6 */ #ifndef _MACHINE_PTE_H_ #define _MACHINE_PTE_H_ #ifndef LOCORE typedef uint32_t pd_entry_t; /* page directory entry */ typedef uint32_t pt_entry_t; /* page table entry */ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ #endif #define PG_FRAME 0xfffff000 /* The PT_SIZE definition is misleading... A page table is only 0x400 * bytes long. But since VM mapping can only be done to 0x1000 a single * 1KB blocks cannot be steered to a va by itself. Therefore the * pages tables are allocated in blocks of 4. i.e. if a 1 KB block * was allocated for a PT then the other 3KB would also get mapped * whenever the 1KB was mapped. */ #define PT_RSIZE 0x0400 /* Real page table size */ #define PT_SIZE 0x1000 #define PD_SIZE 0x4000 /* Page table types and masks */ #define L1_PAGE 0x01 /* L1 page table mapping */ #define L1_SECTION 0x02 /* L1 section mapping */ #define L1_FPAGE 0x03 /* L1 fine page mapping */ #define L1_MASK 0x03 /* Mask for L1 entry type */ #define L2_LPAGE 0x01 /* L2 large page (64KB) */ #define L2_SPAGE 0x02 /* L2 small page (4KB) */ #define L2_MASK 0x03 /* Mask for L2 entry type */ #define L2_INVAL 0x00 /* L2 invalid type */ /* * The ARM MMU architecture was introduced with ARM v3 (previous ARM * architecture versions used an optional off-CPU memory controller * to perform address translation). * * The ARM MMU consists of a TLB and translation table walking logic. * There is typically one TLB per memory interface (or, put another * way, one TLB per software-visible cache). * * The ARM MMU is capable of mapping memory in the following chunks: * * 1M Sections (L1 table) * * 64K Large Pages (L2 table) * * 4K Small Pages (L2 table) * * 1K Tiny Pages (L2 table) * * There are two types of L2 tables: Coarse Tables and Fine Tables. * Coarse Tables can map Large and Small Pages. Fine Tables can * map Tiny Pages. * * Coarse Tables can define 4 Subpages within Large and Small pages. * Subpages define different permissions for each Subpage within * a Page. * * Coarse Tables are 1K in length. Fine tables are 4K in length. * * The Translation Table Base register holds the pointer to the * L1 Table. The L1 Table is a 16K contiguous chunk of memory * aligned to a 16K boundary. Each entry in the L1 Table maps * 1M of virtual address space, either via a Section mapping or * via an L2 Table. * * In addition, the Fast Context Switching Extension (FCSE) is available * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating * TLB/cache flushes on context switch by use of a smaller address space * and a "process ID" that modifies the virtual address before being * presented to the translation logic. */ /* ARMv6 super-sections. 
*/ #define L1_SUP_SIZE 0x01000000 /* 16M */ #define L1_SUP_OFFSET (L1_SUP_SIZE - 1) #define L1_SUP_FRAME (~L1_SUP_OFFSET) #define L1_SUP_SHIFT 24 #define L1_S_SIZE 0x00100000 /* 1M */ #define L1_S_OFFSET (L1_S_SIZE - 1) #define L1_S_FRAME (~L1_S_OFFSET) #define L1_S_SHIFT 20 #define L2_L_SIZE 0x00010000 /* 64K */ #define L2_L_OFFSET (L2_L_SIZE - 1) #define L2_L_FRAME (~L2_L_OFFSET) #define L2_L_SHIFT 16 #define L2_S_SIZE 0x00001000 /* 4K */ #define L2_S_OFFSET (L2_S_SIZE - 1) #define L2_S_FRAME (~L2_S_OFFSET) #define L2_S_SHIFT 12 #define L2_T_SIZE 0x00000400 /* 1K */ #define L2_T_OFFSET (L2_T_SIZE - 1) #define L2_T_FRAME (~L2_T_OFFSET) #define L2_T_SHIFT 10 /* * The NetBSD VM implementation only works on whole pages (4K), * whereas the ARM MMU's Coarse tables are sized in terms of 1K * (16K L1 table, 1K L2 table). * * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 * table. */ -#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ - #define L1_TABLE_SIZE 0x4000 /* 16K */ #define L2_TABLE_SIZE 0x1000 /* 4K */ /* * The new pmap deals with the 1KB coarse L2 tables by * allocating them from a pool. Until every port has been converted, * keep the old L2_TABLE_SIZE define lying around. Converted ports * should use L2_TABLE_SIZE_REAL until then. */ #define L2_TABLE_SIZE_REAL 0x400 /* 1K */ /* Total number of page table entries in L2 table */ #define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) /* * ARM L1 Descriptors */ #define L1_TYPE_INV 0x00 /* Invalid (fault) */ #define L1_TYPE_C 0x01 /* Coarse L2 */ #define L1_TYPE_S 0x02 /* Section */ #define L1_TYPE_F 0x03 /* Fine L2 */ #define L1_TYPE_MASK 0x03 /* mask of type bits */ /* L1 Section Descriptor */ #define L1_S_B 0x00000004 /* bufferable Section */ #define L1_S_C 0x00000008 /* cacheable Section */ #define L1_S_IMP 0x00000010 /* implementation defined */ #define L1_S_XN (1 << 4) /* execute not */ #define L1_S_DOM(x) ((x) << 5) /* domain */ #define L1_S_DOM_MASK L1_S_DOM(0xf) #define L1_S_AP(x) ((x) << 10) /* access permissions */ #define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ #define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ #define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ #define L1_S_APX (1 << 15) #define L1_SHARED (1 << 16) #define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ #define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ #define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
*/ /* L1 Coarse Descriptor */ #define L1_C_IMP0 0x00000004 /* implementation defined */ #define L1_C_IMP1 0x00000008 /* implementation defined */ #define L1_C_IMP2 0x00000010 /* implementation defined */ #define L1_C_DOM(x) ((x) << 5) /* domain */ #define L1_C_DOM_MASK L1_C_DOM(0xf) #define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ #define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ /* L1 Fine Descriptor */ #define L1_F_IMP0 0x00000004 /* implementation defined */ #define L1_F_IMP1 0x00000008 /* implementation defined */ #define L1_F_IMP2 0x00000010 /* implementation defined */ #define L1_F_DOM(x) ((x) << 5) /* domain */ #define L1_F_DOM_MASK L1_F_DOM(0xf) #define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ #define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ /* * ARM L2 Descriptors */ #define L2_TYPE_INV 0x00 /* Invalid (fault) */ #define L2_TYPE_L 0x01 /* Large Page */ #define L2_TYPE_S 0x02 /* Small Page */ #define L2_TYPE_T 0x03 /* Tiny Page */ #define L2_TYPE_MASK 0x03 /* mask of type bits */ /* * This L2 Descriptor type is available on XScale processors * when using a Coarse L1 Descriptor. The Extended Small * Descriptor has the same format as the XScale Tiny Descriptor, * but describes a 4K page, rather than a 1K page. */ #define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ #define L2_B 0x00000004 /* Bufferable page */ #define L2_C 0x00000008 /* Cacheable page */ #define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ #define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ #define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ #define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ #define L2_SHARED (1 << 10) #define L2_APX (1 << 9) #define L2_XN (1 << 0) #define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ #define L2_L_TEX(x) (((x) & 0x7) << 12) #define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ #define L2_S_TEX(x) (((x) & 0x7) << 6) #define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ #define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ #define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ /* * Access Permissions for L1 and L2 Descriptors. */ #define AP_W 0x01 /* writable */ #define AP_REF 0x01 /* referenced flag */ #define AP_U 0x02 /* user */ /* * Short-hand for common AP_* constants. * * Note: These values assume the S (System) bit is set and * the R (ROM) bit is clear in CP15 register 1. */ #define AP_KR 0x00 /* kernel read */ #define AP_KRW 0x01 /* kernel read/write */ #define AP_KRWUR 0x02 /* kernel read/write usr read */ #define AP_KRWURW 0x03 /* kernel read/write usr read/write */ /* * Domain Types for the Domain Access Control Register. */ #define DOMAIN_FAULT 0x00 /* no access */ #define DOMAIN_CLIENT 0x01 /* client */ #define DOMAIN_RESERVED 0x02 /* reserved */ #define DOMAIN_MANAGER 0x03 /* manager */ /* * Type Extension bits for XScale processors. 
* * Behavior of C and B when X == 0: * * C B Cacheable Bufferable Write Policy Line Allocate Policy * 0 0 N N - - * 0 1 N Y - - * 1 0 Y Y Write-through Read Allocate * 1 1 Y Y Write-back Read Allocate * * Behavior of C and B when X == 1: * C B Cacheable Bufferable Write Policy Line Allocate Policy * 0 0 - - - - DO NOT USE * 0 1 N Y - - * 1 0 Mini-Data - - - * 1 1 Y Y Write-back R/W Allocate */ #define TEX_XSCALE_X 0x01 /* X modifies C and B */ #define TEX_XSCALE_E 0x02 #define TEX_XSCALE_T 0x04 /* Xscale core 3 */ /* * * Cache attributes with L2 present, S = 0 * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce * 0 0 0 0 0 N N - N N * 0 0 0 0 1 N N - N Y * 0 0 0 1 0 Y Y WT N Y * 0 0 0 1 1 Y Y WB Y Y * 0 0 1 0 0 N N - Y Y * 0 0 1 0 1 N N - N N * 0 0 1 1 0 Y Y - - N * 0 0 1 1 1 Y Y WT Y Y * 0 1 0 0 0 N N - N N * 0 1 0 0 1 N/A N/A N/A N/A N/A * 0 1 0 1 0 N/A N/A N/A N/A N/A * 0 1 0 1 1 N/A N/A N/A N/A N/A * 0 1 1 X X N/A N/A N/A N/A N/A * 1 X 0 0 0 N N - N Y * 1 X 0 0 1 Y N WB N Y * 1 X 0 1 0 Y N WT N Y * 1 X 0 1 1 Y N WB Y Y * 1 X 1 0 0 N N - Y Y * 1 X 1 0 1 Y Y WB Y Y * 1 X 1 1 0 Y Y WT Y Y * 1 X 1 1 1 Y Y WB Y Y * * * * * Cache attributes with L2 present, S = 1 * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce * 0 0 0 0 0 N N - N N * 0 0 0 0 1 N N - N Y * 0 0 0 1 0 Y Y - N Y * 0 0 0 1 1 Y Y WT Y Y * 0 0 1 0 0 N N - Y Y * 0 0 1 0 1 N N - N N * 0 0 1 1 0 Y Y - - N * 0 0 1 1 1 Y Y WT Y Y * 0 1 0 0 0 N N - N N * 0 1 0 0 1 N/A N/A N/A N/A N/A * 0 1 0 1 0 N/A N/A N/A N/A N/A * 0 1 0 1 1 N/A N/A N/A N/A N/A * 0 1 1 X X N/A N/A N/A N/A N/A * 1 X 0 0 0 N N - N Y * 1 X 0 0 1 Y N - N Y * 1 X 0 1 0 Y N - N Y * 1 X 0 1 1 Y N - Y Y * 1 X 1 0 0 N N - Y Y * 1 X 1 0 1 Y Y WT Y Y * 1 X 1 1 0 Y Y WT Y Y * 1 X 1 1 1 Y Y WT Y Y */ #endif /* !_MACHINE_PTE_H_ */ #endif /* __ARM_ARCH >= 6 */ /* End of pte.h */
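Taken together, the change replaces the only consumers of L2_ADDR_BITS and
ARM_L2_ADDR_BITS (the l2pte_index() macros) with the existing L1_S_OFFSET /
ARM_L1_S_OFFSET constants, which is what allows the L2_ADDR_BITS definitions
above to be deleted.  The two spellings are equivalent because the low 12 bits
cleared by the old mask are discarded by the >> L2_S_SHIFT in any case.  A
minimal stand-alone sketch (not part of the commit; the constant values are
copied from the headers above, everything else is illustrative) that checks
the equivalence:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	L1_S_OFFSET	0x000fffffu	/* offset within a 1 MB section */
#define	L2_ADDR_BITS	0x000ff000u	/* the constant removed by this change */
#define	L2_S_SHIFT	12		/* 4 KB small-page shift */

/* Old and new spellings of l2pte_index(). */
static uint32_t
l2pte_index_old(uint32_t va)
{
	return ((va & L2_ADDR_BITS) >> L2_S_SHIFT);
}

static uint32_t
l2pte_index_new(uint32_t va)
{
	return ((va & L1_S_OFFSET) >> L2_S_SHIFT);
}

int
main(void)
{
	uint32_t va;

	/* Every 4 KB page in two 1 MB sections yields the same L2 index. */
	for (va = 0xc0000000u; va < 0xc0200000u; va += 0x1000u)
		assert(l2pte_index_old(va) == l2pte_index_new(va));

	/* For example, 0xc0123456 selects slot 0x23 of its coarse L2 table. */
	printf("l2pte_index(0xc0123456) = 0x%x\n",
	    (unsigned)l2pte_index_new(0xc0123456u));
	return (0);
}

Masking with the section offset rather than with a dedicated constant matches
what the in-kernel l2pte_index() now does, so libkvm no longer needs to carry
its own copy of the constant.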