Index: head/sys/arm/include/pmap-v6.h
===================================================================
--- head/sys/arm/include/pmap-v6.h	(revision 295694)
+++ head/sys/arm/include/pmap-v6.h	(revision 295695)
@@ -1,256 +1,254 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The ARM version of this file was more or less based on the i386 version,
 * which has the following provenance...
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include
#include
#include
#include

typedef uint32_t	pt1_entry_t;	/* L1 table entry */
typedef uint32_t	pt2_entry_t;	/* L2 table entry */
typedef uint32_t	ttb_entry_t;	/* TTB entry */

#ifdef _KERNEL

#if 0
#define PMAP_PTE_NOCACHE	// Use uncached page tables
#endif

/*
 * (1) During pmap bootstrap, physical pages for L2 page tables are
 *     allocated in advance and used for the continuous KVA mapping
 *     starting from KERNBASE.  This keeps things simple.
 * (2) During vm subsystem initialization, only the vm subsystem itself can
 *     allocate physical memory safely.  As pmap_map() is called during
 *     this initialization, we must be prepared for that and have some
 *     preallocated physical pages for L2 page tables.
 *
 * Note that some more pages for L2 page tables are preallocated too
 * for mappings lying above VM_MAX_KERNEL_ADDRESS.
 */
#ifndef NKPT2PG
/*
 * Ideally this would be defined in the board configuration, as the
 * definition here must err on the safe side, i.e. be rather large.
 *
 * 1 GB KVA <=> 256 kernel L2 page table pages
 *
 * From real platforms:
 *	1 GB physical memory <=> 10 pages is enough
 *	2 GB physical memory <=> 21 pages is enough
 */
#define NKPT2PG		32
#endif
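/*
 * The sizing above follows from the ARMv6/v7 short-descriptor page table
 * geometry.  As a worked example, for 1 GB of KVA:
 *
 *	one L1 entry (PTE1) maps 1 MB         => 1 GB needs 1024 L2 tables
 *	one L2 table (NB_IN_PT2) is 256 * 4 B => 1 KB per table
 *	one 4 KB page holds NPT2_IN_PG = 4 L2 tables
 *
 *	1024 L2 tables / 4 tables per page = 256 kernel L2 page table pages
 */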
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *_tmppt;	/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by the bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};
#endif

struct pv_entry;
struct pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	uint16_t		pt2_wirecount[4];
	vm_memattr_t		pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pt1_entry_t		*pm_pt1;	/* KVA of pt1 */
	pt2_entry_t		*pm_pt2tab;	/* KVA of pt2 pages table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)

#define PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per process.  This avoids the need
 * to track per-pmap assignments.  (_NPCPV entries need _NPCM 32-bit bitmap
 * words: 336 = 10 * 32 + 16.)
 */
#define _NPCM	11
#define _NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL
struct pcb;
extern ttb_entry_t pmap_kern_ttb;	/* TTB for kernel pmap */

#define pmap_page_get_memattr(m)	((m)->md.pat_mode)
#define pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte2().
 */
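/*
 * As an illustration only (not part of this header): before pmap_bootstrap()
 * runs, wiring a preallocated physical page at a known KVA and resolving it
 * back might look like
 *
 *	pmap_kenter(va, pa);
 *	KASSERT(vtophys(va) == pa, ("early mapping lost"));
 *
 * where va and pa stand for a hypothetical kernel virtual address and
 * physical page address chosen by the caller.
 */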
void pmap_bootstrap(vm_offset_t);
void pmap_kenter(vm_offset_t, vm_paddr_t);
void *pmap_kenter_temporary(vm_paddr_t, int);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t);
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void pmap_kremove_device(vm_offset_t, vm_size_t);
void pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void pmap_tlb_flush(pmap_t, vm_offset_t);
void pmap_tlb_flush_range(pmap_t, vm_offset_t, vm_size_t);

void pmap_dcache_wb_range(vm_paddr_t, vm_size_t, vm_memattr_t);

vm_paddr_t pmap_kextract(vm_offset_t);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);

int pmap_fault(pmap_t, vm_offset_t, uint32_t, int, bool);

#define vtophys(va)	pmap_kextract((vm_offset_t)(va))

void pmap_set_tex(void);
void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set);

/*
 * Pre-bootstrap epoch functions set.
 */
void pmap_bootstrap_prepare(vm_paddr_t);
vm_paddr_t pmap_preboot_get_pages(u_int);
void pmap_preboot_map_pages(vm_paddr_t, vm_offset_t, u_int);
vm_offset_t pmap_preboot_reserve_pages(u_int);
vm_offset_t pmap_preboot_get_vpages(u_int);
void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t,
    vm_memattr_t);

#endif /* _KERNEL */

// -----------------  TO BE DELETED  ------------------------------------------
-#include
-
#ifdef _KERNEL

/*
 * sys/arm/arm/elf_trampoline.c
 * sys/arm/arm/genassym.c
 * sys/arm/arm/machdep.c
 * sys/arm/arm/mp_machdep.c
 * sys/arm/arm/locore.S
 * sys/arm/arm/pmap.c
 * sys/arm/arm/swtch.S
 * sys/arm/at91/at91_machdep.c
 * sys/arm/cavium/cns11xx/econa_machdep.c
 * sys/arm/s3c2xx0/s3c24x0_machdep.c
 * sys/arm/xscale/ixp425/avila_machdep.c
 * sys/arm/xscale/i8134x/crb_machdep.c
 * sys/arm/xscale/i80321/ep80219_machdep.c
 * sys/arm/xscale/i80321/iq31244_machdep.c
 * sys/arm/xscale/pxa/pxa_machdep.c
 */
#define PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * sys/arm/arm/cpufunc.c
 */
void vector_page_setprot(int);

#endif /* _KERNEL */
// -----------------------------------------------------------------------------
#endif /* !_MACHINE_PMAP_H_ */

Index: head/sys/arm/include/pmap_var.h
===================================================================
--- head/sys/arm/include/pmap_var.h	(revision 295694)
+++ head/sys/arm/include/pmap_var.h	(revision 295695)
@@ -1,511 +1,512 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_VAR_H_
#define _MACHINE_PMAP_VAR_H_

#include
+#include

/*
 * Various PMAP defines, exports, and inline function
 * definitions also usable in other MD code.
 */

/* The number of pages in an L1 page table. */
#define NPG_IN_PT1	(NB_IN_PT1 / PAGE_SIZE)

/* The number of L2 page tables in a page. */
#define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)

/* The number of L2 page table entries in a page. */
#define NPTE2_IN_PG	(NPT2_IN_PG * NPTE2_IN_PT2)

#ifdef _KERNEL

/*
 * An L2 page table page contains NPT2_IN_PG L2 page tables.  Masking of
 * pte1_idx by PT2PG_MASK gives us an index to the associated L2 page table
 * in that page.  The PT2PG_SHIFT definition depends strictly on NPT2_IN_PG,
 * i.e. (1 << PT2PG_SHIFT) == NPT2_IN_PG must hold.
 */
#define PT2PG_SHIFT	2
#define PT2PG_MASK	((1 << PT2PG_SHIFT) - 1)

/*
 * A PT2TAB holds all allocated L2 page table pages in a pmap.
 * Right shifting a virtual address by PT2TAB_SHIFT gives the index of the
 * L2 page table page in PT2TAB which holds the mapping of that address.
 */
#define PT2TAB_ENTRIES	(NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT	(PTE1_SHIFT + PT2PG_SHIFT)

/*
 * All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
 * Right shifting a virtual address by PT2MAP_SHIFT gives the index of the
 * PTE2 which maps that address.
 */
#define PT2MAP_SIZE	(NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT	PTE2_SHIFT

extern pt1_entry_t *kern_pt1;
extern pt2_entry_t *kern_pt2tab;
extern pt2_entry_t *PT2MAP;

/*
 * Virtual interface for L1 page table management.
 */

static __inline u_int
pte1_index(vm_offset_t va)
{
	return (va >> PTE1_SHIFT);
}

static __inline pt1_entry_t *
pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
{
	return (pt1 + pte1_index(va));
}

static __inline vm_offset_t
pte1_trunc(vm_offset_t va)
{
	return (va & PTE1_FRAME);
}

static __inline vm_offset_t
pte1_roundup(vm_offset_t va)
{
	return ((va + PTE1_OFFSET) & PTE1_FRAME);
}

/*
 * Virtual interface for L1 page table entries management.
 *
 * XXX: Some of the following functions, which now include a synchronization
 * barrier, are called in loops, so it could be useful to have two versions
 * of them: one with the barrier and one without.  In that case, a pure
 * barrier pte1_sync() should be implemented as well.
 */
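/*
 * The store/clear/set helpers that follow all use the same publication
 * pattern (a paraphrase of the code below, not an additional API):
 *
 *	atomic update of the entry;
 *	dsb();                          -- make the update observable
 *	if (!cpuinfo.coherent_walk)
 *		dcache_wb_pou(...);     -- clean for a non-snooping walker
 *
 * The D-cache clean is compiled out when page tables are mapped uncached
 * (PMAP_PTE_NOCACHE) and skipped at run time when the table walker snoops
 * the D-cache (cpuinfo.coherent_walk).
 */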
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}

static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}

static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{
	atomic_store_rel_int(pte1p, pte1);
	pte1_sync(pte1p);
}

static __inline void
pte1_clear(pt1_entry_t *pte1p)
{
	pte1_store(pte1p, 0);
}

static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{
	atomic_clear_int(pte1p, bit);
	pte1_sync(pte1p);
}

static __inline boolean_t
pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
{
	boolean_t ret;

	ret = atomic_cmpset_int(pte1p, opte1, npte1);
	if (ret)
		pte1_sync(pte1p);
	return (ret);
}

static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{
	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
}

static __inline int
pte1_is_section(pt1_entry_t pte1)
{
	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}

static __inline boolean_t
pte1_is_dirty(pt1_entry_t pte1)
{
	return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}

static __inline boolean_t
pte1_is_global(pt1_entry_t pte1)
{
	return ((pte1 & PTE1_NG) == 0);
}

static __inline boolean_t
pte1_is_valid(pt1_entry_t pte1)
{
	int l1_type;

	l1_type = pte1 & L1_TYPE_MASK;
	return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}

static __inline boolean_t
pte1_is_wired(pt1_entry_t pte1)
{
	return (pte1 & PTE1_W);
}

static __inline pt1_entry_t
pte1_load(pt1_entry_t *pte1p)
{
	pt1_entry_t pte1;

	pte1 = *pte1p;
	return (pte1);
}

static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	opte1 = atomic_readandclear_int(pte1p);
	pte1_sync(pte1p);
	return (opte1);
}

static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{
	atomic_set_int(pte1p, bit);
	pte1_sync(pte1p);
}

static __inline vm_paddr_t
pte1_pa(pt1_entry_t pte1)
{
	return ((vm_paddr_t)(pte1 & PTE1_FRAME));
}

static __inline vm_paddr_t
pte1_link_pa(pt1_entry_t pte1)
{
	return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
}

/*
 * Virtual interface for L2 page table entries management.
 *
 * XXX: Some of the following functions, which now include a synchronization
 * barrier, are called in loops, so it could be useful to have two versions
 * of them: one with the barrier and one without.
 */
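/*
 * A minimal usage sketch (illustrative, not taken from the pmap code):
 * clearing the read-only bit of a live mapping without losing a concurrent
 * update of other bits could be written as
 *
 *	pt2_entry_t opte2;
 *
 *	do {
 *		opte2 = pte2_load(pte2p);
 *	} while (!pte2_cmpset(pte2p, opte2, opte2 & ~PTE2_RO));
 *
 * pte2_cmpset() republishes the entry (barrier plus cache clean) only when
 * the compare-and-set succeeds.
 */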
static __inline void
pte2_sync(pt2_entry_t *pte2p)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}

static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}

static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
	atomic_store_rel_int(pte2p, pte2);
	pte2_sync(pte2p);
}

static __inline void
pte2_clear(pt2_entry_t *pte2p)
{
	pte2_store(pte2p, 0);
}

static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{
	atomic_clear_int(pte2p, bit);
	pte2_sync(pte2p);
}

static __inline boolean_t
pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
{
	boolean_t ret;

	ret = atomic_cmpset_int(pte2p, opte2, npte2);
	if (ret)
		pte2_sync(pte2p);
	return (ret);
}

static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{
	return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}

static __inline boolean_t
pte2_is_global(pt2_entry_t pte2)
{
	return ((pte2 & PTE2_NG) == 0);
}

static __inline boolean_t
pte2_is_valid(pt2_entry_t pte2)
{
	return (pte2 & PTE2_V);
}

static __inline boolean_t
pte2_is_wired(pt2_entry_t pte2)
{
	return (pte2 & PTE2_W);
}

static __inline pt2_entry_t
pte2_load(pt2_entry_t *pte2p)
{
	pt2_entry_t pte2;

	pte2 = *pte2p;
	return (pte2);
}

static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
	pt2_entry_t opte2;

	opte2 = atomic_readandclear_int(pte2p);
	pte2_sync(pte2p);
	return (opte2);
}

static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{
	atomic_set_int(pte2p, bit);
	pte2_sync(pte2p);
}

static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{
	/*
	 * Wired bit is transparent for page table walk,
	 * so pte2_sync() is not needed.
	 */
	if (wired)
		atomic_set_int(pte2p, PTE2_W);
	else
		atomic_clear_int(pte2p, PTE2_W);
}

static __inline vm_paddr_t
pte2_pa(pt2_entry_t pte2)
{
	return ((vm_paddr_t)(pte2 & PTE2_FRAME));
}

static __inline u_int
pte2_attr(pt2_entry_t pte2)
{
	return ((u_int)(pte2 & PTE2_ATTR_MASK));
}

/*
 * Virtual interface for L2 page tables mapping management.
 */

static __inline u_int
pt2tab_index(vm_offset_t va)
{
	return (va >> PT2TAB_SHIFT);
}

static __inline pt2_entry_t *
pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
{
	return (pt2tab + pt2tab_index(va));
}

static __inline void
pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
	pte2_store(pte2p, pte2);
}

static __inline pt2_entry_t
pt2tab_load(pt2_entry_t *pte2p)
{
	return (pte2_load(pte2p));
}

static __inline pt2_entry_t
pt2tab_load_clear(pt2_entry_t *pte2p)
{
	return (pte2_load_clear(pte2p));
}

static __inline u_int
pt2map_index(vm_offset_t va)
{
	return (va >> PT2MAP_SHIFT);
}

static __inline pt2_entry_t *
pt2map_entry(vm_offset_t va)
{
	return (PT2MAP + pt2map_index(va));
}

/*
 * Virtual interface for pmap structure & kernel shortcuts.
 */
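/*
 * A minimal sketch (illustrative only): resolving the physical frame behind
 * a mapped kernel virtual address through the PT2MAP window could look like
 *
 *	pt2_entry_t pte2 = pte2_load(pt2map_entry(va));
 *
 *	if (pte2_is_valid(pte2))
 *		pa = pte2_pa(pte2);
 *
 * The shortcuts below provide analogous lookups relative to a particular
 * pmap and to the kernel tables.
 */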
static __inline pt1_entry_t *
pmap_pte1(pmap_t pmap, vm_offset_t va)
{
	return (pte1_ptr(pmap->pm_pt1, va));
}

static __inline pt1_entry_t *
kern_pte1(vm_offset_t va)
{
	return (pte1_ptr(kern_pt1, va));
}

static __inline pt2_entry_t *
pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
{
	return (pt2tab_entry(pmap->pm_pt2tab, va));
}

static __inline pt2_entry_t *
kern_pt2tab_entry(vm_offset_t va)
{
	return (pt2tab_entry(kern_pt2tab, va));
}

static __inline vm_page_t
pmap_pt2_page(pmap_t pmap, vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

static __inline vm_page_t
kern_pt2_page(vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(kern_pt2tab_entry(va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

#endif /* _KERNEL */
#endif /* !_MACHINE_PMAP_VAR_H_ */

Index: head/sys/arm/include/vm.h
===================================================================
--- head/sys/arm/include/vm.h	(revision 295694)
+++ head/sys/arm/include/vm.h	(revision 295695)
@@ -1,56 +1,54 @@
/*-
 * Copyright (c) 2009 Alan L. Cox
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_VM_H_
#define _MACHINE_VM_H_

#include

#if __ARM_ARCH >= 6
-#include
-
#define VM_MEMATTR_WB_WA		((vm_memattr_t)0)
#define VM_MEMATTR_NOCACHE		((vm_memattr_t)1)
#define VM_MEMATTR_DEVICE		((vm_memattr_t)2)
#define VM_MEMATTR_SO			((vm_memattr_t)3)
#define VM_MEMATTR_WRITE_THROUGH	((vm_memattr_t)4)

#define VM_MEMATTR_DEFAULT		VM_MEMATTR_WB_WA
#define VM_MEMATTR_UNCACHEABLE		VM_MEMATTR_SO	/* misused by DMA */
#ifdef _KERNEL
/* Don't export aliased VM_MEMATTR to userland */
#define VM_MEMATTR_WRITE_COMBINING	VM_MEMATTR_WRITE_THROUGH /* for DRM */
#define VM_MEMATTR_WRITE_BACK		VM_MEMATTR_WB_WA /* for DRM */
#endif
#else
/* Memory attribute configuration. */
#define VM_MEMATTR_DEFAULT	0
#define VM_MEMATTR_UNCACHEABLE	1
#endif

#endif /* !_MACHINE_VM_H_ */