diff --git a/sys/compat/linuxkpi/common/include/asm/set_memory.h b/sys/compat/linuxkpi/common/include/asm/set_memory.h
index cdb7ad912acc..ae50148f0314 100644
--- a/sys/compat/linuxkpi/common/include/asm/set_memory.h
+++ b/sys/compat/linuxkpi/common/include/asm/set_memory.h
@@ -1,117 +1,117 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_ASM_SET_MEMORY_H_
#define _LINUXKPI_ASM_SET_MEMORY_H_

#include

static inline int
set_memory_uc(unsigned long addr, int numpages)
{
    return (pmap_change_attr(addr, numpages, VM_MEMATTR_UNCACHEABLE));
}

static inline int
set_memory_wc(unsigned long addr, int numpages)
{
#ifdef VM_MEMATTR_WRITE_COMBINING
    return (pmap_change_attr(addr, numpages, VM_MEMATTR_WRITE_COMBINING));
#else
    return (set_memory_uc(addr, numpages));
#endif
}

static inline int
set_memory_wb(unsigned long addr, int numpages)
{
    return (pmap_change_attr(addr, numpages, VM_MEMATTR_WRITE_BACK));
}

static inline int
-set_pages_uc(vm_page_t page, int numpages)
+set_pages_uc(struct page *page, int numpages)
{
    KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));

    pmap_page_set_memattr(page, VM_MEMATTR_UNCACHEABLE);
    return (0);
}

static inline int
-set_pages_wc(vm_page_t page, int numpages)
+set_pages_wc(struct page *page, int numpages)
{
    KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));

#ifdef VM_MEMATTR_WRITE_COMBINING
    pmap_page_set_memattr(page, VM_MEMATTR_WRITE_COMBINING);
#else
    return (set_pages_uc(page, numpages));
#endif
    return (0);
}

static inline int
-set_pages_wb(vm_page_t page, int numpages)
+set_pages_wb(struct page *page, int numpages)
{
    KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));

    pmap_page_set_memattr(page, VM_MEMATTR_WRITE_BACK);
    return (0);
}

static inline int
-set_pages_array_wb(vm_page_t *pages, int addrinarray)
+set_pages_array_wb(struct page **pages, int addrinarray)
{
    int i;

    for (i = 0; i < addrinarray; i++)
        set_pages_wb(pages[i], 1);
    return (0);
}

static inline int
-set_pages_array_wc(vm_page_t *pages, int addrinarray)
+set_pages_array_wc(struct page **pages, int addrinarray)
{
    int i;

    for (i = 0; i < addrinarray; i++)
        set_pages_wc(pages[i], 1);
    return (0);
}

static inline int
-set_pages_array_uc(vm_page_t *pages, int addrinarray)
+set_pages_array_uc(struct page **pages, int addrinarray)
{
    int i;

    for (i = 0; i < addrinarray; i++)
        set_pages_uc(pages[i], 1);
    return (0);
}

#endif /* _LINUXKPI_ASM_SET_MEMORY_H_ */
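
After this type change, consumers pass Linux-style struct page pointers rather than vm_page_t. As a usage sketch (hypothetical caller, not part of this patch; example_make_wc is an invented name):

static int
example_make_wc(struct page **pages, int npages)
{
    int i, ret;

    /* Switch the backing pages to write-combining for device access... */
    ret = set_pages_array_wc(pages, npages);
    if (ret != 0)
        return (ret);
    /* ...and restore write-back before the pages are freed. */
    for (i = 0; i < npages; i++)
        set_pages_wb(pages[i], 1);
    return (0);
}
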
diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
index 70c486002335..c5de09e896de 100644
--- a/sys/compat/linuxkpi/common/include/linux/gfp.h
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -1,216 +1,216 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_GFP_H_
#define _LINUXKPI_LINUX_GFP_H_

#include
#include
#include
#include
#include
#include
#include
#include
#include

#define __GFP_NOWARN 0
#define __GFP_HIGHMEM 0
#define __GFP_ZERO M_ZERO
#define __GFP_NORETRY 0
#define __GFP_NOMEMALLOC 0
#define __GFP_RECLAIM 0
#define __GFP_RECLAIMABLE 0
#define __GFP_RETRY_MAYFAIL 0
#define __GFP_MOVABLE 0
#define __GFP_COMP 0
#define __GFP_KSWAPD_RECLAIM 0
#define __GFP_IO 0
#define __GFP_NO_KSWAPD 0
#define __GFP_KSWAPD_RECLAIM 0
#define __GFP_WAIT M_WAITOK

#define __GFP_DMA32 (1U << 24) /* LinuxKPI only */
#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
#define __GFP_NOFAIL M_WAITOK

#define GFP_NOWAIT M_NOWAIT
#define GFP_ATOMIC (M_NOWAIT | M_USE_RESERVE)
#define GFP_KERNEL M_WAITOK
#define GFP_USER M_WAITOK
#define GFP_HIGHUSER M_WAITOK
#define GFP_HIGHUSER_MOVABLE M_WAITOK
#define GFP_IOFS M_NOWAIT
#define GFP_NOIO M_NOWAIT
#define GFP_NOFS M_NOWAIT
#define GFP_DMA32 __GFP_DMA32
#define GFP_TEMPORARY M_NOWAIT
#define GFP_NATIVE_MASK (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
#define GFP_TRANSHUGE 0
#define GFP_TRANSHUGE_LIGHT 0

CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);

struct page_frag_cache {
    void *va;
    int pagecnt_bias;
};

/*
 * Resolve a page into a virtual address:
 *
 * NOTE: This function only works for pages allocated by the kernel.
 */
extern void *linux_page_address(struct page *);

#define page_address(page) linux_page_address(page)
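
/*
 * Illustrative sketch, not part of this change: the CTASSERTs above
 * guarantee that the LinuxKPI-only __GFP_DMA32 bit can be peeled off
 * before the remaining flags are handed to malloc(9).  A hypothetical
 * allocator wrapper (example_alloc and M_DEVBUF chosen for illustration)
 * would split the flags like this:
 *
 *	static void *
 *	example_alloc(gfp_t gfp, size_t size)
 *	{
 *		if ((gfp & __GFP_DMA32) != 0)
 *			return (NULL);	// take a below-4G contig path instead
 *		return (malloc(size, M_DEVBUF, gfp & GFP_NATIVE_MASK));
 *	}
 */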

/*
 * Page management for unmapped pages:
 */
-extern vm_page_t linux_alloc_pages(gfp_t flags, unsigned int order);
-extern void linux_free_pages(vm_page_t page, unsigned int order);
+extern struct page *linux_alloc_pages(gfp_t flags, unsigned int order);
+extern void linux_free_pages(struct page *page, unsigned int order);
void *linuxkpi_page_frag_alloc(struct page_frag_cache *, size_t, gfp_t);
void linuxkpi_page_frag_free(void *);
void linuxkpi__page_frag_cache_drain(struct page *, size_t);

static inline struct page *
alloc_page(gfp_t flags)
{
    return (linux_alloc_pages(flags, 0));
}

static inline struct page *
alloc_pages(gfp_t flags, unsigned int order)
{
    return (linux_alloc_pages(flags, order));
}

static inline struct page *
alloc_pages_node(int node_id, gfp_t flags, unsigned int order)
{
    return (linux_alloc_pages(flags, order));
}

static inline void
__free_pages(struct page *page, unsigned int order)
{
    linux_free_pages(page, order);
}

static inline void
__free_page(struct page *page)
{
    linux_free_pages(page, 0);
}

/*
 * Page management for mapped pages:
 */
extern vm_offset_t linux_alloc_kmem(gfp_t flags, unsigned int order);
extern void linux_free_kmem(vm_offset_t, unsigned int order);

static inline vm_offset_t
get_zeroed_page(gfp_t flags)
{
    return (linux_alloc_kmem(flags | __GFP_ZERO, 0));
}

static inline vm_offset_t
__get_free_page(gfp_t flags)
{
    return (linux_alloc_kmem(flags, 0));
}

static inline vm_offset_t
__get_free_pages(gfp_t flags, unsigned int order)
{
    return (linux_alloc_kmem(flags, order));
}

static inline void
free_pages(uintptr_t addr, unsigned int order)
{
    if (addr == 0)
        return;

    linux_free_kmem(addr, order);
}

static inline void
free_page(uintptr_t addr)
{
    if (addr == 0)
        return;

    linux_free_kmem(addr, 0);
}

static inline void *
page_frag_alloc(struct page_frag_cache *pfc, size_t fragsz, gfp_t gfp)
{
    return (linuxkpi_page_frag_alloc(pfc, fragsz, gfp));
}

static inline void
page_frag_free(void *addr)
{
    linuxkpi_page_frag_free(addr);
}

static inline void
__page_frag_cache_drain(struct page *page, size_t count)
{
    linuxkpi__page_frag_cache_drain(page, count);
}

static inline bool
gfpflags_allow_blocking(const gfp_t gfp_flags)
{
    return ((gfp_flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK);
}

#define SetPageReserved(page) do { } while (0) /* NOP */
#define ClearPageReserved(page) do { } while (0) /* NOP */

#endif /* _LINUXKPI_LINUX_GFP_H_ */
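
A usage sketch of the unmapped-page API above (hypothetical caller, not part of this patch). Note that page_address() can return NULL on systems without a direct map, so the result must be checked:

static int
example_page_use(void)
{
    struct page *page;
    void *va;

    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (page == NULL)
        return (-ENOMEM);
    va = page_address(page);	/* may be NULL without a DMAP */
    if (va != NULL)
        memset(va, 0, PAGE_SIZE);
    __free_page(page);
    return (0);
}
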
diff --git a/sys/compat/linuxkpi/common/include/linux/highmem.h b/sys/compat/linuxkpi/common/include/linux/highmem.h
index 1c2c97e03578..a3f9af82400e 100644
--- a/sys/compat/linuxkpi/common/include/linux/highmem.h
+++ b/sys/compat/linuxkpi/common/include/linux/highmem.h
@@ -1,135 +1,135 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2021 Vladimir Kondratyev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_HIGHMEM_H_
#define _LINUXKPI_LINUX_HIGHMEM_H_

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define PageHighMem(p) (0)

-static inline vm_page_t
+static inline struct page *
kmap_to_page(void *addr)
{
    return (virt_to_page(addr));
}

static inline void *
-kmap(vm_page_t page)
+kmap(struct page *page)
{
    struct sf_buf *sf;

    if (PMAP_HAS_DMAP) {
        return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)));
    } else {
        sched_pin();
        sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
        if (sf == NULL) {
            sched_unpin();
            return (NULL);
        }
        return ((void *)sf_buf_kva(sf));
    }
}

static inline void *
-kmap_atomic_prot(vm_page_t page, pgprot_t prot)
+kmap_atomic_prot(struct page *page, pgprot_t prot)
{
    vm_memattr_t attr = pgprot2cachemode(prot);

    if (attr != VM_MEMATTR_DEFAULT) {
        vm_page_lock(page);
        page->flags |= PG_FICTITIOUS;
        vm_page_unlock(page);
        pmap_page_set_memattr(page, attr);
    }
    return (kmap(page));
}

static inline void *
-kmap_atomic(vm_page_t page)
+kmap_atomic(struct page *page)
{
    return (kmap_atomic_prot(page, VM_PROT_ALL));
}

static inline void *
-kmap_local_page_prot(vm_page_t page, pgprot_t prot)
+kmap_local_page_prot(struct page *page, pgprot_t prot)
{
    return (kmap_atomic_prot(page, prot));
}

static inline void
-kunmap(vm_page_t page)
+kunmap(struct page *page)
{
    struct sf_buf *sf;

    if (!PMAP_HAS_DMAP) {
        /* lookup SF buffer in list */
        sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);

        /* double-free */
        sf_buf_free(sf);
        sf_buf_free(sf);

        sched_unpin();
    }
}

static inline void
kunmap_atomic(void *vaddr)
{
    if (!PMAP_HAS_DMAP)
        kunmap(virt_to_page(vaddr));
}

static inline void
kunmap_local(void *addr)
{
    kunmap_atomic(addr);
}

#endif /* _LINUXKPI_LINUX_HIGHMEM_H_ */
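
A usage sketch for the mapping helpers above (hypothetical caller, not part of this patch). On DMAP systems kmap() is effectively free; on non-DMAP systems it pins the CPU and borrows an sf_buf, which kunmap() returns:

static int
example_copy_to_page(struct page *page, const void *src)
{
    void *dst;

    dst = kmap(page);
    if (dst == NULL)
        return (-ENOMEM);
    memcpy(dst, src, PAGE_SIZE);
    kunmap(page);
    return (0);
}
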
diff --git a/sys/compat/linuxkpi/common/include/linux/mm.h b/sys/compat/linuxkpi/common/include/linux/mm.h
index ce3c94ee64aa..3d826f73a494 100644
--- a/sys/compat/linuxkpi/common/include/linux/mm.h
+++ b/sys/compat/linuxkpi/common/include/linux/mm.h
@@ -1,370 +1,370 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 François Tigeot
 * Copyright (c) 2015 Matthew Dillon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_MM_H_
#define _LINUXKPI_LINUX_MM_H_

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

/*
 * Make sure our LinuxKPI defined virtual memory flags don't conflict
 * with the ones defined by FreeBSD:
 */
CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);

#define VM_READ VM_PROT_READ
#define VM_WRITE VM_PROT_WRITE
#define VM_EXEC VM_PROT_EXECUTE

#define VM_PFNINTERNAL (1 << 8) /* FreeBSD private flag to vm_insert_pfn() */
#define VM_MIXEDMAP (1 << 9)
#define VM_NORESERVE (1 << 10)
#define VM_PFNMAP (1 << 11)
#define VM_IO (1 << 12)
#define VM_MAYWRITE (1 << 13)
#define VM_DONTCOPY (1 << 14)
#define VM_DONTEXPAND (1 << 15)
#define VM_DONTDUMP (1 << 16)
#define VM_SHARED (1 << 17)

#define VMA_MAX_PREFAULT_RECORD 1

#define FOLL_WRITE (1 << 0)
#define FOLL_FORCE (1 << 1)

#define VM_FAULT_OOM (1 << 0)
#define VM_FAULT_SIGBUS (1 << 1)
#define VM_FAULT_MAJOR (1 << 2)
#define VM_FAULT_WRITE (1 << 3)
#define VM_FAULT_HWPOISON (1 << 4)
#define VM_FAULT_HWPOISON_LARGE (1 << 5)
#define VM_FAULT_SIGSEGV (1 << 6)
#define VM_FAULT_NOPAGE (1 << 7)
#define VM_FAULT_LOCKED (1 << 8)
#define VM_FAULT_RETRY (1 << 9)
#define VM_FAULT_FALLBACK (1 << 10)

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
    VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define FAULT_FLAG_WRITE (1 << 0)
#define FAULT_FLAG_MKWRITE (1 << 1)
#define FAULT_FLAG_ALLOW_RETRY (1 << 2)
#define FAULT_FLAG_RETRY_NOWAIT (1 << 3)
#define FAULT_FLAG_KILLABLE (1 << 4)
#define FAULT_FLAG_TRIED (1 << 5)
#define FAULT_FLAG_USER (1 << 6)
#define FAULT_FLAG_REMOTE (1 << 7)
#define FAULT_FLAG_INSTRUCTION (1 << 8)

#define fault_flag_allow_retry_first(flags) \
    (((flags) & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_TRIED)) == \
    FAULT_FLAG_ALLOW_RETRY)

typedef int (*pte_fn_t)(linux_pte_t *, unsigned long addr, void *data);

struct vm_area_struct {
    vm_offset_t vm_start;
    vm_offset_t vm_end;
    vm_offset_t vm_pgoff;
    pgprot_t vm_page_prot;
    unsigned long vm_flags;
    struct mm_struct *vm_mm;
    void *vm_private_data;
    const struct vm_operations_struct *vm_ops;
    struct linux_file *vm_file;

    /* internal operation */
    vm_paddr_t vm_pfn; /* PFN for memory map */
    vm_size_t vm_len; /* length for memory map */
    vm_pindex_t vm_pfn_first;
    int vm_pfn_count;
    int *vm_pfn_pcount;
    vm_object_t vm_obj;
    vm_map_t vm_cached_map;
    TAILQ_ENTRY(vm_area_struct) vm_entry;
};

struct vm_fault {
    unsigned int flags;
    pgoff_t pgoff;
    union {
        /* user-space address */
        void *virtual_address; /* < 4.11 */
        unsigned long address; /* >= 4.11 */
    };
    struct page *page;
    struct vm_area_struct *vma;
};

struct vm_operations_struct {
    void (*open)(struct vm_area_struct *);
    void (*close)(struct vm_area_struct *);
    int (*fault)(struct vm_area_struct *, struct vm_fault *);
    int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);
};
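
/*
 * Illustrative sketch, not part of this change: a driver fault handler
 * against this vm_operations_struct layout returns the VM_FAULT_* codes
 * defined above.  example_fault and struct example_obj are invented
 * names for illustration only:
 *
 *	static int
 *	example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct example_obj *obj = vma->vm_private_data;
 *
 *		if (obj == NULL)
 *			return (VM_FAULT_SIGBUS);
 *		// resolve vmf->pgoff to a page, install it, ...
 *		return (VM_FAULT_NOPAGE);
 *	}
 */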

struct sysinfo {
    uint64_t totalram; /* Total usable main memory size */
    uint64_t freeram; /* Available memory size */
    uint64_t totalhigh; /* Total high memory size */
    uint64_t freehigh; /* Available high memory size */
    uint32_t mem_unit; /* Memory unit size in bytes */
};

static inline struct page *
virt_to_head_page(const void *p)
{
    return (virt_to_page(p));
}

/*
 * Compute log2 of the power of two rounded up count of pages
 * needed for size bytes.
 */
static inline int
get_order(unsigned long size)
{
    int order;

    size = (size - 1) >> PAGE_SHIFT;
    order = 0;
    while (size) {
        order++;
        size >>= 1;
    }
    return (order);
}

static inline void *
lowmem_page_address(struct page *page)
{
    return (page_address(page));
}

/*
 * This only works via memory map operations.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
    vma->vm_page_prot = prot;
    vma->vm_pfn = pfn;
    vma->vm_len = size;

    return (0);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot);

static inline vm_fault_t
vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
    vm_fault_t ret;

    VM_OBJECT_WLOCK(vma->vm_obj);
    ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);
    VM_OBJECT_WUNLOCK(vma->vm_obj);

    return (ret);
}
#define vmf_insert_pfn_prot(...) \
    _Static_assert(false, \
        "This function is always called in a loop. Consider using the locked version")

static inline int
apply_to_page_range(struct mm_struct *mm, unsigned long address,
    unsigned long size, pte_fn_t fn, void *data)
{
    return (-ENOTSUP);
}

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size);

int lkpi_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long start_addr, unsigned long start_pfn, unsigned long size,
    pgprot_t prot);

static inline int
remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size, pgprot_t prot)
{
    return (lkpi_remap_pfn_range(vma, addr, pfn, size, prot));
}

static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
    return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
}

#define offset_in_page(off) ((unsigned long)(off) & (PAGE_SIZE - 1))

static inline void
-set_page_dirty(struct vm_page *page)
+set_page_dirty(struct page *page)
{
    vm_page_dirty(page);
}

static inline void
-mark_page_accessed(struct vm_page *page)
+mark_page_accessed(struct page *page)
{
    vm_page_reference(page);
}

static inline void
-get_page(struct vm_page *page)
+get_page(struct page *page)
{
    vm_page_wire(page);
}

extern long
get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **,
    struct vm_area_struct **);

static inline long
pin_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages,
    struct vm_area_struct **vmas)
{
    return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
}

extern int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **);

static inline int
pin_user_pages_fast(unsigned long start, int nr_pages,
    unsigned int gup_flags, struct page **pages)
{
    return __get_user_pages_fast(
        start, nr_pages, !!(gup_flags & FOLL_WRITE), pages);
}

extern long
get_user_pages_remote(struct task_struct *, struct mm_struct *,
    unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **,
    struct vm_area_struct **);
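
/*
 * Illustrative sketch, not part of this change: a typical pin/DMA/unpin
 * sequence built on the wrappers above (unpin_user_pages is defined a
 * few lines below).  example_pin is an invented name:
 *
 *	static long
 *	example_pin(unsigned long uaddr, unsigned long npages,
 *	    struct page **pages)
 *	{
 *		long got;
 *
 *		got = pin_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *		if (got < 0)
 *			return (got);
 *		// ... DMA into the pinned pages ...
 *		unpin_user_pages(pages, got);
 *		return (0);
 *	}
 */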

static inline long
pin_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages,
    struct vm_area_struct **vmas)
{
    return get_user_pages_remote(
        task, mm, start, nr_pages, gup_flags, pages, vmas);
}

static inline void
-put_page(struct vm_page *page)
+put_page(struct page *page)
{
    vm_page_unwire(page, PQ_ACTIVE);
}

#define unpin_user_page(page) put_page(page)
#define unpin_user_pages(pages, npages) release_pages(pages, npages)

#define copy_highpage(to, from) pmap_copy_page(from, to)

static inline pgprot_t
vm_get_page_prot(unsigned long vm_flags)
{
    return (vm_flags & VM_PROT_ALL);
}

-static inline vm_page_t
+static inline struct page *
vmalloc_to_page(const void *addr)
{
    vm_paddr_t paddr;

    paddr = pmap_kextract((vm_offset_t)addr);
    return (PHYS_TO_VM_PAGE(paddr));
}

static inline int
trylock_page(struct page *page)
{
    return (vm_page_trylock(page));
}

static inline void
unlock_page(struct page *page)
{
    vm_page_unlock(page);
}

extern int is_vmalloc_addr(const void *addr);
void si_meminfo(struct sysinfo *si);

static inline unsigned long
totalram_pages(void)
{
    return ((unsigned long)physmem);
}

#define unmap_mapping_range(...) lkpi_unmap_mapping_range(__VA_ARGS__)
void lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused);

#define PAGE_ALIGNED(p) __is_aligned(p, PAGE_SIZE)

void vma_set_file(struct vm_area_struct *vma, struct linux_file *file);

static inline void
might_alloc(gfp_t gfp_mask __unused)
{
}

#define is_cow_mapping(flags) (false)

#endif /* _LINUXKPI_LINUX_MM_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pagevec.h b/sys/compat/linuxkpi/common/include/linux/pagevec.h
index 4224124c4fe4..9ba8ff8effa0 100644
--- a/sys/compat/linuxkpi/common/include/linux/pagevec.h
+++ b/sys/compat/linuxkpi/common/include/linux/pagevec.h
@@ -1,69 +1,69 @@
/* Public domain. */

#ifndef _LINUXKPI_LINUX_PAGEVEC_H_
#define _LINUXKPI_LINUX_PAGEVEC_H_

#include
#include
#include
#include

#define PAGEVEC_SIZE 15

struct pagevec {
    uint8_t nr;
-   struct vm_page *pages[PAGEVEC_SIZE];
+   struct page *pages[PAGEVEC_SIZE];
};

static inline unsigned int
pagevec_space(struct pagevec *pvec)
{
    return PAGEVEC_SIZE - pvec->nr;
}

static inline void
pagevec_init(struct pagevec *pvec)
{
    pvec->nr = 0;
}

static inline void
pagevec_reinit(struct pagevec *pvec)
{
    pvec->nr = 0;
}

static inline unsigned int
pagevec_count(struct pagevec *pvec)
{
    return pvec->nr;
}

static inline unsigned int
-pagevec_add(struct pagevec *pvec, struct vm_page *page)
+pagevec_add(struct pagevec *pvec, struct page *page)
{
    pvec->pages[pvec->nr++] = page;
    return PAGEVEC_SIZE - pvec->nr;
}

static inline void
__pagevec_release(struct pagevec *pvec)
{
    release_pages(pvec->pages, pagevec_count(pvec));
    pagevec_reinit(pvec);
}

static inline void
pagevec_release(struct pagevec *pvec)
{
    if (pagevec_count(pvec))
        __pagevec_release(pvec);
}

static inline void
check_move_unevictable_pages(struct pagevec *pvec)
{
}

#endif /* _LINUXKPI_LINUX_PAGEVEC_H_ */
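
A usage sketch for the pagevec above (hypothetical caller, not part of this patch). pagevec_add() returns the space remaining after the add, so a zero return means the vector is full and must be flushed:

static void
example_release_all(struct page **pages, int npages)
{
    struct pagevec pvec;
    int i;

    pagevec_init(&pvec);
    for (i = 0; i < npages; i++) {
        if (pagevec_add(&pvec, pages[i]) == 0)
            __pagevec_release(&pvec);	/* pvec is full */
    }
    pagevec_release(&pvec);		/* flush any remainder */
}
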
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index 41e4989c397c..ca2f6a8e6d08 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -1,536 +1,536 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
    si->totalram = physmem;
    si->freeram = vm_free_count();
    si->totalhigh = 0;
    si->freehigh = 0;
    si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(struct page *page)
{
    if (page->object != kernel_object) {
        return (PMAP_HAS_DMAP ?
            ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
            NULL);
    }
    return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
        IDX_TO_OFF(page->pindex)));
}
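
/*
 * Assumed invariant, not part of this change (compare the KASSERT in
 * linux_alloc_pages() below): for kmem-backed pages, page_address()
 * and vtophys() round-trip.  A hypothetical self-check would read:
 *
 *	static void
 *	example_roundtrip_check(struct page *page)
 *	{
 *		void *va = linux_page_address(page);
 *
 *		if (va != NULL)
 *			KASSERT(PHYS_TO_VM_PAGE(vtophys(va)) == page,
 *			    ("page/address mismatch"));
 *	}
 */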

-vm_page_t
+struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
-   vm_page_t page;
+   struct page *page;

    if (PMAP_HAS_DMAP) {
        unsigned long npages = 1UL << order;
        int req = VM_ALLOC_WIRED;

        if ((flags & M_ZERO) != 0)
            req |= VM_ALLOC_ZERO;
        if (order == 0 && (flags & GFP_DMA32) == 0) {
            page = vm_page_alloc_noobj(req);
            if (page == NULL)
                return (NULL);
        } else {
            vm_paddr_t pmax = (flags & GFP_DMA32) ?
                BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
            page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
                PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
            if (page == NULL) {
                if (flags & M_WAITOK) {
                    if (!vm_page_reclaim_contig(req,
                        npages, 0, pmax, PAGE_SIZE, 0)) {
                        vm_wait(NULL);
                    }
                    flags &= ~M_WAITOK;
                    goto retry;
                }
                return (NULL);
            }
        }
    } else {
        vm_offset_t vaddr;

        vaddr = linux_alloc_kmem(flags, order);
        if (vaddr == 0)
            return (NULL);
        page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

        KASSERT(vaddr == (vm_offset_t)page_address(page),
            ("Page address mismatch"));
    }

    return (page);
}

void
-linux_free_pages(vm_page_t page, unsigned int order)
+linux_free_pages(struct page *page, unsigned int order)
{
    if (PMAP_HAS_DMAP) {
        unsigned long npages = 1UL << order;
        unsigned long x;

        for (x = 0; x != npages; x++) {
            vm_page_t pgo = page + x;

            if (vm_page_unwire_noq(pgo))
                vm_page_free(pgo);
        }
    } else {
        vm_offset_t vaddr;

        vaddr = (vm_offset_t)page_address(page);

        linux_free_kmem(vaddr, order);
    }
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
    size_t size = ((size_t)PAGE_SIZE) << order;
    vm_offset_t addr;

    if ((flags & GFP_DMA32) == 0) {
        addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
    } else {
        addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
            BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
    }
    return (addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
    size_t size = ((size_t)PAGE_SIZE) << order;

    kmem_free(addr, size);
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
    vm_prot_t prot;
    size_t len;
    int count;

    prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
    len = ptoa((vm_offset_t)nr_pages);
    count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
    return (count == -1 ? -EFAULT : nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
    vm_map_t map;
    vm_page_t *mp;
    vm_offset_t va;
    vm_offset_t end;
    vm_prot_t prot;
    int count;

    if (nr_pages == 0 || in_interrupt())
        return (0);

    MPASS(pages != NULL);
    map = &curthread->td_proc->p_vmspace->vm_map;
    end = start + ptoa((vm_offset_t)nr_pages);
    if (!vm_map_range_valid(map, start, end))
        return (-EINVAL);
    prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
    for (count = 0, mp = pages, va = start; va < end;
        mp++, va += PAGE_SIZE, count++) {
        *mp = pmap_extract_and_hold(map->pmap, va, prot);
        if (*mp == NULL)
            break;

        if ((prot & VM_PROT_WRITE) != 0 &&
            (*mp)->dirty != VM_PAGE_BITS_ALL) {
            /*
             * Explicitly dirty the physical page. Otherwise, the
             * caller's changes may go unnoticed because they are
             * performed through an unmanaged mapping or by a DMA
             * operation.
             *
             * The object lock is not held here.
             * See vm_page_clear_dirty_mask().
             */
            vm_page_dirty(*mp);
        }
    }
    return (count);
}
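
/*
 * Illustrative sketch, not part of this change: pairing the mapped-page
 * allocator above with the gfp.h wrappers.  An order-1 (two-page) zeroed
 * allocation and its release would look like this (example_kmem is an
 * invented name):
 *
 *	static int
 *	example_kmem(void)
 *	{
 *		vm_offset_t addr;
 *
 *		addr = linux_alloc_kmem(GFP_KERNEL | __GFP_ZERO, 1);
 *		if (addr == 0)
 *			return (-ENOMEM);
 *		// ... use the 2 * PAGE_SIZE buffer at addr ...
 *		linux_free_kmem(addr, 1);
 *		return (0);
 *	}
 */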

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages,
    struct vm_area_struct **vmas)
{
    vm_map_t map;

    map = &task->task_thread->td_proc->p_vmspace->vm_map;
    return (linux_get_user_pages_internal(map, start, nr_pages,
        !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages,
    struct vm_area_struct **vmas)
{
    vm_map_t map;

    map = &curthread->td_proc->p_vmspace->vm_map;
    return (linux_get_user_pages_internal(map, start, nr_pages,
        !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
    return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
    vm_object_t vm_obj = vma->vm_obj;
    vm_object_t tmp_obj;
    vm_page_t page;
    vm_pindex_t pindex;

    VM_OBJECT_ASSERT_WLOCKED(vm_obj);
    pindex = OFF_TO_IDX(addr - vma->vm_start);
    if (vma->vm_pfn_count == 0)
        vma->vm_pfn_first = pindex;
    MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
    page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
    if (page == NULL) {
        page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
        if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
            goto retry;
        if (page->object != NULL) {
            tmp_obj = page->object;
            vm_page_xunbusy(page);
            VM_OBJECT_WUNLOCK(vm_obj);
            VM_OBJECT_WLOCK(tmp_obj);
            if (page->object == tmp_obj &&
                vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
                KASSERT(page->object == tmp_obj,
                    ("page has changed identity"));
                KASSERT((page->oflags & VPO_UNMANAGED) == 0,
                    ("page does not belong to shmem"));
                vm_pager_page_unswapped(page);
                if (pmap_page_is_mapped(page)) {
                    vm_page_xunbusy(page);
                    VM_OBJECT_WUNLOCK(tmp_obj);
                    printf("%s: page rename failed: page "
                        "is mapped\n", __func__);
                    VM_OBJECT_WLOCK(vm_obj);
                    return (VM_FAULT_NOPAGE);
                }
                vm_page_remove(page);
            }
            VM_OBJECT_WUNLOCK(tmp_obj);
            VM_OBJECT_WLOCK(vm_obj);
            goto retry;
        }
        if (vm_page_insert(page, vm_obj, pindex)) {
            vm_page_xunbusy(page);
            return (VM_FAULT_OOM);
        }
        vm_page_valid(page);
    }
    pmap_page_set_memattr(page, pgprot2cachemode(prot));
    vma->vm_pfn_count++;

    return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
    vm_object_t vm_obj;
    unsigned long addr, pfn;
    int err = 0;

    vm_obj = vma->vm_obj;

    VM_OBJECT_WLOCK(vm_obj);
    for (addr = start_addr, pfn = start_pfn;
        addr < start_addr + size;
        addr += PAGE_SIZE) {
        vm_fault_t ret;
retry:
        ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

        if ((ret & VM_FAULT_OOM) != 0) {
            VM_OBJECT_WUNLOCK(vm_obj);
            vm_wait(NULL);
            VM_OBJECT_WLOCK(vm_obj);
            goto retry;
        }

        if ((ret & VM_FAULT_ERROR) != 0) {
            err = -EFAULT;
            break;
        }

        pfn++;
    }
    VM_OBJECT_WUNLOCK(vm_obj);

    if (unlikely(err)) {
        zap_vma_ptes(vma, start_addr,
            (pfn - start_pfn) << PAGE_SHIFT);
        return (err);
    }

    return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
    pgprot_t prot;
    int ret;

    prot = cachemode2protval(iomap->attr);
    ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

    return (ret);
}
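
/*
 * Illustrative sketch, not part of this change: an mmap-time caller of
 * remap_pfn_range() (which resolves to lkpi_remap_pfn_range() above)
 * mapping a physically contiguous region.  example_mmap is an invented
 * name:
 *
 *	static int
 *	example_mmap(struct vm_area_struct *vma, vm_paddr_t phys)
 *	{
 *		unsigned long pfn = phys >> PAGE_SHIFT;
 *
 *		return (remap_pfn_range(vma, vma->vm_start, pfn,
 *		    vma->vm_end - vma->vm_start, vma->vm_page_prot));
 *	}
 */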

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * types of parameters compatible with the Linux version, the values
 * passed in are different:
 * @obj should match the vm_private_data field of the vm_area_struct
 *      returned by the mmap file operation handler, see
 *      linux_file_mmap_single() sources
 * @holelen should match the size of the area to be munmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
    vm_object_t devobj;
    vm_page_t page;
    int i, page_count;

    devobj = cdev_pager_lookup(obj);
    if (devobj != NULL) {
        page_count = OFF_TO_IDX(holelen);

        VM_OBJECT_WLOCK(devobj);
retry:
        for (i = 0; i < page_count; i++) {
            page = vm_page_lookup(devobj, i);
            if (page == NULL)
                continue;
            if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
                goto retry;
            cdev_pager_free_page(devobj, page);
        }
        VM_OBJECT_WUNLOCK(devobj);
        vm_object_deallocate(devobj);
    }
}

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
    struct mem_range_desc *mrdesc;
    int error, id, act;

    /* If PAT is available, do nothing */
    if (pat_works)
        return (0);

    mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
    mrdesc->mr_base = base;
    mrdesc->mr_len = size;
    mrdesc->mr_flags = MDF_WRITECOMBINE;
    strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
    act = MEMRANGE_SET_UPDATE;
    error = mem_range_attr_set(mrdesc, &act);

    if (error == 0) {
        error = idr_get_new(&mtrr_idr, mrdesc, &id);
        MPASS(idr_find(&mtrr_idr, id) == mrdesc);
        if (error != 0) {
            act = MEMRANGE_SET_REMOVE;
            mem_range_attr_set(mrdesc, &act);
        }
    }

    if (error != 0) {
        free(mrdesc, M_LKMTRR);
        pr_warn(
            "Failed to add WC MTRR for [%p-%p]: %d; "
            "performance may suffer\n",
            (void *)base, (void *)(base + size - 1), error);
    } else
        pr_warn("Successfully added WC MTRR for [%p-%p]\n",
            (void *)base, (void *)(base + size - 1));

    return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
    return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
    struct mem_range_desc *mrdesc;
    int act;

    /* Check if arch_phys_wc_add() failed. */
    if (reg < __MTRR_ID_BASE)
        return;

    mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
    MPASS(mrdesc != NULL);
    idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
    act = MEMRANGE_SET_REMOVE;
    mem_range_attr_set(mrdesc, &act);
    free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support up to a single page as fragment size, and we will
 * always return a full page.  This may be wasteful on small objects,
 * but the only known consumer (mt76) is either asking for a half-page
 * or a full page.  If this were to become a problem we can implement
 * a more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
    vm_page_t pages;

    if (fragsz == 0)
        return (NULL);

    KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
        "supported", __func__, fragsz));

    pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
    if (pages == NULL)
        return (NULL);
    pfc->va = linux_page_address(pages);

    /* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
    pfc->pagecnt_bias = 0;

    return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
    vm_page_t page;

    page = PHYS_TO_VM_PAGE(vtophys(addr));
    linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{
    linux_free_pages(page, 0);
}
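
A usage sketch for the simplified page_frag cache above (hypothetical mt76-style consumer, not part of this patch; example_frag is an invented name). Per the comment above, any fragment size up to PAGE_SIZE is accepted but a full page is consumed:

static int
example_frag(struct page_frag_cache *pfc)
{
    void *buf;

    buf = linuxkpi_page_frag_alloc(pfc, PAGE_SIZE / 2, GFP_ATOMIC);
    if (buf == NULL)
        return (-ENOMEM);
    /* ... fill the buffer ... */
    linuxkpi_page_frag_free(buf);
    return (0);
}
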
diff --git a/sys/compat/linuxkpi/common/src/linux_shmemfs.c b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
index 0ebbd34d067d..3c71d6495f4a 100644
--- a/sys/compat/linuxkpi/common/src/linux_shmemfs.c
+++ b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
@@ -1,126 +1,126 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

struct page *
linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
{
-   vm_page_t page;
+   struct page *page;
    int rv;

    if ((gfp & GFP_NOWAIT) != 0)
        panic("GFP_NOWAIT is unimplemented");

    VM_OBJECT_WLOCK(obj);
    rv = vm_page_grab_valid(&page, obj, pindex, VM_ALLOC_NORMAL |
        VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
    VM_OBJECT_WUNLOCK(obj);
    if (rv != VM_PAGER_OK)
        return (ERR_PTR(-EINVAL));
    return (page);
}

struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct fileobj {
        struct linux_file file __aligned(sizeof(void *));
        struct vnode vnode __aligned(sizeof(void *));
    };
    struct fileobj *fileobj;
    struct linux_file *filp;
    struct vnode *vp;
    int error;

    fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
    if (fileobj == NULL) {
        error = -ENOMEM;
        goto err_0;
    }
    filp = &fileobj->file;
    vp = &fileobj->vnode;

    filp->f_count = 1;
    filp->f_vnode = vp;
    filp->f_shmem = vm_pager_allocate(OBJT_SWAP, NULL, size,
        VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
    if (filp->f_shmem == NULL) {
        error = -ENOMEM;
        goto err_1;
    }
    return (filp);
err_1:
    kfree(filp);
err_0:
    return (ERR_PTR(error));
}

static vm_ooffset_t
linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
    vm_pindex_t end, int flags)
{
    int start_count, end_count;

    VM_OBJECT_WLOCK(obj);
    start_count = obj->resident_page_count;
    vm_object_page_remove(obj, start, end, flags);
    end_count = obj->resident_page_count;
    VM_OBJECT_WUNLOCK(obj);
    return (start_count - end_count);
}

unsigned long
linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
{
    return (linux_invalidate_mapping_pages_sub(obj, start, end,
        OBJPR_CLEANONLY));
}

void
linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
{
    vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
    vm_pindex_t end = OFF_TO_IDX(lend + 1);

    (void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
}
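
A usage sketch for the shmem helpers above (hypothetical consumer, not part of this patch; example_shmem_first_page is an invented name). Note that GFP_NOWAIT would panic per the code above, so a blocking gfp must be used:

static struct page *
example_shmem_first_page(loff_t size)
{
    struct linux_file *filp;
    struct page *page;

    filp = linux_shmem_file_setup("example", size, 0);
    if (IS_ERR(filp))
        return (NULL);
    page = linux_shmem_read_mapping_page_gfp(filp->f_shmem, 0, GFP_KERNEL);
    return (IS_ERR(page) ? NULL : page);
}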