Index: head/sys/arm/include/vmparam.h
===================================================================
--- head/sys/arm/include/vmparam.h	(revision 328177)
+++ head/sys/arm/include/vmparam.h	(revision 328178)
@@ -1,194 +1,196 @@
/*	$NetBSD: vmparam.h,v 1.26 2003/08/07 16:27:47 agc Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_

/*
 * Machine dependent constants for ARM.
 */

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define MAXTSIZ         (256UL*1024*1024)       /* max text size */
#endif
#ifndef DFLDSIZ
#define DFLDSIZ         (128UL*1024*1024)       /* initial data size limit */
#endif
#ifndef MAXDSIZ
#define MAXDSIZ         (512UL*1024*1024)       /* max data size */
#endif
#ifndef DFLSSIZ
#define DFLSSIZ         (2UL*1024*1024)         /* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define MAXSSIZ         (8UL*1024*1024)         /* max stack size */
#endif
#ifndef SGROWSIZ
#define SGROWSIZ        (128UL*1024)            /* amount to grow stack */
#endif

/*
 * Address space constants
 */

/*
 * The line between user space and kernel space
 * Mappings >= KERNEL_BASE are constant across all processes
 */
#ifndef KERNBASE
#define KERNBASE        0xc0000000
#endif

/*
 * The virtual address the kernel is linked to run at.  For armv4/5 platforms
 * the low-order 30 bits of this must match the low-order bits of the physical
 * address the kernel is loaded at, so the value is most often provided as a
 * kernel config option in the std.platform file.  For armv6/7 the kernel can
 * be loaded at any 2MB boundary, and KERNVIRTADDR can also be set to any 2MB
 * boundary.  It is typically overridden in the std.platform file only when
 * KERNBASE is also set to a lower address to provide more KVA.
 */
#ifndef KERNVIRTADDR
#define KERNVIRTADDR    0xc0000000
#endif

/*
 * max number of non-contig chunks of physical RAM you can have
 */
#define VM_PHYSSEG_MAX  32

/*
 * The physical address space may be sparsely populated on some ARM systems.
 */
#define VM_PHYSSEG_SPARSE

/*
 * Create one free page pool.  Since the ARM kernel virtual address
 * space does not include a mapping onto the machine's entire physical
 * memory, VM_FREEPOOL_DIRECT is defined as an alias for the default
 * pool, VM_FREEPOOL_DEFAULT.
 */
#define VM_NFREEPOOL            1
#define VM_FREEPOOL_DEFAULT     0
#define VM_FREEPOOL_DIRECT      0

/*
 * We need just one free list:  DEFAULT.
 */
#define VM_NFREELIST            1
#define VM_FREELIST_DEFAULT     0

/*
 * The largest allocation size is 1MB.
 */
#define VM_NFREEORDER           9

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef VM_NRESERVLEVEL
#define VM_NRESERVLEVEL         1
#endif

/*
 * Level 0 reservations consist of 256 pages.
 */
#ifndef VM_LEVEL_0_ORDER
#define VM_LEVEL_0_ORDER        8
#endif

#define VM_MIN_ADDRESS          (0x00001000)
#ifndef VM_MAXUSER_ADDRESS
#define VM_MAXUSER_ADDRESS      (KERNBASE - 0x00400000) /* !!! PT2MAP_SIZE */
#endif
#define VM_MAX_ADDRESS          VM_MAXUSER_ADDRESS

#define SHAREDPAGE              (VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define USRSTACK                SHAREDPAGE

/* initial pagein size of beginning of executable file */
#ifndef VM_INITIAL_PAGEIN
#define VM_INITIAL_PAGEIN       16
#endif

#ifndef VM_MIN_KERNEL_ADDRESS
#define VM_MIN_KERNEL_ADDRESS   KERNBASE
#endif

#define VM_MAX_KERNEL_ADDRESS   (vm_max_kernel_address)

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define VM_KMEM_SIZE_SCALE      (3)
#endif

/*
 * Optional floor (in bytes) on the size of the kmem arena.
 */
#ifndef VM_KMEM_SIZE_MIN
#define VM_KMEM_SIZE_MIN        (12 * 1024 * 1024)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 40% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define VM_KMEM_SIZE_MAX        ((vm_max_kernel_address - \
    VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
#endif

extern vm_offset_t vm_max_kernel_address;

#define ZERO_REGION_SIZE        (64 * 1024)     /* 64KB */

#ifndef VM_MAX_AUTOTUNE_MAXUSERS
#define VM_MAX_AUTOTUNE_MAXUSERS        384
#endif

#define SFBUF
#define SFBUF_MAP

#define PMAP_HAS_DMAP   0
+#define PHYS_TO_DMAP(x) ({ panic("No direct map exists"); 0; })
+#define DMAP_TO_PHYS(x) ({ panic("No direct map exists"); 0; })

#define DEVMAP_MAX_VADDR        ARM_VECTORS_HIGH

#endif  /* _MACHINE_VMPARAM_H_ */
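The two panic stubs added above are what allow shared code to replace compile-time "#ifdef PHYS_TO_DMAP" blocks with an "if (PMAP_HAS_DMAP)" test: the macros give the compiler a syntactically valid expression on platforms that have no direct map, while the constant-zero PMAP_HAS_DMAP makes that branch dead code the optimizer removes, so the panic can never actually fire. A minimal userland sketch of the pattern follows; it is not FreeBSD code, and it assumes a GNU-C compiler for the ({ ... }) statement-expression extension the stubs rely on.

#include <stdio.h>
#include <stdlib.h>

static void
panic(const char *msg)
{
        fprintf(stderr, "panic: %s\n", msg);
        abort();
}

#define PMAP_HAS_DMAP   0       /* this platform has no direct map */
#define PHYS_TO_DMAP(pa) ({ panic("No direct map exists"); (unsigned long)0; })

static void *
page_address_sketch(unsigned long pa)
{
        /*
         * PMAP_HAS_DMAP is a compile-time 0, so this branch is dead code:
         * it still type-checks, then the optimizer discards it, panic
         * included.  One source path serves both kinds of platforms.
         */
        if (PMAP_HAS_DMAP)
                return ((void *)PHYS_TO_DMAP(pa));
        return (NULL);
}

int
main(void)
{
        printf("%p\n", page_address_sketch(0x1000));
        return (0);
}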
Index: head/sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_page.c	(revision 328177)
+++ head/sys/compat/linuxkpi/common/src/linux_page.c	(revision 328178)
@@ -1,395 +1,387 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

void *
linux_page_address(struct page *page)
{

	if (page->object != kmem_object && page->object != kernel_object) {
-#ifdef PHYS_TO_DMAP
		return (PMAP_HAS_DMAP ?
		    ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) : NULL);
-#else
-		return (NULL);
-#endif
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
-#ifdef PHYS_TO_DMAP
-	KASSERT(PMAP_HAS_DMAP, ("Direct map unavailable"));
-	unsigned long npages = 1UL << order;
-	int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);
	vm_page_t page;

-	if (order == 0 && (flags & GFP_DMA32) == 0) {
-		page = vm_page_alloc(NULL, 0, req);
-		if (page == NULL)
-			return (NULL);
-	} else {
-		vm_paddr_t pmax = (flags & GFP_DMA32) ?
-		    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
-retry:
-		page = vm_page_alloc_contig(NULL, 0, req,
-		    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+	if (PMAP_HAS_DMAP) {
+		unsigned long npages = 1UL << order;
+		int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
+		    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);

-		if (page == NULL) {
-			if (flags & M_WAITOK) {
-				if (!vm_page_reclaim_contig(req,
-				    npages, 0, pmax, PAGE_SIZE, 0)) {
-					VM_WAIT;
+		if (order == 0 && (flags & GFP_DMA32) == 0) {
+			page = vm_page_alloc(NULL, 0, req);
+			if (page == NULL)
+				return (NULL);
+		} else {
+			vm_paddr_t pmax = (flags & GFP_DMA32) ?
+			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
+retry:
+			page = vm_page_alloc_contig(NULL, 0, req,
+			    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+
+			if (page == NULL) {
+				if (flags & M_WAITOK) {
+					if (!vm_page_reclaim_contig(req,
+					    npages, 0, pmax, PAGE_SIZE, 0)) {
+						VM_WAIT;
+					}
+					flags &= ~M_WAITOK;
+					goto retry;
				}
-				flags &= ~M_WAITOK;
-				goto retry;
+				return (NULL);
			}
-			return (NULL);
		}
-	}
-	if (flags & M_ZERO) {
-		unsigned long x;
+		if (flags & M_ZERO) {
+			unsigned long x;

-		for (x = 0; x != npages; x++) {
-			vm_page_t pgo = page + x;
+			for (x = 0; x != npages; x++) {
+				vm_page_t pgo = page + x;

-			if ((pgo->flags & PG_ZERO) == 0)
-				pmap_zero_page(pgo);
+				if ((pgo->flags & PG_ZERO) == 0)
+					pmap_zero_page(pgo);
+			}
		}
-	}
-#else
-	vm_offset_t vaddr;
-	vm_page_t page;
+	} else {
+		vm_offset_t vaddr;

-	vaddr = linux_alloc_kmem(flags, order);
-	if (vaddr == 0)
-		return (NULL);
+		vaddr = linux_alloc_kmem(flags, order);
+		if (vaddr == 0)
+			return (NULL);

-	page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));
+		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

-	KASSERT(vaddr == (vm_offset_t)page_address(page),
-	    ("Page address mismatch"));
-#endif
+		KASSERT(vaddr == (vm_offset_t)page_address(page),
+		    ("Page address mismatch"));
+	}
+
	return (page);
}

void
linux_free_pages(vm_page_t page, unsigned int order)
{
-#ifdef PHYS_TO_DMAP
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			vm_page_lock(pgo);
			vm_page_free(pgo);
			vm_page_unlock(pgo);
		}
	} else {
-#endif
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
-#ifdef PHYS_TO_DMAP
	}
-#endif
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(kmem_arena, size,
		    flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT,
		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(kmem_arena, addr, size);
}
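/*
 * Aside: linux_alloc_pages() above uses a retry-once idiom worth isolating.
 * If the contiguous allocation fails and the caller may sleep, it attempts
 * one reclaim pass, clears the wait flag, and retries, so a second failure
 * is final.  The sketch below restates just that control flow; the two
 * primitives it calls are hypothetical stand-ins, not FreeBSD KPIs.
 */
#include <stdbool.h>
#include <stddef.h>

void	*try_alloc_contig(size_t npages);	/* illustrative primitive */
bool	 reclaim_contig(size_t npages);		/* illustrative primitive */

static void *
alloc_retry_once(size_t npages, bool may_wait)
{
	void *p;

retry:
	p = try_alloc_contig(npages);
	if (p == NULL) {
		if (may_wait) {
			/* Try to free up a contiguous run, then retry once. */
			(void)reclaim_contig(npages);
			may_wait = false;
			goto retry;
		}
		return (NULL);
	}
	return (p);
}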
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;
	int i;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ((size_t)nr_pages) << PAGE_SHIFT;
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	if (count == -1)
		return (-EFAULT);

	for (i = 0; i != nr_pages; i++) {
		struct page *pg = pages[i];

		vm_page_lock(pg);
		vm_page_wire(pg);
		vm_page_unhold(pg);
		vm_page_unlock(pg);
	}
	return (nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	va = start;
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
	if (start < vm_map_min(map) || end > vm_map_max(map))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		vm_page_lock(*mp);
		vm_page_wire(*mp);
		vm_page_unhold(*mp);
		vm_page_unlock(*mp);

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{

	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

struct page *
linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
{
	vm_page_t page;
	int rv;

	if ((gfp & GFP_NOWAIT) != 0)
		panic("GFP_NOWAIT is unimplemented");

	VM_OBJECT_WLOCK(obj);
	page = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED);
	if (page->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(page);
		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(page);
				vm_page_unwire(page, PQ_NONE);
				vm_page_free(page);
				vm_page_unlock(page);
				VM_OBJECT_WUNLOCK(obj);
				return (ERR_PTR(-EINVAL));
			}
			MPASS(page->valid == VM_PAGE_BITS_ALL);
		} else {
			pmap_zero_page(page);
			page->valid = VM_PAGE_BITS_ALL;
			page->dirty = 0;
		}
		vm_page_xunbusy(page);
	}
	VM_OBJECT_WUNLOCK(obj);
	return (page);
}

struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct fileobj {
		struct linux_file file __aligned(sizeof(void *));
		struct vnode vnode __aligned(sizeof(void *));
	};
	struct fileobj *fileobj;
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
	if (fileobj == NULL) {
		error = -ENOMEM;
		goto err_0;
	}
	filp = &fileobj->file;
	vp = &fileobj->vnode;

	filp->f_count = 1;
	filp->f_vnode = vp;
	filp->f_shmem = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
	if (filp->f_shmem == NULL) {
		error = -ENOMEM;
		goto err_1;
	}
	return (filp);

err_1:
	kfree(filp);
err_0:
	return (ERR_PTR(error));
}

static vm_ooffset_t
linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
    vm_pindex_t end, int flags)
{
	int start_count, end_count;

	VM_OBJECT_WLOCK(obj);
	start_count = obj->resident_page_count;
	vm_object_page_remove(obj, start, end, flags);
	end_count = obj->resident_page_count;
	VM_OBJECT_WUNLOCK(obj);
	return (start_count - end_count);
}

unsigned long
linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
{

	return (linux_invalidate_mapping_pages_sub(obj, start, end,
	    OBJPR_CLEANONLY));
}

void
linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
{
	vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
	vm_pindex_t end = OFF_TO_IDX(lend + 1);

	(void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
}
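Both user-page paths above convert a transient page hold into a long-term wiring before handing pages back to the caller. That handoff is small enough to isolate; the sketch below restates it using the same FreeBSD 11/12-era vm_page calls the file itself makes, purely for clarity, and is not part of this commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * A held page is only pinned briefly (e.g. across a fault); wiring keeps
 * it resident until vm_page_unwire().  Taking the wire before dropping
 * the hold means the page is never unpinned in between.
 */
static void
hold_to_wire(vm_page_t pg)
{
	vm_page_lock(pg);
	vm_page_wire(pg);	/* long-term pin, survives pageout scans */
	vm_page_unhold(pg);	/* drop the transient hold from the lookup */
	vm_page_unlock(pg);
}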
Index: head/sys/i386/include/vmparam.h
===================================================================
--- head/sys/i386/include/vmparam.h	(revision 328177)
+++ head/sys/i386/include/vmparam.h	(revision 328178)
@@ -1,207 +1,209 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vmparam.h	5.9 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_ 1

/*
 * Machine dependent constants for 386.
 */

/*
 * Virtual memory related constants, all in bytes
 */
#define MAXTSIZ         (128UL*1024*1024)       /* max text size */
#ifndef DFLDSIZ
#define DFLDSIZ         (128UL*1024*1024)       /* initial data size limit */
#endif
#ifndef MAXDSIZ
#define MAXDSIZ         (512UL*1024*1024)       /* max data size */
#endif
#ifndef DFLSSIZ
#define DFLSSIZ         (8UL*1024*1024)         /* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define MAXSSIZ         (64UL*1024*1024)        /* max stack size */
#endif
#ifndef SGROWSIZ
#define SGROWSIZ        (128UL*1024)            /* amount to grow stack */
#endif

/*
 * Choose between DENSE and SPARSE based on whether lower execution time or
 * lower kernel address space consumption is desired.  Under PAE, kernel
 * address space is often in short supply.
 */
#ifdef PAE
#define VM_PHYSSEG_SPARSE
#else
#define VM_PHYSSEG_DENSE
#endif

/*
 * The number of PHYSSEG entries must be one greater than the number
 * of phys_avail entries because the phys_avail entry that spans the
 * largest physical address that is accessible by ISA DMA is split
 * into two PHYSSEG entries.
 */
#define VM_PHYSSEG_MAX  17

/*
 * Create one free page pool.  Since the i386 kernel virtual address
 * space does not include a mapping onto the machine's entire physical
 * memory, VM_FREEPOOL_DIRECT is defined as an alias for the default
 * pool, VM_FREEPOOL_DEFAULT.
 */
#define VM_NFREEPOOL            1
#define VM_FREEPOOL_DEFAULT     0
#define VM_FREEPOOL_DIRECT      0

/*
 * Create two free page lists: VM_FREELIST_DEFAULT is for physical
 * pages that are above the largest physical address that is
 * accessible by ISA DMA and VM_FREELIST_ISADMA is for physical pages
 * that are below that address.
 */
#define VM_NFREELIST            2
#define VM_FREELIST_DEFAULT     0
#define VM_FREELIST_ISADMA      1

/*
 * The largest allocation size is 2MB under PAE and 4MB otherwise.
 */
#ifdef PAE
#define VM_NFREEORDER           10
#else
#define VM_NFREEORDER           11
#endif

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef VM_NRESERVLEVEL
#define VM_NRESERVLEVEL         1
#endif

/*
 * Level 0 reservations consist of 512 pages when PAE pagetables are
 * used, and 1024 pages otherwise.
 */
#ifndef VM_LEVEL_0_ORDER
#if defined(PAE) || defined(PAE_TABLES)
#define VM_LEVEL_0_ORDER        9
#else
#define VM_LEVEL_0_ORDER        10
#endif
#endif

/*
 * Kernel physical load address.
 */
#ifndef KERNLOAD
#define KERNLOAD                (1 << PDRSHIFT)
#endif /* !defined(KERNLOAD) */

/*
 * Virtual addresses of things.  Derived from the page directory and
 * page table indexes from pmap.h for precision.
 * Because of the page that is both a PD and PT, it looks a little
 * messy at times, but hey, we'll do anything to save a page :-)
 */
#define VM_MAX_KERNEL_ADDRESS   VADDR(KPTDI+NKPDE-1, NPTEPG-1)
#define VM_MIN_KERNEL_ADDRESS   VADDR(PTDPTDI, PTDPTDI)
#define KERNBASE                VADDR(KPTDI, 0)
#define UPT_MAX_ADDRESS         VADDR(PTDPTDI, PTDPTDI)
#define UPT_MIN_ADDRESS         VADDR(PTDPTDI, 0)
#define VM_MAXUSER_ADDRESS      VADDR(PTDPTDI, 0)
#define SHAREDPAGE              (VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define USRSTACK                SHAREDPAGE
#define VM_MAX_ADDRESS          VADDR(PTDPTDI, PTDPTDI)
#define VM_MIN_ADDRESS          ((vm_offset_t)0)

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define VM_KMEM_SIZE_SCALE      (3)
#endif

/*
 * Optional floor (in bytes) on the size of the kmem arena.
 */
#ifndef VM_KMEM_SIZE_MIN
#define VM_KMEM_SIZE_MIN        (12 * 1024 * 1024)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 40% of the
 * kernel map rounded to the nearest multiple of the superpage size.
 */
#ifndef VM_KMEM_SIZE_MAX
#define VM_KMEM_SIZE_MAX        (((((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS) >> (PDRSHIFT - 2)) + 5) / 10) << PDRSHIFT)
#endif

/* initial pagein size of beginning of executable file */
#ifndef VM_INITIAL_PAGEIN
#define VM_INITIAL_PAGEIN       16
#endif

#define ZERO_REGION_SIZE        (64 * 1024)     /* 64KB */

#ifndef VM_MAX_AUTOTUNE_MAXUSERS
#define VM_MAX_AUTOTUNE_MAXUSERS        384
#endif

#define SFBUF
#define SFBUF_MAP
#define SFBUF_CPUSET
#define SFBUF_PROCESS_PAGE

#define PMAP_HAS_DMAP   0
+#define PHYS_TO_DMAP(x) ({ panic("No direct map exists"); 0; })
+#define DMAP_TO_PHYS(x) ({ panic("No direct map exists"); 0; })

#endif /* _MACHINE_VMPARAM_H_ */
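The VM_KMEM_SIZE_MAX expression above packs the "40% of the kernel map, rounded to a superpage multiple" rule into integer arithmetic: shifting by PDRSHIFT - 2 yields four times the superpage count, and (4n + 5) / 10 is 0.4n rounded to the nearest integer. A standalone check of the arithmetic, assuming non-PAE i386 (PDRSHIFT = 22, 4MB superpages) and a hypothetical 1GB kernel map:

#include <stdio.h>

int
main(void)
{
	unsigned long long kva = 1024ULL << 20;	/* assumed 1GB kernel map */
	unsigned pdrshift = 22;			/* 4MB superpages, non-PAE */

	/* Mirrors (((kva >> (PDRSHIFT - 2)) + 5) / 10) << PDRSHIFT. */
	unsigned long long sp = ((kva >> (pdrshift - 2)) + 5) / 10;
	unsigned long long max = sp << pdrshift;

	/* Prints: 102 superpages = 408 MB (~39.8% of 1024 MB) */
	printf("%llu superpages = %llu MB (~%.1f%% of %llu MB)\n",
	    sp, max >> 20, 100.0 * max / kva, kva >> 20);
	return (0);
}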
Index: head/sys/sys/sf_buf.h
===================================================================
--- head/sys/sys/sf_buf.h	(revision 328177)
+++ head/sys/sys/sf_buf.h	(revision 328178)
@@ -1,200 +1,196 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Gleb Smirnoff
 * Copyright (c) 2003-2004 Alan L. Cox
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_SF_BUF_H_
#define _SYS_SF_BUF_H_

struct sfstat {				/* sendfile statistics */
	uint64_t	sf_syscalls;	/* times sendfile was called */
	uint64_t	sf_noiocnt;	/* times sendfile didn't require I/O */
	uint64_t	sf_iocnt;	/* times sendfile had to do disk I/O */
	uint64_t	sf_pages_read;	/* pages read as part of a request */
	uint64_t	sf_pages_valid;	/* pages were valid for a request */
	uint64_t	sf_rhpages_requested;	/* readahead pages requested */
	uint64_t	sf_rhpages_read;	/* readahead pages read */
	uint64_t	sf_busy;	/* times aborted on a busy page */
	uint64_t	sf_allocfail;	/* times sfbuf allocation failed */
	uint64_t	sf_allocwait;	/* times sfbuf allocation had to wait */
	uint64_t	sf_pages_bogus;	/* times bogus page was used */
};

#ifdef _KERNEL
#include
#include
#include
#include
#include
#include

/*
 * Sf_bufs, or sendfile(2) buffers, provide a vm_page that is mapped
 * into kernel address space.  Note that they aren't used only
 * by sendfile(2)!
 *
 * Sf_bufs could be implemented as a feature of vm_page_t, but that
 * would require growth of the structure.  That's why they are implemented
 * as a separate hash indexed by vm_page address.  Implementation lives in
 * kern/subr_sfbuf.c.  Meanwhile, most 64-bit machines have a physical map,
 * so they don't require this hash at all, and thus ignore subr_sfbuf.c.
 *
 * Different 32-bit architectures place different requirements on the sf_buf
 * hash and functions.  They request features in machine/vmparam.h, which
 * enable parts of this file.  They can also optionally provide helpers in
 * machine/sf_buf.h.
 *
 * Defines are:
 * SFBUF		This machine requires the sf_buf hash;
 *			subr_sfbuf.c should be compiled.
 * SFBUF_CPUSET		This machine can perform SFB_CPUPRIVATE mappings,
 *			which do not invalidate the cache on the rest of
 *			the CPUs.
 * SFBUF_NOMD		This machine doesn't have machine/sf_buf.h
 *
 * SFBUF_MAP		This machine provides its own sf_buf_map() and
 *			sf_buf_unmap().
 * SFBUF_PROCESS_PAGE	This machine provides sf_buf_process_page()
 *			function.
 */
#ifdef SFBUF

#if defined(SMP) && defined(SFBUF_CPUSET)
#include
#endif
#include

struct sf_buf {
	LIST_ENTRY(sf_buf)	list_entry;	/* list of buffers */
	TAILQ_ENTRY(sf_buf)	free_entry;	/* list of buffers */
	vm_page_t		m;		/* currently mapped page */
	vm_offset_t		kva;		/* va of mapping */
	int			ref_count;	/* usage of this mapping */
#if defined(SMP) && defined(SFBUF_CPUSET)
	cpuset_t		cpumask;	/* where mapping is valid */
#endif
};
#else /* ! SFBUF */
struct sf_buf;
#endif /* SFBUF */

#ifndef SFBUF_NOMD
#include <machine/sf_buf.h>
#endif

#ifdef SFBUF
struct sf_buf *sf_buf_alloc(struct vm_page *, int);
void sf_buf_free(struct sf_buf *);
void sf_buf_ref(struct sf_buf *);

static inline vm_offset_t
sf_buf_kva(struct sf_buf *sf)
{
-#ifdef PMAP_HAS_DMAP
	if (PMAP_HAS_DMAP)
		return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
-#endif

	return (sf->kva);
}

static inline vm_page_t
sf_buf_page(struct sf_buf *sf)
{
-#ifdef PMAP_HAS_DMAP
	if (PMAP_HAS_DMAP)
		return ((vm_page_t)sf);
-#endif

	return (sf->m);
}

#ifndef SFBUF_MAP
#include <vm/pmap.h>

static inline void
sf_buf_map(struct sf_buf *sf, int flags)
{

	pmap_qenter(sf->kva, &sf->m, 1);
}

static inline int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}
#endif /* SFBUF_MAP */

#if defined(SMP) && defined(SFBUF_CPUSET)
void sf_buf_shootdown(struct sf_buf *, int);
#endif

#ifdef SFBUF_PROCESS_PAGE
boolean_t sf_buf_process_page(vm_page_t, void (*)(struct sf_buf *));
#endif

#else /* ! SFBUF */

static inline struct sf_buf *
sf_buf_alloc(struct vm_page *m, int pri)
{

	return ((struct sf_buf *)m);
}

static inline void
sf_buf_free(struct sf_buf *sf)
{
}

static inline void
sf_buf_ref(struct sf_buf *sf)
{
}
#endif /* SFBUF */

/*
 * Options to sf_buf_alloc() are specified through its flags argument.  This
 * argument's value should be the result of a bitwise or'ing of one or more
 * of the following values.
 */
#define SFB_CATCH	1	/* Check signals if the allocation sleeps. */
#define SFB_CPUPRIVATE	2	/* Create a CPU private mapping. */
#define SFB_DEFAULT	0
#define SFB_NOWAIT	4	/* Return NULL if all bufs are used. */

extern counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
#define SFSTAT_ADD(name, val)	\
    counter_u64_add(sfstat[offsetof(struct sfstat, name) / sizeof(uint64_t)],\
	(val))
#define SFSTAT_INC(name)	SFSTAT_ADD(name, 1)
#endif /* _KERNEL */

#endif /* !_SYS_SF_BUF_H_ */
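On top of the interface above, a typical consumer pairs sf_buf_alloc() with sf_buf_kva() to get a temporary kernel mapping of a page. After this change the same source works whether sf_buf_kva() resolves through the direct map or through a hash-allocated KVA slot. A brief usage sketch follows; copy_page_out() is a hypothetical helper, not part of this commit, and it passes SFB_NOWAIT so the NULL return is reachable.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/* Copy one page's contents out through a transient sf_buf mapping. */
static int
copy_page_out(vm_page_t m, void *dst)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, SFB_NOWAIT);	/* NULL if all bufs are used */
	if (sf == NULL)
		return (ENOMEM);
	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
	sf_buf_free(sf);
	return (0);
}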