Index: head/sys/compat/linuxkpi/common/include/asm/pgtable.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/asm/pgtable.h	(revision 316032)
+++ head/sys/compat/linuxkpi/common/include/asm/pgtable.h	(revision 316033)
@@ -1,36 +1,43 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_PGTABLE_H_
#define _ASM_PGTABLE_H_

#include

+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+typedef struct page *pgtable_t;
+
#endif /* _ASM_PGTABLE_H_ */
Index: head/sys/compat/linuxkpi/common/include/linux/mm.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/mm.h	(revision 316032)
+++ head/sys/compat/linuxkpi/common/include/linux/mm.h	(revision 316033)
@@ -1,112 +1,263 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 François Tigeot
 * Copyright (c) 2015 Matthew Dillon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_MM_H_
#define _LINUX_MM_H_

#include
#include
#include
#include
+#include
+#include
+
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

+/*
+ * Make sure our LinuxKPI defined virtual memory flags don't conflict
+ * with the ones defined by FreeBSD:
+ */
+CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);
+
+#define VM_PFNINTERNAL		(1 << 8)	/* FreeBSD private flag to vm_insert_pfn() */
+#define VM_MIXEDMAP		(1 << 9)
+#define VM_NORESERVE		(1 << 10)
+#define VM_PFNMAP		(1 << 11)
+#define VM_IO			(1 << 12)
+#define VM_MAYWRITE		(1 << 13)
+#define VM_DONTCOPY		(1 << 14)
+#define VM_DONTEXPAND		(1 << 15)
+#define VM_DONTDUMP		(1 << 16)
+
+#define VMA_MAX_PREFAULT_RECORD	1
+
+#define FOLL_WRITE		(1 << 0)
+#define FOLL_FORCE		(1 << 1)
+
+#define VM_FAULT_OOM		(1 << 0)
+#define VM_FAULT_SIGBUS		(1 << 1)
+#define VM_FAULT_MAJOR		(1 << 2)
+#define VM_FAULT_WRITE		(1 << 3)
+#define VM_FAULT_HWPOISON	(1 << 4)
+#define VM_FAULT_HWPOISON_LARGE	(1 << 5)
+#define VM_FAULT_SIGSEGV	(1 << 6)
+#define VM_FAULT_NOPAGE		(1 << 7)
+#define VM_FAULT_LOCKED		(1 << 8)
+#define VM_FAULT_RETRY		(1 << 9)
+#define VM_FAULT_FALLBACK	(1 << 10)
+
+#define FAULT_FLAG_WRITE	(1 << 0)
+#define FAULT_FLAG_MKWRITE	(1 << 1)
+#define FAULT_FLAG_ALLOW_RETRY	(1 << 2)
+#define FAULT_FLAG_RETRY_NOWAIT	(1 << 3)
+#define FAULT_FLAG_KILLABLE	(1 << 4)
+#define FAULT_FLAG_TRIED	(1 << 5)
+#define FAULT_FLAG_USER		(1 << 6)
+#define FAULT_FLAG_REMOTE	(1 << 7)
+#define FAULT_FLAG_INSTRUCTION	(1 << 8)
+
+typedef int (*pte_fn_t)(pte_t *, pgtable_t, unsigned long addr, void *data);
+
struct vm_area_struct {
	vm_offset_t vm_start;
	vm_offset_t vm_end;
	vm_offset_t vm_pgoff;
	vm_paddr_t vm_pfn;		/* PFN For mmap. */
	vm_size_t vm_len;		/* length for mmap. */
	vm_memattr_t vm_page_prot;
};

+struct vm_fault {
+	unsigned int flags;
+	pgoff_t pgoff;
+	void *virtual_address;		/* user-space address */
+	struct page *page;
+};
+
+struct vm_operations_struct {
+	void (*open) (struct vm_area_struct *);
+	void (*close) (struct vm_area_struct *);
+	int (*fault) (struct vm_area_struct *, struct vm_fault *);
+};
+
/*
 * Compute log2 of the power of two rounded up count of pages
 * needed for size bytes.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}

static inline void *
lowmem_page_address(struct page *page)
{
-
-	return page_address(page);
+	return (page_address(page));
}
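/*
 * Illustrative example (not part of this diff): get_order() above returns
 * the base-2 logarithm, rounded up, of the number of pages backing "size"
 * bytes.  Assuming 4 KB pages:
 *
 *	get_order(1)              -> 0	(a single page)
 *	get_order(PAGE_SIZE)      -> 0
 *	get_order(PAGE_SIZE + 1)  -> 1	(two pages, rounded up to a power of two)
 *	get_order(8 * PAGE_SIZE)  -> 3
 */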
/*
- * This only works via mmap ops.
+ * This only works via memory map operations.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size, vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;
	vma->vm_len = size;
	return (0);
}

+static inline int
+apply_to_page_range(struct mm_struct *mm, unsigned long address,
+    unsigned long size, pte_fn_t fn, void *data)
+{
+	return (-ENOTSUP);
+}
+
+static inline int
+zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+    unsigned long size)
+{
+	return (-ENOTSUP);
+}
+
+static inline int
+remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+    unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	return (-ENOTSUP);
+}
+
static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
}

#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

static inline void
set_page_dirty(struct vm_page *page)
{
	vm_page_dirty(page);
}

static inline void
+set_page_dirty_lock(struct vm_page *page)
+{
+	vm_page_lock(page);
+	vm_page_dirty(page);
+	vm_page_unlock(page);
+}
+
+static inline void
+mark_page_accessed(struct vm_page *page)
+{
+	vm_page_reference(page);
+}
+
+static inline void
get_page(struct vm_page *page)
{
+	vm_page_lock(page);
	vm_page_hold(page);
+	vm_page_wire(page);
+	vm_page_unlock(page);
}

-#endif /* _LINUX_MM_H_ */
+extern long
+get_user_pages(unsigned long start, unsigned long nr_pages,
+    int gup_flags, struct page **,
+    struct vm_area_struct **);
+
+extern int
+__get_user_pages_fast(unsigned long start, int nr_pages, int write,
+    struct page **);
+
+extern long
+get_user_pages_remote(struct task_struct *, struct mm_struct *,
+    unsigned long start, unsigned long nr_pages,
+    int gup_flags, struct page **,
+    struct vm_area_struct **);
+
+static inline void
+put_page(struct vm_page *page)
+{
+	vm_page_lock(page);
+	vm_page_unwire(page, PQ_ACTIVE);
+	vm_page_unhold(page);
+	vm_page_unlock(page);
+}
+
+#define copy_highpage(to, from) pmap_copy_page(from, to)
+
+static inline pgprot_t
+vm_get_page_prot(unsigned long vm_flags)
+{
+	return (vm_flags & VM_PROT_ALL);
+}
+
+extern int vm_insert_mixed(struct vm_area_struct *, unsigned long addr, pfn_t pfn);
+
+extern int
+vm_insert_pfn(struct vm_area_struct *, unsigned long addr,
+    unsigned long pfn);
+
+extern int
+vm_insert_pfn_prot(struct vm_area_struct *, unsigned long addr,
+    unsigned long pfn, pgprot_t pgprot);
+
+static inline vm_page_t
+vmalloc_to_page(const void *addr)
+{
+	vm_paddr_t paddr;
+
+	paddr = pmap_kextract((vm_offset_t)addr);
+	return (PHYS_TO_VM_PAGE(paddr));
+}
+
+extern int is_vmalloc_addr(const void *addr);
+
+#endif /* _LINUX_MM_H_ */
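A minimal sketch, not part of this commit, of how a LinuxKPI consumer might use the structures added to mm.h above; the mydrv_* names and phys_pfn parameter are hypothetical and only illustrate the shape of the callbacks:

	static int
	mydrv_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		/* Nothing to page in for this illustration: report a bus error. */
		return (VM_FAULT_SIGBUS);
	}

	static const struct vm_operations_struct mydrv_vm_ops = {
		.fault = mydrv_vm_fault,
	};

	static int
	mydrv_mmap_registers(struct vm_area_struct *vma, unsigned long phys_pfn)
	{
		/* Record an uncacheable physical window for a later mmap. */
		return (io_remap_pfn_range(vma, vma->vm_start, phys_pfn,
		    vma->vm_end - vma->vm_start, VM_MEMATTR_UNCACHEABLE));
	}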
Index: head/sys/compat/linuxkpi/common/include/linux/page.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/page.h	(revision 316032)
+++ head/sys/compat/linuxkpi/common/include/linux/page.h	(revision 316033)
@@ -1,70 +1,73 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_PAGE_H_
#define _LINUX_PAGE_H_

#include
#include
#include

#include
#include
#include

+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define page vm_page

#define virt_to_page(x)		PHYS_TO_VM_PAGE(vtophys((x)))
#define page_to_pfn(pp)		(VM_PAGE_TO_PHYS((pp)) >> PAGE_SHIFT)
#define pfn_to_page(pfn)	(PHYS_TO_VM_PAGE((pfn) << PAGE_SHIFT))
#define nth_page(page,n)	pfn_to_page(page_to_pfn((page)) + (n))

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

#define pgprot_noncached(prot)		((pgprot_t)VM_MEMATTR_UNCACHEABLE)
#define pgprot_writecombine(prot)	((pgprot_t)VM_MEMATTR_WRITE_COMBINING)

#undef PAGE_MASK
#define PAGE_MASK	(~(PAGE_SIZE-1))
/*
 * Modifying PAGE_MASK in the above way breaks trunc_page, round_page,
 * and btoc macros. Therefore, redefine them in a way that makes sense
 * so the LinuxKPI consumers don't get totally broken behavior.
 */
#undef btoc
#define btoc(x)		(((vm_offset_t)(x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#undef round_page
#define round_page(x)	((((uintptr_t)(x)) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#undef trunc_page
#define trunc_page(x)	((uintptr_t)(x) & ~(PAGE_SIZE - 1))

#endif /* _LINUX_PAGE_H_ */
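A short sketch, not from the commit itself, of the conversion helpers kept in page.h; kbuf and pg are hypothetical names for a kernel virtual address and its backing page:

	void *kbuf;			/* assume: a valid kernel virtual address */
	struct page *pg;
	unsigned long pfn;

	pg = virt_to_page(kbuf);	/* resolve the backing vm_page */
	pfn = page_to_pfn(pg);		/* its physical page frame number */
	MPASS(pfn_to_page(pfn) == pg);	/* pfn_to_page() inverts page_to_pfn() */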
Index: head/sys/compat/linuxkpi/common/include/linux/pfn.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/pfn.h	(nonexistent)
+++ head/sys/compat/linuxkpi/common/include/linux/pfn.h	(revision 316033)
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LINUX_PFN_H_
+#define _LINUX_PFN_H_
+
+#include
+
+typedef struct {
+	u64 val;
+} pfn_t;
+
+#define PFN_ALIGN(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
+
+#endif /* _LINUX_PFN_H_ */

Property changes on: head/sys/compat/linuxkpi/common/include/linux/pfn.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
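A worked example of the PFN macros added above, assuming 4 KB pages (PAGE_SHIFT == 12):

	PFN_DOWN(0x3000)   -> 3		/* frame containing physical byte 0x3000 */
	PFN_UP(0x3001)     -> 4		/* frames needed to cover 0x3001 bytes */
	PFN_PHYS(3)        -> 0x3000	/* physical address of frame 3 */
	PFN_ALIGN(0x3001)  -> 0x4000	/* next page boundary */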
Index: head/sys/compat/linuxkpi/common/include/linux/pfn_t.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/pfn_t.h	(nonexistent)
+++ head/sys/compat/linuxkpi/common/include/linux/pfn_t.h	(revision 316033)
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LINUX_PFN_T_H_
+#define _LINUX_PFN_T_H_
+
+#include
+
+CTASSERT(PAGE_SHIFT > 4);
+
+#define PFN_FLAGS_MASK	(((u64)(PAGE_SIZE - 1)) << (64 - PAGE_SHIFT))
+#define PFN_SG_CHAIN	(1ULL << (64 - 1))
+#define PFN_SG_LAST	(1ULL << (64 - 2))
+#define PFN_DEV		(1ULL << (64 - 3))
+#define PFN_MAP		(1ULL << (64 - 4))
+
+static inline pfn_t
+__pfn_to_pfn_t(unsigned long pfn, u64 flags)
+{
+	pfn_t pfn_t = { pfn | (flags & PFN_FLAGS_MASK) };
+
+	return (pfn_t);
+}
+
+static inline pfn_t
+pfn_to_pfn_t(unsigned long pfn)
+{
+	return (__pfn_to_pfn_t(pfn, 0));
+}
+
+#endif /* _LINUX_PFN_T_H_ */

Property changes on: head/sys/compat/linuxkpi/common/include/linux/pfn_t.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
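A minimal sketch of pfn_t construction with the helpers above; pg is a hypothetical vm_page. The flag bits occupy the top four bits of the 64-bit value, and the CTASSERT guarantees PFN_FLAGS_MASK is wide enough to cover them:

	unsigned long pfn = page_to_pfn(pg);
	pfn_t plain = pfn_to_pfn_t(pfn);			/* no flags set */
	pfn_t dev = __pfn_to_pfn_t(pfn, PFN_DEV | PFN_MAP);	/* device-backed, mappable */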
Index: head/sys/compat/linuxkpi/common/include/linux/preempt.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/preempt.h	(nonexistent)
+++ head/sys/compat/linuxkpi/common/include/linux/preempt.h	(revision 316033)
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LINUX_PREEMPT_H_
+#define _LINUX_PREEMPT_H_
+
+#include
+
+#define in_interrupt() \
+	(curthread->td_intr_nesting_level || curthread->td_critnest)
+
+#endif /* _LINUX_PREEMPT_H_ */

Property changes on: head/sys/compat/linuxkpi/common/include/linux/preempt.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
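A small sketch of the intended use of in_interrupt(): skip work that may sleep when running in interrupt or critical-section context. The mydrv_refill name is hypothetical:

	static void
	mydrv_refill(void)
	{
		if (in_interrupt())
			return;		/* cannot sleep here; defer the work instead */
		/* sleepable allocation or other blocking work goes here */
	}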
Index: head/sys/compat/linuxkpi/common/include/linux/types.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/types.h	(revision 316032)
+++ head/sys/compat/linuxkpi/common/include/linux/types.h	(revision 316033)
@@ -1,75 +1,76 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_TYPES_H_
#define _LINUX_TYPES_H_

#include
#include
#include
#include
#include
#include

#ifndef __bitwise__
#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#else
#define __bitwise__
#endif
#endif

typedef uint16_t __le16;
typedef uint16_t __be16;
typedef uint32_t __le32;
typedef uint32_t __be32;
typedef uint64_t __le64;
typedef uint64_t __be64;

typedef unsigned int uint;
typedef unsigned gfp_t;
typedef uint64_t loff_t;
typedef vm_paddr_t resource_size_t;
typedef uint16_t __bitwise__ __sum16;
+typedef unsigned long pgoff_t;

typedef u64 phys_addr_t;

#define DECLARE_BITMAP(n, bits) \
	unsigned long n[howmany(bits, sizeof(long) * 8)]

struct rcu_head {
	void *raw[2];
} __aligned(sizeof(void *));

typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
typedef int linux_task_fn_t(void *data);

#endif /* _LINUX_TYPES_H_ */
Index: head/sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_page.c	(revision 316032)
+++ head/sys/compat/linuxkpi/common/src/linux_page.c	(revision 316033)
@@ -1,167 +1,284 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

-#include
-
#include

+#include
+#include
+#include
+#include
+#include

#include
#include

+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
void *
linux_page_address(struct page *page)
{
#ifdef __amd64__
	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)));
#else
	if (page->object != kmem_object && page->object != kernel_object)
		return (NULL);
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
#endif
}

vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
#ifdef __amd64__
	unsigned long npages = 1UL << order;
	int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);
	vm_page_t page;

	if (order == 0 && (flags & GFP_DMA32) == 0) {
		page = vm_page_alloc(NULL, 0, req);
		if (page == NULL)
			return (NULL);
	} else {
		vm_paddr_t pmax = (flags & GFP_DMA32) ?
		    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
		page = vm_page_alloc_contig(NULL, 0, req,
		    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

		if (page == NULL) {
			if (flags & M_WAITOK) {
				if (!vm_page_reclaim_contig(req,
				    npages, 0, pmax, PAGE_SIZE, 0)) {
					VM_WAIT;
				}
				flags &= ~M_WAITOK;
				goto retry;
			}
			return (NULL);
		}
	}
	if (flags & M_ZERO) {
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if ((pgo->flags & PG_ZERO) == 0)
				pmap_zero_page(pgo);
		}
	}
#else
	vm_offset_t vaddr;
	vm_page_t page;

	vaddr = linux_alloc_kmem(flags, order);
	if (vaddr == 0)
		return (NULL);

	page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

	KASSERT(vaddr == (vm_offset_t)page_address(page),
	    ("Page address mismatch"));
#endif
	return (page);
}

void
linux_free_pages(vm_page_t page, unsigned int order)
{
#ifdef __amd64__
	unsigned long npages = 1UL << order;
	unsigned long x;

	for (x = 0; x != npages; x++) {
		vm_page_t pgo = page + x;

		vm_page_lock(pgo);
		vm_page_free(pgo);
		vm_page_unlock(pgo);
	}
#else
	vm_offset_t vaddr;

	vaddr = (vm_offset_t)page_address(page);

	linux_free_kmem(vaddr, order);
#endif
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(kmem_arena, size,
		    flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT,
		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(kmem_arena, addr, size);
+}
+
+static int
+linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
+    int write, struct page **pages)
+{
+	vm_prot_t prot;
+	size_t len;
+	int count;
+	int i;
+
+	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
+	len = ((size_t)nr_pages) << PAGE_SHIFT;
+	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
+	if (count == -1)
+		return (-EFAULT);
+
+	for (i = 0; i != nr_pages; i++) {
+		struct page *pg = pages[i];
+
+		vm_page_lock(pg);
+		vm_page_wire(pg);
+		vm_page_unlock(pg);
+	}
+	return (nr_pages);
+}
+
+int
+__get_user_pages_fast(unsigned long start, int nr_pages, int write,
+    struct page **pages)
+{
+	vm_map_t map;
+	vm_page_t *mp;
+	vm_offset_t va;
+	vm_offset_t end;
+	vm_prot_t prot;
+	int count;
+
+	if (nr_pages == 0 || in_interrupt())
+		return (0);
+
+	MPASS(pages != NULL);
+	va = start;
+	map = &curthread->td_proc->p_vmspace->vm_map;
+	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
+	if (start < vm_map_min(map) || end > vm_map_max(map))
+		return (-EINVAL);
+	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
+	for (count = 0, mp = pages, va = start; va < end;
+	    mp++, va += PAGE_SIZE, count++) {
+		*mp = pmap_extract_and_hold(map->pmap, va, prot);
+		if (*mp == NULL)
+			break;
+
+		vm_page_lock(*mp);
+		vm_page_wire(*mp);
+		vm_page_unlock(*mp);
+
+		if ((prot & VM_PROT_WRITE) != 0 &&
+		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
+			/*
+			 * Explicitly dirty the physical page. Otherwise, the
+			 * caller's changes may go unnoticed because they are
+			 * performed through an unmanaged mapping or by a DMA
+			 * operation.
+			 *
+			 * The object lock is not held here.
+			 * See vm_page_clear_dirty_mask().
+			 */
+			vm_page_dirty(*mp);
+		}
+	}
+	return (count);
+}
+
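/*
 * Illustrative sketch (not part of this change): a typical caller pins a
 * user range, operates on the pages, then drops the references with
 * put_page().  "uaddr" and "npages" are assumed inputs for the example:
 *
 *	struct page *pages[16];
 *	long got;
 *
 *	got = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *	if (got > 0) {
 *		// operate on the pinned pages here
 *		while (got--)
 *			put_page(pages[got]);
 *	}
 */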
+long
+get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
+    unsigned long start, unsigned long nr_pages, int gup_flags,
+    struct page **pages, struct vm_area_struct **vmas)
+{
+	vm_map_t map;
+
+	map = &mm->vmspace->vm_map;
+	return (linux_get_user_pages_internal(map, start, nr_pages,
+	    !!(gup_flags & FOLL_WRITE), pages));
+}
+
+long
+get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
+    struct page **pages, struct vm_area_struct **vmas)
+{
+	vm_map_t map;
+
+	map = &curthread->td_proc->p_vmspace->vm_map;
+	return (linux_get_user_pages_internal(map, start, nr_pages,
+	    !!(gup_flags & FOLL_WRITE), pages));
+}
+
+int
+is_vmalloc_addr(const void *addr)
+{
+	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}
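Finally, a small sketch combining is_vmalloc_addr(), implemented above, with the vmalloc_to_page() helper from mm.h; buf is a hypothetical pointer assumed to come from vmalloc():

	void *buf;			/* assume: returned by vmalloc() */
	struct page *pg;

	if (is_vmalloc_addr(buf)) {
		/* Resolve the backing page through the kernel mappings. */
		pg = vmalloc_to_page(buf);
	}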