diff --git a/sys/compat/linuxkpi/common/include/linux/io.h b/sys/compat/linuxkpi/common/include/linux/io.h
index e402ebed0665..08e1635b70ac 100644
--- a/sys/compat/linuxkpi/common/include/linux/io.h
+++ b/sys/compat/linuxkpi/common/include/linux/io.h
@@ -1,485 +1,493 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #ifndef	_LINUX_IO_H_
 #define	_LINUX_IO_H_

 #include
 #include
 #include
 #include
 #include

 /*
  * XXX This is all x86 specific. It should be bus space access.
  */

 /* rmb and wmb are declared in machine/atomic.h, so should be included first. */
 #ifndef __io_br
 #define	__io_br()	__compiler_membar()
 #endif

 #ifndef __io_ar
 #ifdef rmb
 #define	__io_ar()	rmb()
 #else
 #define	__io_ar()	__compiler_membar()
 #endif
 #endif

 #ifndef __io_bw
 #ifdef wmb
 #define	__io_bw()	wmb()
 #else
 #define	__io_bw()	__compiler_membar()
 #endif
 #endif

 #ifndef __io_aw
 #define	__io_aw()	__compiler_membar()
 #endif

 /* Access MMIO registers atomically without barriers and byte swapping. */

 static inline uint8_t
 __raw_readb(const volatile void *addr)
 {
 	return (*(const volatile uint8_t *)addr);
 }
 #define	__raw_readb(addr)	__raw_readb(addr)

 static inline void
 __raw_writeb(uint8_t v, volatile void *addr)
 {
 	*(volatile uint8_t *)addr = v;
 }
 #define	__raw_writeb(v, addr)	__raw_writeb(v, addr)

 static inline uint16_t
 __raw_readw(const volatile void *addr)
 {
 	return (*(const volatile uint16_t *)addr);
 }
 #define	__raw_readw(addr)	__raw_readw(addr)

 static inline void
 __raw_writew(uint16_t v, volatile void *addr)
 {
 	*(volatile uint16_t *)addr = v;
 }
 #define	__raw_writew(v, addr)	__raw_writew(v, addr)

 static inline uint32_t
 __raw_readl(const volatile void *addr)
 {
 	return (*(const volatile uint32_t *)addr);
 }
 #define	__raw_readl(addr)	__raw_readl(addr)

 static inline void
 __raw_writel(uint32_t v, volatile void *addr)
 {
 	*(volatile uint32_t *)addr = v;
 }
 #define	__raw_writel(v, addr)	__raw_writel(v, addr)

 #ifdef __LP64__
 static inline uint64_t
 __raw_readq(const volatile void *addr)
 {
 	return (*(const volatile uint64_t *)addr);
 }
 #define	__raw_readq(addr)	__raw_readq(addr)

 static inline void
 __raw_writeq(uint64_t v, volatile void *addr)
 {
 	*(volatile uint64_t *)addr = v;
 }
 #define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
 #endif

 #define	mmiowb()	barrier()

 /* Access little-endian MMIO registers atomically with memory barriers. */

 #undef readb
 static inline uint8_t
 readb(const volatile void *addr)
 {
 	uint8_t v;

 	__io_br();
 	v = *(const volatile uint8_t *)addr;
 	__io_ar();
 	return (v);
 }
 #define	readb(addr)	readb(addr)

 #undef writeb
 static inline void
 writeb(uint8_t v, volatile void *addr)
 {
 	__io_bw();
 	*(volatile uint8_t *)addr = v;
 	__io_aw();
 }
 #define	writeb(v, addr)	writeb(v, addr)

 #undef readw
 static inline uint16_t
 readw(const volatile void *addr)
 {
 	uint16_t v;

 	__io_br();
 	v = le16toh(__raw_readw(addr));
 	__io_ar();
 	return (v);
 }
 #define	readw(addr)	readw(addr)

 #undef writew
 static inline void
 writew(uint16_t v, volatile void *addr)
 {
 	__io_bw();
 	__raw_writew(htole16(v), addr);
 	__io_aw();
 }
 #define	writew(v, addr)	writew(v, addr)

 #undef readl
 static inline uint32_t
 readl(const volatile void *addr)
 {
 	uint32_t v;

 	__io_br();
 	v = le32toh(__raw_readl(addr));
 	__io_ar();
 	return (v);
 }
 #define	readl(addr)	readl(addr)

 #undef writel
 static inline void
 writel(uint32_t v, volatile void *addr)
 {
 	__io_bw();
 	__raw_writel(htole32(v), addr);
 	__io_aw();
 }
 #define	writel(v, addr)	writel(v, addr)

 #undef readq
 #undef writeq
 #ifdef __LP64__
 static inline uint64_t
 readq(const volatile void *addr)
 {
 	uint64_t v;

 	__io_br();
 	v = le64toh(__raw_readq(addr));
 	__io_ar();
 	return (v);
 }
 #define	readq(addr)	readq(addr)

 static inline void
 writeq(uint64_t v, volatile void *addr)
 {
 	__io_bw();
 	__raw_writeq(htole64(v), addr);
 	__io_aw();
 }
 #define	writeq(v, addr)	writeq(v, addr)
 #endif

 /* Access little-endian MMIO registers atomically without memory barriers. */

 #undef readb_relaxed
 static inline uint8_t
 readb_relaxed(const volatile void *addr)
 {
 	return (__raw_readb(addr));
 }
 #define	readb_relaxed(addr)	readb_relaxed(addr)

 #undef writeb_relaxed
 static inline void
 writeb_relaxed(uint8_t v, volatile void *addr)
 {
 	__raw_writeb(v, addr);
 }
 #define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)

 #undef readw_relaxed
 static inline uint16_t
 readw_relaxed(const volatile void *addr)
 {
 	return (le16toh(__raw_readw(addr)));
 }
 #define	readw_relaxed(addr)	readw_relaxed(addr)

 #undef writew_relaxed
 static inline void
 writew_relaxed(uint16_t v, volatile void *addr)
 {
 	__raw_writew(htole16(v), addr);
 }
 #define	writew_relaxed(v, addr)	writew_relaxed(v, addr)

 #undef readl_relaxed
 static inline uint32_t
 readl_relaxed(const volatile void *addr)
 {
 	return (le32toh(__raw_readl(addr)));
 }
 #define	readl_relaxed(addr)	readl_relaxed(addr)

 #undef writel_relaxed
 static inline void
 writel_relaxed(uint32_t v, volatile void *addr)
 {
 	__raw_writel(htole32(v), addr);
 }
 #define	writel_relaxed(v, addr)	writel_relaxed(v, addr)

 #undef readq_relaxed
 #undef writeq_relaxed
 #ifdef __LP64__
 static inline uint64_t
 readq_relaxed(const volatile void *addr)
 {
 	return (le64toh(__raw_readq(addr)));
 }
 #define	readq_relaxed(addr)	readq_relaxed(addr)

 static inline void
 writeq_relaxed(uint64_t v, volatile void *addr)
 {
 	__raw_writeq(htole64(v), addr);
 }
 #define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
 #endif

 /* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

 #undef ioread8
 static inline uint8_t
 ioread8(const volatile void *addr)
 {
 	return (readb(addr));
 }
 #define	ioread8(addr)	ioread8(addr)

 #undef ioread16
 static inline uint16_t
 ioread16(const volatile void *addr)
 {
 	return (readw(addr));
 }
 #define	ioread16(addr)	ioread16(addr)

 #undef ioread16be
 static inline uint16_t
 ioread16be(const volatile void *addr)
 {
 	uint16_t v;

 	__io_br();
 	v = (be16toh(__raw_readw(addr)));
 	__io_ar();
 	return (v);
 }
 #define	ioread16be(addr)	ioread16be(addr)

 #undef ioread32
 static inline uint32_t
 ioread32(const volatile void *addr)
 {
 	return (readl(addr));
 }
 #define	ioread32(addr)	ioread32(addr)

 #undef ioread32be
 static inline uint32_t
 ioread32be(const volatile void *addr)
 {
 	uint32_t v;

 	__io_br();
 	v = (be32toh(__raw_readl(addr)));
 	__io_ar();
 	return (v);
 }
 #define	ioread32be(addr)	ioread32be(addr)

 #undef iowrite8
 static inline void
 iowrite8(uint8_t v, volatile void *addr)
 {
 	writeb(v, addr);
 }
 #define	iowrite8(v, addr)	iowrite8(v, addr)

 #undef iowrite16
 static inline void
 iowrite16(uint16_t v, volatile void *addr)
 {
 	writew(v, addr);
 }
 #define	iowrite16	iowrite16

 #undef iowrite32
 static inline void
 iowrite32(uint32_t v, volatile void *addr)
 {
 	writel(v, addr);
 }
 #define	iowrite32(v, addr)	iowrite32(v, addr)

 #undef iowrite32be
 static inline void
 iowrite32be(uint32_t v, volatile void *addr)
 {
 	__io_bw();
 	__raw_writel(htobe32(v), addr);
 	__io_aw();
 }
 #define	iowrite32be(v, addr)	iowrite32be(v, addr)

 #if defined(__i386__) || defined(__amd64__)
 static inline void
 _outb(u_char data, u_int port)
 {
 	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
 }
 #endif

 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
 void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
 #else
 #define	_ioremap_attr(...) NULL
 #endif

 #ifdef VM_MEMATTR_DEVICE
 #define	ioremap_nocache(addr, size)	\
 	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
 #define	ioremap_wt(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
 #define	ioremap(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
 #else
 #define	ioremap_nocache(addr, size)	\
 	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
 #define	ioremap_wt(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
 #define	ioremap(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
 #endif
 #ifdef VM_MEMATTR_WRITE_COMBINING
 #define	ioremap_wc(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
 #else
 #define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
 #endif
 #define	ioremap_wb(addr, size)		\
 	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
 void iounmap(void *addr);

 #define	memset_io(a, b, c)	memset((a), (b), (c))
 #define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
 #define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))

 static inline void
 __iowrite32_copy(void *to, void *from, size_t count)
 {
 	uint32_t *src;
 	uint32_t *dst;
 	int i;

 	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
 		__raw_writel(*src, dst);
 }

 static inline void
 __iowrite64_copy(void *to, void *from, size_t count)
 {
 #ifdef __LP64__
 	uint64_t *src;
 	uint64_t *dst;
 	int i;

 	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
 		__raw_writeq(*src, dst);
 #else
 	__iowrite32_copy(to, from, count * 2);
 #endif
 }

 enum {
 	MEMREMAP_WB = 1 << 0,
 	MEMREMAP_WT = 1 << 1,
 	MEMREMAP_WC = 1 << 2,
 };

 static inline void *
 memremap(resource_size_t offset, size_t size, unsigned long flags)
 {
 	void *addr = NULL;

 	if ((flags & MEMREMAP_WB) &&
 	    (addr = ioremap_wb(offset, size)) != NULL)
 		goto done;
 	if ((flags & MEMREMAP_WT) &&
 	    (addr = ioremap_wt(offset, size)) != NULL)
 		goto done;
 	if ((flags & MEMREMAP_WC) &&
 	    (addr = ioremap_wc(offset, size)) != NULL)
 		goto done;
 done:
 	return (addr);
 }

 static inline void
 memunmap(void *addr)
 {
 	/* XXX May need to check if this is RAM */
 	iounmap(addr);
 }

+#define	__MTRR_ID_BASE	1
+int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
+void lkpi_arch_phys_wc_del(int);
+#define	arch_phys_wc_add(...)	lkpi_arch_phys_wc_add(__VA_ARGS__)
+#define	arch_phys_wc_del(...)	lkpi_arch_phys_wc_del(__VA_ARGS__)
+#define	arch_phys_wc_index(x)	\
+	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
+
 #endif	/* _LINUX_IO_H_ */
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index 3c8bc2bd3c5b..df4a124cf3e2 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -1,359 +1,431 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
  * Copyright (c) 2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #include
 __FBSDID("$FreeBSD$");

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
+#include

 #include

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include

 #include
 #include

 #include
 #include
 #include
 #include
+#include
+#include
+#include
+
+#ifdef __i386__
+DEFINE_IDR(mtrr_idr);
+static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
+extern int pat_works;
+#endif

 void
 si_meminfo(struct sysinfo *si)
 {
 	si->totalram = physmem;
 	si->totalhigh = 0;
 	si->mem_unit = PAGE_SIZE;
 }

 void *
 linux_page_address(struct page *page)
 {
 	if (page->object != kernel_object) {
 		return (PMAP_HAS_DMAP ?
 		    ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
 		    NULL);
 	}
 	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
 	    IDX_TO_OFF(page->pindex)));
 }

 vm_page_t
 linux_alloc_pages(gfp_t flags, unsigned int order)
 {
 	vm_page_t page;

 	if (PMAP_HAS_DMAP) {
 		unsigned long npages = 1UL << order;
 		int req = VM_ALLOC_WIRED;

 		if ((flags & M_ZERO) != 0)
 			req |= VM_ALLOC_ZERO;
 		if (order == 0 && (flags & GFP_DMA32) == 0) {
 			page = vm_page_alloc_noobj(req);
 			if (page == NULL)
 				return (NULL);
 		} else {
 			vm_paddr_t pmax = (flags & GFP_DMA32) ?
 			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
 retry:
 			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
 			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 			if (page == NULL) {
 				if (flags & M_WAITOK) {
 					if (!vm_page_reclaim_contig(req,
 					    npages, 0, pmax, PAGE_SIZE, 0)) {
 						vm_wait(NULL);
 					}
 					flags &= ~M_WAITOK;
 					goto retry;
 				}
 				return (NULL);
 			}
 		}
 	} else {
 		vm_offset_t vaddr;

 		vaddr = linux_alloc_kmem(flags, order);
 		if (vaddr == 0)
 			return (NULL);
 		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));
 		KASSERT(vaddr == (vm_offset_t)page_address(page),
 		    ("Page address mismatch"));
 	}
 	return (page);
 }

 void
 linux_free_pages(vm_page_t page, unsigned int order)
 {
 	if (PMAP_HAS_DMAP) {
 		unsigned long npages = 1UL << order;
 		unsigned long x;

 		for (x = 0; x != npages; x++) {
 			vm_page_t pgo = page + x;

 			if (vm_page_unwire_noq(pgo))
 				vm_page_free(pgo);
 		}
 	} else {
 		vm_offset_t vaddr;

 		vaddr = (vm_offset_t)page_address(page);

 		linux_free_kmem(vaddr, order);
 	}
 }

 vm_offset_t
 linux_alloc_kmem(gfp_t flags, unsigned int order)
 {
 	size_t size = ((size_t)PAGE_SIZE) << order;
 	vm_offset_t addr;

 	if ((flags & GFP_DMA32) == 0) {
 		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
 	} else {
 		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
 		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 	}
 	return (addr);
 }

 void
 linux_free_kmem(vm_offset_t addr, unsigned int order)
 {
 	size_t size = ((size_t)PAGE_SIZE) << order;

 	kmem_free(addr, size);
 }

 static int
 linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
     int write, struct page **pages)
 {
 	vm_prot_t prot;
 	size_t len;
 	int count;

 	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
 	len = ptoa((vm_offset_t)nr_pages);
 	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
 	return (count == -1 ? -EFAULT : nr_pages);
 }

 int
 __get_user_pages_fast(unsigned long start, int nr_pages, int write,
     struct page **pages)
 {
 	vm_map_t map;
 	vm_page_t *mp;
 	vm_offset_t va;
 	vm_offset_t end;
 	vm_prot_t prot;
 	int count;

 	if (nr_pages == 0 || in_interrupt())
 		return (0);

 	MPASS(pages != NULL);
 	map = &curthread->td_proc->p_vmspace->vm_map;
 	end = start + ptoa((vm_offset_t)nr_pages);
 	if (!vm_map_range_valid(map, start, end))
 		return (-EINVAL);
 	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
 	for (count = 0, mp = pages, va = start; va < end;
 	    mp++, va += PAGE_SIZE, count++) {
 		*mp = pmap_extract_and_hold(map->pmap, va, prot);
 		if (*mp == NULL)
 			break;

 		if ((prot & VM_PROT_WRITE) != 0 &&
 		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
 			/*
 			 * Explicitly dirty the physical page. Otherwise, the
 			 * caller's changes may go unnoticed because they are
 			 * performed through an unmanaged mapping or by a DMA
 			 * operation.
 			 *
 			 * The object lock is not held here.
 			 * See vm_page_clear_dirty_mask().
 			 */
 			vm_page_dirty(*mp);
 		}
 	}
 	return (count);
 }

 long
 get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
     unsigned long start, unsigned long nr_pages, int gup_flags,
     struct page **pages, struct vm_area_struct **vmas)
 {
 	vm_map_t map;

 	map = &task->task_thread->td_proc->p_vmspace->vm_map;
 	return (linux_get_user_pages_internal(map, start, nr_pages,
 	    !!(gup_flags & FOLL_WRITE), pages));
 }

 long
 get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
     struct page **pages, struct vm_area_struct **vmas)
 {
 	vm_map_t map;

 	map = &curthread->td_proc->p_vmspace->vm_map;
 	return (linux_get_user_pages_internal(map, start, nr_pages,
 	    !!(gup_flags & FOLL_WRITE), pages));
 }

 int
 is_vmalloc_addr(const void *addr)
 {
 	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
 }

 vm_fault_t
 lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
     unsigned long pfn, pgprot_t prot)
 {
 	vm_object_t vm_obj = vma->vm_obj;
 	vm_object_t tmp_obj;
 	vm_page_t page;
 	vm_pindex_t pindex;

 	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
 	pindex = OFF_TO_IDX(addr - vma->vm_start);
 	if (vma->vm_pfn_count == 0)
 		vma->vm_pfn_first = pindex;
 	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

 retry:
 	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
 	if (page == NULL) {
 		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
 		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
 			goto retry;
 		if (page->object != NULL) {
 			tmp_obj = page->object;
 			vm_page_xunbusy(page);
 			VM_OBJECT_WUNLOCK(vm_obj);
 			VM_OBJECT_WLOCK(tmp_obj);
 			if (page->object == tmp_obj &&
 			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
 				KASSERT(page->object == tmp_obj,
 				    ("page has changed identity"));
 				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
 				    ("page does not belong to shmem"));
 				vm_pager_page_unswapped(page);
 				if (pmap_page_is_mapped(page)) {
 					vm_page_xunbusy(page);
 					VM_OBJECT_WUNLOCK(tmp_obj);
 					printf("%s: page rename failed: page "
 					    "is mapped\n", __func__);
 					VM_OBJECT_WLOCK(vm_obj);
 					return (VM_FAULT_NOPAGE);
 				}
 				vm_page_remove(page);
 			}
 			VM_OBJECT_WUNLOCK(tmp_obj);
 			VM_OBJECT_WLOCK(vm_obj);
 			goto retry;
 		}
 		if (vm_page_insert(page, vm_obj, pindex)) {
 			vm_page_xunbusy(page);
 			return (VM_FAULT_OOM);
 		}
 		vm_page_valid(page);
 	}
 	pmap_page_set_memattr(page, pgprot2cachemode(prot));
 	vma->vm_pfn_count++;

 	return (VM_FAULT_NOPAGE);
 }

 /*
  * Although FreeBSD version of unmap_mapping_range has semantics and types of
  * parameters compatible with Linux version, the values passed in are different
  * @obj should match to vm_private_data field of vm_area_struct returned by
  *      mmap file operation handler, see linux_file_mmap_single() sources
  * @holelen should match to size of area to be munmapped.
  */
 void
 lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
     loff_t const holelen, int even_cows __unused)
 {
 	vm_object_t devobj;
 	vm_page_t page;
 	int i, page_count;

 	devobj = cdev_pager_lookup(obj);
 	if (devobj != NULL) {
 		page_count = OFF_TO_IDX(holelen);

 		VM_OBJECT_WLOCK(devobj);
 retry:
 		for (i = 0; i < page_count; i++) {
 			page = vm_page_lookup(devobj, i);
 			if (page == NULL)
 				continue;
 			if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
 				goto retry;
 			cdev_pager_free_page(devobj, page);
 		}
 		VM_OBJECT_WUNLOCK(devobj);
 		vm_object_deallocate(devobj);
 	}
 }
+
+int
+lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+#ifdef __i386__
+	struct mem_range_desc *mrdesc;
+	int error, id, act;
+
+	/* If PAT is available, do nothing */
+	if (pat_works)
+		return (0);
+
+	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
+	mrdesc->mr_base = base;
+	mrdesc->mr_len = size;
+	mrdesc->mr_flags = MDF_WRITECOMBINE;
+	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
+	act = MEMRANGE_SET_UPDATE;
+	error = mem_range_attr_set(mrdesc, &act);
+	if (error == 0) {
+		error = idr_get_new(&mtrr_idr, mrdesc, &id);
+		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
+		if (error != 0) {
+			act = MEMRANGE_SET_REMOVE;
+			mem_range_attr_set(mrdesc, &act);
+		}
+	}
+	if (error != 0) {
+		free(mrdesc, M_LKMTRR);
+		pr_warn(
+		    "Failed to add WC MTRR for [%p-%p]: %d; "
+		    "performance may suffer\n",
+		    (void *)base, (void *)(base + size - 1), error);
+	} else
+		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
+		    (void *)base, (void *)(base + size - 1));
+
+	return (error != 0 ? -error : id + __MTRR_ID_BASE);
+#else
+	return (0);
+#endif
+}
+
+void
+lkpi_arch_phys_wc_del(int reg)
+{
+#ifdef __i386__
+	struct mem_range_desc *mrdesc;
+	int act;
+
+	/* Check if arch_phys_wc_add() failed. */
+	if (reg < __MTRR_ID_BASE)
+		return;
+
+	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
+	MPASS(mrdesc != NULL);
+	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
+	act = MEMRANGE_SET_REMOVE;
+	mem_range_attr_set(mrdesc, &act);
+	free(mrdesc, M_LKMTRR);
+#endif
+}
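
A minimal usage sketch of the interface added by this patch, not part of the diff itself. The fake_drm_* names and the softc layout are hypothetical and only illustrate the calling convention; arch_phys_wc_add() and arch_phys_wc_del() are the macros introduced in linux/io.h above. The cookie returned by arch_phys_wc_add() is either 0 or negative (no-op or failure) or an id biased by __MTRR_ID_BASE, and arch_phys_wc_del() ignores anything below __MTRR_ID_BASE, so the result can be stored and passed back unconditionally.

/*
 * Hypothetical LinuxKPI consumer (e.g. a DRM driver marking its VRAM
 * aperture write-combining).  Sketch only; names below are not real.
 */
#include <linux/io.h>

struct fake_drm_softc {
	unsigned long	fb_base;	/* physical base of the aperture */
	unsigned long	fb_size;	/* aperture size in bytes */
	int		fb_wc;		/* cookie from arch_phys_wc_add() */
};

static void
fake_drm_aperture_init(struct fake_drm_softc *sc)
{
	/*
	 * On i386 without working PAT this registers a write-combining
	 * MTRR through mem_range_attr_set(); everywhere else it is a
	 * no-op that returns 0.
	 */
	sc->fb_wc = arch_phys_wc_add(sc->fb_base, sc->fb_size);
}

static void
fake_drm_aperture_fini(struct fake_drm_softc *sc)
{
	/* Safe even if the add failed or was a no-op. */
	arch_phys_wc_del(sc->fb_wc);
}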