Index: head/sys/amd64/amd64/mem.c
===================================================================
--- head/sys/amd64/amd64/mem.c  (revision 307331)
+++ head/sys/amd64/amd64/mem.c  (revision 307332)
@@ -1,239 +1,238 @@
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Used in /dev/mem drivers and elsewhere
 */
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    struct iovec *iov;
    void *p;
    ssize_t orig_resid;
    u_long v, vd;
    u_int c;
    int error;

    error = 0;
    orig_resid = uio->uio_resid;
    while (uio->uio_resid > 0 && error == 0) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }
        v = uio->uio_offset;
        c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));

        switch (dev2unit(dev)) {
        case CDEV_MINOR_KMEM:
            /*
             * Since c is clamped to be less or equal than
             * PAGE_SIZE, the uiomove() call does not
             * access past the end of the direct map.
             */
            if (v >= DMAP_MIN_ADDRESS &&
                v < DMAP_MIN_ADDRESS + dmaplimit) {
                error = uiomove((void *)v, c, uio);
                break;
            }

            if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
                VM_PROT_READ : VM_PROT_WRITE)) {
                error = EFAULT;
                break;
            }

            /*
             * If the extracted address is not accessible
             * through the direct map, then we make a
             * private (uncached) mapping because we can't
             * depend on the existing kernel mapping
             * remaining valid until the completion of
             * uiomove().
             *
             * XXX We cannot provide access to the
             * physical page 0 mapped into KVA.
             */
            v = pmap_extract(kernel_pmap, v);
            if (v == 0) {
                error = EFAULT;
                break;
            }
            /* FALLTHROUGH */
        case CDEV_MINOR_MEM:
            if (v < dmaplimit) {
                vd = PHYS_TO_DMAP(v);
                error = uiomove((void *)vd, c, uio);
                break;
            }
            if (v >= (1ULL << cpu_maxphyaddr)) {
                error = EFAULT;
                break;
            }
            p = pmap_mapdev(v, PAGE_SIZE);
            error = uiomove(p, c, uio);
            pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
            break;
        }
    }
    /*
     * Don't return error if any byte was written. Read and write
     * can return error only if no i/o was performed.
     */
    if (uio->uio_resid != orig_resid)
        error = 0;
    return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
/* ARGSUSED */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot __unused, vm_memattr_t *memattr __unused)
{
    if (dev2unit(dev) == CDEV_MINOR_MEM) {
        if (offset >= (1ULL << cpu_maxphyaddr))
            return (-1);
        *paddr = offset;
-   } else if (dev2unit(dev) == CDEV_MINOR_KMEM)
-       *paddr = vtophys(offset);
-   /* else panic! */
-   return (0);
+       return (0);
+   }
+   return (-1);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
/* ARGSUSED */
int
memioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
    int nd, error = 0;
    struct mem_range_op *mo = (struct mem_range_op *)data;
    struct mem_range_desc *md;

    /* is this for us? */
    if ((cmd != MEMRANGE_GET) &&
        (cmd != MEMRANGE_SET))
        return (ENOTTY);

    /* any chance we can handle this? */
    if (mem_range_softc.mr_op == NULL)
        return (EOPNOTSUPP);

    /* do we have any descriptors? */
    if (mem_range_softc.mr_ndesc == 0)
        return (ENXIO);

    switch (cmd) {
    case MEMRANGE_GET:
        nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
        if (nd > 0) {
            md = (struct mem_range_desc *)
                malloc(nd * sizeof(struct mem_range_desc),
                M_MEMDESC, M_WAITOK);
            error = mem_range_attr_get(md, &nd);
            if (!error)
                error = copyout(md, mo->mo_desc,
                    nd * sizeof(struct mem_range_desc));
            free(md, M_MEMDESC);
        } else
            nd = mem_range_softc.mr_ndesc;
        mo->mo_arg[0] = nd;
        break;
    case MEMRANGE_SET:
        md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
            M_MEMDESC, M_WAITOK);
        error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
        /* clamp description string */
        md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
        if (error == 0)
            error = mem_range_attr_set(md, &mo->mo_arg[0]);
        free(md, M_MEMDESC);
        break;
    }
    return (error);
}
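The user-visible effect of the amd64 hunk above: mmap() of /dev/kmem is now refused outright, where it previously handed the device pager vtophys(offset) without validating the offset as a kernel virtual address. Below is a minimal userland sketch of the new behaviour; the file name and error handling are illustrative only, and the exact errno a caller sees depends on how the device pager propagates the nonzero d_mmap return (commonly EINVAL).

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    void *p;
    int fd;

    fd = open("/dev/kmem", O_RDONLY);
    if (fd == -1)
        err(1, "open(/dev/kmem)");

    /*
     * After r307332 memmmap() returns -1 for the kmem minor, so the
     * device pager refuses the mapping and mmap() fails.
     */
    p = mmap(NULL, (size_t)getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        warn("mmap(/dev/kmem) rejected as expected");
    else
        printf("mmap(/dev/kmem) unexpectedly succeeded: %p\n", p);

    close(fd);
    return (0);
}

Build it with cc and run it with enough privilege to open the device node.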
Index: head/sys/arm/arm/mem.c
===================================================================
--- head/sys/arm/arm/mem.c  (revision 307331)
+++ head/sys/arm/arm/mem.c  (revision 307332)
@@ -1,170 +1,169 @@
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Used in /dev/mem drivers and elsewhere
 */
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

struct mem_range_softc mem_range_softc;

static struct sx tmppt_lock;
SX_SYSINIT(tmppt, &tmppt_lock, "mem4map");

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    int o;
    u_int c = 0, v;
    struct iovec *iov;
    int error = 0;
    vm_offset_t addr, eaddr;

    while (uio->uio_resid > 0 && error == 0) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }
        if (dev2unit(dev) == CDEV_MINOR_MEM) {
            int i;
            int address_valid = 0;

            v = uio->uio_offset;
            v &= ~PAGE_MASK;
            for (i = 0; dump_avail[i] || dump_avail[i + 1]; i += 2) {
                if (v >= dump_avail[i] &&
                    v < dump_avail[i + 1]) {
                    address_valid = 1;
                    break;
                }
            }
            if (!address_valid)
                return (EINVAL);
            sx_xlock(&tmppt_lock);
            pmap_kenter((vm_offset_t)_tmppt, v);
#if __ARM_ARCH >= 6
            pmap_tlb_flush(kernel_pmap, (vm_offset_t)_tmppt);
#endif
            o = (int)uio->uio_offset & PAGE_MASK;
            c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
            c = min(c, (u_int)(PAGE_SIZE - o));
            c = min(c, (u_int)iov->iov_len);
            error = uiomove((caddr_t)&_tmppt[o], (int)c, uio);
            pmap_qremove((vm_offset_t)_tmppt, 1);
            sx_xunlock(&tmppt_lock);
            continue;
        } else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
            c = iov->iov_len;

            /*
             * Make sure that all of the pages are currently
             * resident so that we don't create any zero-fill
             * pages.
             */
            addr = trunc_page(uio->uio_offset);
            eaddr = round_page(uio->uio_offset + c);

            for (; addr < eaddr; addr += PAGE_SIZE)
                if (pmap_extract(kernel_pmap, addr) == 0)
                    return (EFAULT);
            if (!kernacc((caddr_t)(int)uio->uio_offset, c,
                uio->uio_rw == UIO_READ ?
                VM_PROT_READ : VM_PROT_WRITE))
                return (EFAULT);
            error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
            continue;
        }
        /* else panic! */
    }
    return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
/* ARGSUSED */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot __unused, vm_memattr_t *memattr __unused)
{
-   if (dev2unit(dev) == CDEV_MINOR_MEM)
+   if (dev2unit(dev) == CDEV_MINOR_MEM) {
        *paddr = offset;
-   else if (dev2unit(dev) == CDEV_MINOR_KMEM)
-       *paddr = vtophys(offset);
-   /* else panic! */
-   return (0);
+       return (0);
+   }
+   return (-1);
}
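The read()/write() path through memrw() is untouched on every architecture in this commit; only the ability to mmap() the kmem minor goes away. Code that was mapping /dev/kmem to peek at kernel variables can usually switch to libkvm, which resolves symbols in the running kernel and reads them through the special files instead of mapping them. A sketch, assuming the kernel symbol hz resolves via kvm_nlist() (link with -lkvm, run with enough privilege to open the device nodes):

#include <sys/types.h>

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    char errbuf[_POSIX2_LINE_MAX];
    struct nlist nl[] = { { .n_name = "_hz" }, { .n_name = NULL } };
    kvm_t *kd;
    int hz;

    /* NULL, NULL: open the live kernel through /dev/mem and /dev/kmem. */
    kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
    if (kd == NULL) {
        fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
        exit(1);
    }
    if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
        fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
        exit(1);
    }
    if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
        fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
        exit(1);
    }
    printf("hz = %d\n", hz);
    kvm_close(kd);
    return (0);
}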
Index: head/sys/arm64/arm64/mem.c
===================================================================
--- head/sys/arm64/arm64/mem.c  (revision 307331)
+++ head/sys/arm64/arm64/mem.c  (revision 307332)
@@ -1,132 +1,131 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

struct mem_range_softc mem_range_softc;

int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    struct iovec *iov;
    struct vm_page m;
    vm_page_t marr;
    vm_offset_t off, v;
    u_int cnt;
    int error;

    error = 0;

    while (uio->uio_resid > 0 && error == 0) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }

        v = uio->uio_offset;
        off = v & PAGE_MASK;
        cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
        if (cnt == 0)
            continue;

        switch(dev2unit(dev)) {
        case CDEV_MINOR_KMEM:
            /* If the address is in the DMAP just copy it */
            if (VIRT_IN_DMAP(v)) {
                error = uiomove((void *)v, cnt, uio);
                break;
            }

            if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
                VM_PROT_READ : VM_PROT_WRITE)) {
                error = EFAULT;
                break;
            }

            /* Get the physical address to read */
            v = pmap_extract(kernel_pmap, v);
            if (v == 0) {
                error = EFAULT;
                break;
            }

            /* FALLTHROUGH */
        case CDEV_MINOR_MEM:
            /* If within the DMAP use this to copy from */
            if (PHYS_IN_DMAP(v)) {
                v = PHYS_TO_DMAP(v);
                error = uiomove((void *)v, cnt, uio);
                break;
            }

            /* Have uiomove_fromphys handle the data */
            m.phys_addr = trunc_page(v);
            marr = &m;
            uiomove_fromphys(&marr, off, cnt, uio);
            break;
        }
    }

    return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
/* ARGSUSED */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot __unused, vm_memattr_t *memattr __unused)
{
-   if (dev2unit(dev) == CDEV_MINOR_MEM)
+   if (dev2unit(dev) == CDEV_MINOR_MEM) {
        *paddr = offset;
-   else if (dev2unit(dev) == CDEV_MINOR_KMEM)
-       *paddr = vtophys(offset);
-   /* else panic! */
-   return (0);
+       return (0);
+   }
+   return (-1);
}
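For the mem minor nothing changes: memmmap() still treats the mmap() offset as a physical address (and on amd64 still rejects offsets at or beyond the CPU's reported physical-address width). A hedged userland sketch that maps one page of /dev/mem and dumps the 32-bit word containing a caller-supplied physical address; the address and the safety of touching it are entirely the caller's responsibility:

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
    volatile uint32_t *p;
    uint64_t pa;
    size_t len;
    int fd;

    if (argc != 2)
        errx(1, "usage: peekphys <physaddr>");
    pa = strtoull(argv[1], NULL, 0);
    len = (size_t)getpagesize();

    fd = open("/dev/mem", O_RDONLY);
    if (fd == -1)
        err(1, "open(/dev/mem)");

    /* The mmap() offset is interpreted as a physical address. */
    p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd,
        (off_t)(pa & ~((uint64_t)len - 1)));
    if (p == MAP_FAILED)
        err(1, "mmap(/dev/mem)");

    printf("%#" PRIx64 ": %#" PRIx32 "\n", pa, p[(pa & (len - 1)) / 4]);
    munmap((void *)(uintptr_t)p, len);
    close(fd);
    return (0);
}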
Index: head/sys/i386/i386/mem.c
===================================================================
--- head/sys/i386/i386/mem.c  (revision 307331)
+++ head/sys/i386/i386/mem.c  (revision 307332)
@@ -1,230 +1,229 @@
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Used in /dev/mem drivers and elsewhere
 */
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static struct sx memsxlock;
SX_SYSINIT(memsxlockinit, &memsxlock, "/dev/mem lock");

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    int o;
    u_int c = 0;
    vm_paddr_t pa;
    struct iovec *iov;
    int error = 0;
    vm_offset_t addr;

    if (dev2unit(dev) != CDEV_MINOR_MEM && dev2unit(dev) != CDEV_MINOR_KMEM)
        return EIO;
    if (dev2unit(dev) == CDEV_MINOR_KMEM && uio->uio_resid > 0) {
        if (uio->uio_offset < (vm_offset_t)VADDR(PTDPTDI, 0))
            return (EFAULT);

        if (!kernacc((caddr_t)(int)uio->uio_offset, uio->uio_resid,
            uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE))
            return (EFAULT);
    }

    while (uio->uio_resid > 0 && error == 0) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }
        if (dev2unit(dev) == CDEV_MINOR_MEM) {
            pa = uio->uio_offset;
            pa &= ~PAGE_MASK;
        } else {
            /*
             * Extract the physical page since the mapping may
             * change at any time. This avoids panics on page
             * fault in this case but will cause reading/writing
             * to the wrong page.
             * Hopefully an application will notice the wrong
             * data on read access and refrain from writing.
             * This should be replaced by a special uiomove
             * type function that just returns an error if there
             * is a page fault on a kernel page.
             */
            addr = trunc_page(uio->uio_offset);
            pa = pmap_extract(kernel_pmap, addr);
            if (pa == 0)
                return EFAULT;
        }

        /*
         * XXX UPS This should just use sf_buf_alloc.
         * Unfortunately sf_buf_alloc needs a vm_page
         * and we may want to look at memory not covered
         * by the page array.
         */

        sx_xlock(&memsxlock);
        pmap_kenter((vm_offset_t)ptvmmap, pa);
        pmap_invalidate_page(kernel_pmap,(vm_offset_t)ptvmmap);
        o = (int)uio->uio_offset & PAGE_MASK;
        c = PAGE_SIZE - o;
        c = min(c, (u_int)iov->iov_len);
        error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
        pmap_qremove((vm_offset_t)ptvmmap, 1);
        sx_xunlock(&memsxlock);

    }

    return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
/* ARGSUSED */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot __unused, vm_memattr_t *memattr __unused)
{
-   if (dev2unit(dev) == CDEV_MINOR_MEM)
+   if (dev2unit(dev) == CDEV_MINOR_MEM) {
        *paddr = offset;
-   else if (dev2unit(dev) == CDEV_MINOR_KMEM)
-       *paddr = vtophys(offset);
-   /* else panic! */
-   return (0);
+       return (0);
+   }
+   return (-1);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
/* ARGSUSED */
int
memioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
    int nd, error = 0;
    struct mem_range_op *mo = (struct mem_range_op *)data;
    struct mem_range_desc *md;

    /* is this for us? */
    if ((cmd != MEMRANGE_GET) &&
        (cmd != MEMRANGE_SET))
        return (ENOTTY);

    /* any chance we can handle this? */
    if (mem_range_softc.mr_op == NULL)
        return (EOPNOTSUPP);

    /* do we have any descriptors? */
    if (mem_range_softc.mr_ndesc == 0)
        return (ENXIO);

    switch (cmd) {
    case MEMRANGE_GET:
        nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
        if (nd > 0) {
            md = (struct mem_range_desc *)
                malloc(nd * sizeof(struct mem_range_desc),
                M_MEMDESC, M_WAITOK);
            error = mem_range_attr_get(md, &nd);
            if (!error)
                error = copyout(md, mo->mo_desc,
                    nd * sizeof(struct mem_range_desc));
            free(md, M_MEMDESC);
        } else
            nd = mem_range_softc.mr_ndesc;
        mo->mo_arg[0] = nd;
        break;
    case MEMRANGE_SET:
        md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
            M_MEMDESC, M_WAITOK);
        error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
        /* clamp description string */
        md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
        if (error == 0)
            error = mem_range_attr_set(md, &mo->mo_arg[0]);
        free(md, M_MEMDESC);
        break;
    }
    return (error);
}
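The convention these hunks rely on is the d_mmap return value: fill in *paddr and return 0 to accept a page, or return nonzero (here -1) so that the device pager refuses the mapping. The following is a hypothetical, self-contained character-device module sketched along the same lines as the new memmmap(); demomem, demo_mmap and DEMO_PA_LIMIT are invented for illustration and are not part of this commit.

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/systm.h>

/* Hypothetical cap on the physical range this device is willing to expose. */
#define DEMO_PA_LIMIT   (1ULL << 32)

static struct cdev *demo_dev;

static int
demo_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
    if (offset >= DEMO_PA_LIMIT)
        return (-1);        /* nonzero: refuse the mapping */
    *paddr = offset;        /* the offset is taken as a physical address */
    return (0);
}

static struct cdevsw demo_cdevsw = {
    .d_version =    D_VERSION,
    .d_name =       "demomem",
    .d_mmap =       demo_mmap,
};

static int
demo_modevent(module_t mod, int type, void *arg)
{
    switch (type) {
    case MOD_LOAD:
        demo_dev = make_dev(&demo_cdevsw, 0, UID_ROOT, GID_WHEEL,
            0600, "demomem");
        return (0);
    case MOD_UNLOAD:
        destroy_dev(demo_dev);
        return (0);
    default:
        return (EOPNOTSUPP);
    }
}

DEV_MODULE(demomem, demo_modevent, NULL);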
Index: head/sys/powerpc/powerpc/mem.c
===================================================================
--- head/sys/powerpc/powerpc/mem.c  (revision 307331)
+++ head/sys/powerpc/powerpc/mem.c  (revision 307332)
@@ -1,318 +1,316 @@
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static void ppc_mrinit(struct mem_range_softc *);
static int ppc_mrset(struct mem_range_softc *, struct mem_range_desc *, int *);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static struct mem_range_ops ppc_mem_range_ops = {
    ppc_mrinit,
    ppc_mrset,
    NULL,
    NULL
};
struct mem_range_softc mem_range_softc = {
    &ppc_mem_range_ops,
    0, 0, NULL
};

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    struct iovec *iov;
    int error = 0;
    vm_offset_t va, eva, off, v;
    vm_prot_t prot;
    struct vm_page m;
    vm_page_t marr;
    vm_size_t cnt;

    cnt = 0;
    error = 0;

    while (uio->uio_resid > 0 && !error) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }
        if (dev2unit(dev) == CDEV_MINOR_MEM) {
kmem_direct_mapped: v = uio->uio_offset;

            off = uio->uio_offset & PAGE_MASK;
            cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base & PAGE_MASK);
            cnt = min(cnt, PAGE_SIZE - off);
            cnt = min(cnt, iov->iov_len);

            if (mem_valid(v, cnt)) {
                error = EFAULT;
                break;
            }

            if (!pmap_dev_direct_mapped(v, cnt)) {
                error = uiomove((void *)v, cnt, uio);
            } else {
                m.phys_addr = trunc_page(v);
                marr = &m;
                error = uiomove_fromphys(&marr, off, cnt, uio);
            }
        } else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
            va = uio->uio_offset;

            if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end))
                goto kmem_direct_mapped;

            va = trunc_page(uio->uio_offset);
            eva = round_page(uio->uio_offset + iov->iov_len);

            /*
             * Make sure that all the pages are currently resident
             * so that we don't create any zero-fill pages.
             */
            for (; va < eva; va += PAGE_SIZE)
                if (pmap_extract(kernel_pmap, va) == 0)
                    return (EFAULT);

            prot = (uio->uio_rw == UIO_READ) ?
                VM_PROT_READ : VM_PROT_WRITE;

            va = uio->uio_offset;
            if (kernacc((void *) va, iov->iov_len, prot) == FALSE)
                return (EFAULT);
            error = uiomove((void *)va, iov->iov_len, uio);

            continue;
        }
    }

    return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
    int i;

    if (dev2unit(dev) == CDEV_MINOR_MEM)
        *paddr = offset;
-   else if (dev2unit(dev) == CDEV_MINOR_KMEM)
-       *paddr = vtophys(offset);
    else
        return (EFAULT);

    for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
        if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
            continue;

        if (offset >= mem_range_softc.mr_desc[i].mr_base &&
            offset < mem_range_softc.mr_desc[i].mr_base +
            mem_range_softc.mr_desc[i].mr_len) {
            switch (mem_range_softc.mr_desc[i].mr_flags &
                MDF_ATTRMASK) {
            case MDF_WRITEBACK:
                *memattr = VM_MEMATTR_WRITE_BACK;
                break;
            case MDF_WRITECOMBINE:
                *memattr = VM_MEMATTR_WRITE_COMBINING;
                break;
            case MDF_UNCACHEABLE:
                *memattr = VM_MEMATTR_UNCACHEABLE;
                break;
            case MDF_WRITETHROUGH:
                *memattr = VM_MEMATTR_WRITE_THROUGH;
                break;
            }

            break;
        }
    }

    return (0);
}

static void
ppc_mrinit(struct mem_range_softc *sc)
{
    sc->mr_cap = 0;
    sc->mr_ndesc = 8; /* XXX: Should be dynamically expandable */
    sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
        M_MEMDESC, M_WAITOK | M_ZERO);
}

static int
ppc_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
    int i;

    switch(*arg) {
    case MEMRANGE_SET_UPDATE:
        for (i = 0; i < sc->mr_ndesc; i++) {
            if (!sc->mr_desc[i].mr_len) {
                sc->mr_desc[i] = *desc;
                sc->mr_desc[i].mr_flags |= MDF_ACTIVE;
                return (0);
            }
            if (sc->mr_desc[i].mr_base == desc->mr_base &&
                sc->mr_desc[i].mr_len == desc->mr_len)
                return (EEXIST);
        }
        return (ENOSPC);
    case MEMRANGE_SET_REMOVE:
        for (i = 0; i < sc->mr_ndesc; i++)
            if (sc->mr_desc[i].mr_base == desc->mr_base &&
                sc->mr_desc[i].mr_len == desc->mr_len) {
                bzero(&sc->mr_desc[i], sizeof(sc->mr_desc[i]));
                return (0);
            }
        return (ENOENT);
    default:
        return (EOPNOTSUPP);
    }

    return (0);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
/* ARGSUSED */
int
memioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
    int nd, error = 0;
    struct mem_range_op *mo = (struct mem_range_op *)data;
    struct mem_range_desc *md;

    /* is this for us? */
    if ((cmd != MEMRANGE_GET) &&
        (cmd != MEMRANGE_SET))
        return (ENOTTY);

    /* any chance we can handle this? */
    if (mem_range_softc.mr_op == NULL)
        return (EOPNOTSUPP);

    /* do we have any descriptors? */
    if (mem_range_softc.mr_ndesc == 0)
        return (ENXIO);

    switch (cmd) {
    case MEMRANGE_GET:
        nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
        if (nd > 0) {
            md = (struct mem_range_desc *)
                malloc(nd * sizeof(struct mem_range_desc),
                M_MEMDESC, M_WAITOK);
            error = mem_range_attr_get(md, &nd);
            if (!error)
                error = copyout(md, mo->mo_desc,
                    nd * sizeof(struct mem_range_desc));
            free(md, M_MEMDESC);
        } else
            nd = mem_range_softc.mr_ndesc;
        mo->mo_arg[0] = nd;
        break;
    case MEMRANGE_SET:
        md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
            M_MEMDESC, M_WAITOK);
        error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
        /* clamp description string */
        md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
        if (error == 0)
            error = mem_range_attr_set(md, &mo->mo_arg[0]);
        free(md, M_MEMDESC);
        break;
    }
    return (error);
}
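The memioctl() shim that closes out these files is driven from userland through the MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem; memcontrol(8) uses the same interface to manage memory-range attributes. A sketch of the two-step MEMRANGE_GET pattern the handler expects, assuming a memory-range backend is attached (otherwise the ioctl fails with EOPNOTSUPP):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct mem_range_op mo;
    struct mem_range_desc *md;
    int fd, i, nd;

    fd = open("/dev/mem", O_RDONLY);
    if (fd == -1)
        err(1, "open(/dev/mem)");

    /* First call with no buffer: the driver reports how many ranges exist. */
    memset(&mo, 0, sizeof(mo));
    if (ioctl(fd, MEMRANGE_GET, &mo) == -1)
        err(1, "MEMRANGE_GET (count)");
    nd = mo.mo_arg[0];

    md = calloc(nd, sizeof(*md));
    if (md == NULL)
        err(1, "calloc");
    mo.mo_arg[0] = nd;
    mo.mo_desc = md;
    if (ioctl(fd, MEMRANGE_GET, &mo) == -1)
        err(1, "MEMRANGE_GET (descriptors)");

    for (i = 0; i < mo.mo_arg[0]; i++)
        printf("base %#jx len %#jx owner %.8s flags %#x\n",
            (uintmax_t)md[i].mr_base, (uintmax_t)md[i].mr_len,
            md[i].mr_owner, (unsigned)md[i].mr_flags);

    free(md);
    close(fd);
    return (0);
}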