Index: head/sys/compat/linuxkpi/common/include/linux/scatterlist.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/scatterlist.h (revision 347088) +++ head/sys/compat/linuxkpi/common/include/linux/scatterlist.h (revision 347089) @@ -1,458 +1,459 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * Copyright (c) 2015 Matthew Dillon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

+struct bus_dmamap;

struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t dma_address;
-	unsigned int dma_length;
+	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);

struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};

struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int pg_advance;
	} internal;
};

#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC	0x87654321UL
#define	SG_CHAIN	SG_PAGE_LINK_CHAIN
#define	SG_END		SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->dma_address
-#define	sg_dma_len(sg)		(sg)->dma_length
+#define	sg_dma_len(sg)		(sg)->length

#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{
	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

static struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		return ((void *)__get_free_page(gfp_mask));
	} else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
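/*
 * Example usage of the accessors above; a minimal sketch in which the
 * wired kernel buffer "buf" of "len" bytes is hypothetical:
 *
 *	struct scatterlist sgl[1];
 *	struct scatterlist *sg;
 *	int i;
 *
 *	sg_init_table(sgl, 1);
 *	sg_set_buf(&sgl[0], buf, len);
 *	for_each_sg(sgl, sg, 1, i)
 *		printf("entry %d: %u bytes at page offset %u\n",
 *		    i, sg->length, sg->offset);
 */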
sg_kfree(struct scatterlist *sg, unsigned int nents) { if (nents == SG_MAX_SINGLE_ALLOC) { free_page((unsigned long)sg); } else kfree(sg); } static inline void __sg_free_table(struct sg_table *table, unsigned int max_ents, bool skip_first_chunk, sg_free_fn * free_fn) { struct scatterlist *sgl, *next; if (unlikely(!table->sgl)) return; sgl = table->sgl; while (table->orig_nents) { unsigned int alloc_size = table->orig_nents; unsigned int sg_size; if (alloc_size > max_ents) { next = sg_chain_ptr(&sgl[max_ents - 1]); alloc_size = max_ents; sg_size = alloc_size - 1; } else { sg_size = alloc_size; next = NULL; } table->orig_nents -= sg_size; if (skip_first_chunk) skip_first_chunk = 0; else free_fn(sgl, alloc_size); sgl = next; } table->sgl = NULL; } static inline void sg_free_table(struct sg_table *table) { __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree); } static inline int __sg_alloc_table(struct sg_table *table, unsigned int nents, unsigned int max_ents, struct scatterlist *first_chunk, gfp_t gfp_mask, sg_alloc_fn *alloc_fn) { struct scatterlist *sg, *prv; unsigned int left; memset(table, 0, sizeof(*table)); if (nents == 0) return (-EINVAL); left = nents; prv = NULL; do { unsigned int sg_size; unsigned int alloc_size = left; if (alloc_size > max_ents) { alloc_size = max_ents; sg_size = alloc_size - 1; } else sg_size = alloc_size; left -= sg_size; if (first_chunk) { sg = first_chunk; first_chunk = NULL; } else { sg = alloc_fn(alloc_size, gfp_mask); } if (unlikely(!sg)) { if (prv) table->nents = ++table->orig_nents; return (-ENOMEM); } sg_init_table(sg, alloc_size); table->nents = table->orig_nents += sg_size; if (prv) sg_chain(prv, max_ents, sg); else table->sgl = sg; if (!left) sg_mark_end(&sg[sg_size - 1]); prv = sg; } while (left); return (0); } static inline int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) { int ret; ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, NULL, gfp_mask, sg_kmalloc); if (unlikely(ret)) __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree); return (ret); } static inline int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int count, unsigned long off, unsigned long size, unsigned int max_segment, gfp_t gfp_mask) { unsigned int i, segs, cur, len; int rc; struct scatterlist *s; if (__predict_false(!max_segment || offset_in_page(max_segment))) return (-EINVAL); len = 0; for (segs = i = 1; i < count; ++i) { len += PAGE_SIZE; if (len >= max_segment || page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) { ++segs; len = 0; } } if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask)))) return (rc); cur = 0; for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { unsigned long seg_size; unsigned int j; len = 0; for (j = cur + 1; j < count; ++j) { len += PAGE_SIZE; if (len >= max_segment || page_to_pfn(pages[j]) != page_to_pfn(pages[j - 1]) + 1) break; } seg_size = ((j - cur) << PAGE_SHIFT) - off; sg_set_page(s, pages[cur], min(size, seg_size), off); size -= seg_size; off = 0; cur = j; } return (0); } static inline int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int count, unsigned long off, unsigned long size, gfp_t gfp_mask) { return (__sg_alloc_table_from_pages(sgt, pages, count, off, size, SCATTERLIST_MAX_SEGMENT, gfp_mask)); } static inline int sg_nents(struct scatterlist *sg) { int nents; for (nents = 0; sg; sg = sg_next(sg)) nents++; return (nents); } static inline void __sg_page_iter_start(struct sg_page_iter *piter, struct scatterlist *sglist, 
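/*
 * A minimal sketch of the table allocator above: physically contiguous
 * pages coalesce into one entry, so "npages" pages may yield fewer than
 * "npages" segments (sgt.nents <= npages). The pages[] array and npages
 * are hypothetical.
 *
 *	struct sg_table sgt;
 *	int error;
 *
 *	error = sg_alloc_table_from_pages(&sgt, pages, npages, 0,
 *	    (unsigned long)npages << PAGE_SHIFT, GFP_KERNEL);
 *	if (error == 0) {
 *		... hand sgt.sgl with sgt.nents entries to DMA ...
 *		sg_free_table(&sgt);
 *	}
 */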
unsigned int nents, unsigned long pgoffset) { piter->internal.pg_advance = 0; piter->internal.nents = nents; piter->sg = sglist; piter->sg_pgoffset = pgoffset; } static inline void _sg_iter_next(struct sg_page_iter *iter) { struct scatterlist *sg; unsigned int pgcount; sg = iter->sg; pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT; ++iter->sg_pgoffset; while (iter->sg_pgoffset >= pgcount) { iter->sg_pgoffset -= pgcount; sg = sg_next(sg); --iter->maxents; if (sg == NULL || iter->maxents == 0) break; pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT; } iter->sg = sg; } static inline int sg_page_count(struct scatterlist *sg) { return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT); } static inline bool __sg_page_iter_next(struct sg_page_iter *piter) { if (piter->internal.nents == 0) return (0); if (piter->sg == NULL) return (0); piter->sg_pgoffset += piter->internal.pg_advance; piter->internal.pg_advance = 1; while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { piter->sg_pgoffset -= sg_page_count(piter->sg); piter->sg = sg_next(piter->sg); if (--piter->internal.nents == 0) return (0); if (piter->sg == NULL) return (0); } return (1); } static inline void _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter, unsigned int nents, unsigned long pgoffset) { if (nents) { iter->sg = sgl; iter->sg_pgoffset = pgoffset - 1; iter->maxents = nents; _sg_iter_next(iter); } else { iter->sg = NULL; iter->sg_pgoffset = 0; iter->maxents = 0; } } static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *spi) { return (spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT)); } static inline struct page * sg_page_iter_page(struct sg_page_iter *piter) { return (nth_page(sg_page(piter->sg), piter->sg_pgoffset)); } #endif /* _LINUX_SCATTERLIST_H_ */ Index: head/sys/compat/linuxkpi/common/src/linux_pci.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_pci.c (revision 347088) +++ head/sys/compat/linuxkpi/common/src/linux_pci.c (revision 347089) @@ -1,829 +1,789 @@ /*- * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static device_probe_t linux_pci_probe; static device_attach_t linux_pci_attach; static device_detach_t linux_pci_detach; static device_suspend_t linux_pci_suspend; static device_resume_t linux_pci_resume; static device_shutdown_t linux_pci_shutdown; static device_method_t pci_methods[] = { DEVMETHOD(device_probe, linux_pci_probe), DEVMETHOD(device_attach, linux_pci_attach), DEVMETHOD(device_detach, linux_pci_detach), DEVMETHOD(device_suspend, linux_pci_suspend), DEVMETHOD(device_resume, linux_pci_resume), DEVMETHOD(device_shutdown, linux_pci_shutdown), DEVMETHOD_END }; struct linux_dma_priv { uint64_t dma_mask; struct mtx lock; bus_dma_tag_t dmat; struct pctrie ptree; }; #define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock) #define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock) static int linux_pdev_dma_init(struct pci_dev *pdev) { struct linux_dma_priv *priv; priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO); pdev->dev.dma_priv = priv; mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF); pctrie_init(&priv->ptree); return (0); } static int linux_pdev_dma_uninit(struct pci_dev *pdev) { struct linux_dma_priv *priv; priv = pdev->dev.dma_priv; if (priv->dmat) bus_dma_tag_destroy(priv->dmat); mtx_destroy(&priv->lock); free(priv, M_DEVBUF); pdev->dev.dma_priv = NULL; return (0); } int linux_dma_tag_init(struct device *dev, u64 dma_mask) { struct linux_dma_priv *priv; int error; priv = dev->dma_priv; if (priv->dmat) { if (priv->dma_mask == dma_mask) return (0); bus_dma_tag_destroy(priv->dmat); } priv->dma_mask = dma_mask; error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1, 0, /* alignment, boundary */ dma_mask, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ BUS_SPACE_MAXSIZE, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &priv->dmat); return (-error); } static struct pci_driver * linux_pci_find(device_t dev, const struct pci_device_id **idp) { const struct pci_device_id *id; struct pci_driver *pdrv; uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; vendor = pci_get_vendor(dev); device = pci_get_device(dev); subvendor = pci_get_subvendor(dev); subdevice = pci_get_subdevice(dev); spin_lock(&pci_lock); list_for_each_entry(pdrv, &pci_drivers, links) { for (id = pdrv->id_table; id->vendor != 0; id++) { if (vendor == id->vendor && (PCI_ANY_ID == id->device || device == id->device) && (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { *idp = id; spin_unlock(&pci_lock); return (pdrv); } } } spin_unlock(&pci_lock); return (NULL); } static int linux_pci_probe(device_t dev) { const struct pci_device_id *id; struct pci_driver *pdrv; if ((pdrv = linux_pci_find(dev, &id)) == NULL) return (ENXIO); if (device_get_driver(dev) != &pdrv->bsddriver) return (ENXIO); device_set_desc(dev, pdrv->name); return (0); } static int linux_pci_attach(device_t dev) { struct resource_list_entry *rle; struct pci_bus *pbus; struct pci_dev *pdev; struct pci_devinfo *dinfo; struct pci_driver *pdrv; const struct pci_device_id *id; device_t parent; devclass_t devclass; int error; linux_set_current(curthread); pdrv = 
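/*
 * linux_dma_tag_init() above lazily (re)creates the single-segment busdma
 * tag that all the mapping functions below share. A sketch of the consumer
 * side, assuming the usual dma_set_mask() wrapper reaches it; "pdev" is
 * hypothetical:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48)) != 0)
 *		(void) dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 */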
linux_pci_find(dev, &id); pdev = device_get_softc(dev); parent = device_get_parent(dev); devclass = device_get_devclass(parent); if (pdrv->isdrm) { dinfo = device_get_ivars(parent); device_set_ivars(dev, dinfo); } else { dinfo = device_get_ivars(dev); } pdev->dev.parent = &linux_root_device; pdev->dev.bsddev = dev; INIT_LIST_HEAD(&pdev->dev.irqents); pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); pdev->device = dinfo->cfg.device; pdev->vendor = dinfo->cfg.vendor; pdev->subsystem_vendor = dinfo->cfg.subvendor; pdev->subsystem_device = dinfo->cfg.subdevice; pdev->class = pci_get_class(dev); pdev->revision = pci_get_revid(dev); pdev->pdrv = pdrv; kobject_init(&pdev->dev.kobj, &linux_dev_ktype); kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, kobject_name(&pdev->dev.kobj)); rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0); if (rle != NULL) pdev->dev.irq = rle->start; else pdev->dev.irq = LINUX_IRQ_INVALID; pdev->irq = pdev->dev.irq; error = linux_pdev_dma_init(pdev); if (error) goto out; if (pdev->bus == NULL) { pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO); pbus->self = pdev; pbus->number = pci_get_bus(dev); pdev->bus = pbus; } spin_lock(&pci_lock); list_add(&pdev->links, &pci_devices); spin_unlock(&pci_lock); error = pdrv->probe(pdev, id); out: if (error) { spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); put_device(&pdev->dev); error = -error; } return (error); } static int linux_pci_detach(device_t dev) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); pdev->pdrv->remove(pdev); linux_pdev_dma_uninit(pdev); spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); device_set_desc(dev, NULL); put_device(&pdev->dev); return (0); } static int linux_pci_suspend(device_t dev) { const struct dev_pm_ops *pmops; struct pm_message pm = { }; struct pci_dev *pdev; int error; error = 0; linux_set_current(curthread); pdev = device_get_softc(dev); pmops = pdev->pdrv->driver.pm; if (pdev->pdrv->suspend != NULL) error = -pdev->pdrv->suspend(pdev, pm); else if (pmops != NULL && pmops->suspend != NULL) { error = -pmops->suspend(&pdev->dev); if (error == 0 && pmops->suspend_late != NULL) error = -pmops->suspend_late(&pdev->dev); } return (error); } static int linux_pci_resume(device_t dev) { const struct dev_pm_ops *pmops; struct pci_dev *pdev; int error; error = 0; linux_set_current(curthread); pdev = device_get_softc(dev); pmops = pdev->pdrv->driver.pm; if (pdev->pdrv->resume != NULL) error = -pdev->pdrv->resume(pdev); else if (pmops != NULL && pmops->resume != NULL) { if (pmops->resume_early != NULL) error = -pmops->resume_early(&pdev->dev); if (error == 0 && pmops->resume != NULL) error = -pmops->resume(&pdev->dev); } return (error); } static int linux_pci_shutdown(device_t dev) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); if (pdev->pdrv->shutdown != NULL) pdev->pdrv->shutdown(pdev); return (0); } static int _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) { int error; linux_set_current(curthread); spin_lock(&pci_lock); list_add(&pdrv->links, &pci_drivers); spin_unlock(&pci_lock); pdrv->bsddriver.name = pdrv->name; pdrv->bsddriver.methods = pci_methods; pdrv->bsddriver.size = sizeof(struct pci_dev); mtx_lock(&Giant); error = devclass_add_driver(dc, &pdrv->bsddriver, BUS_PASS_DEFAULT, &pdrv->bsdclass); mtx_unlock(&Giant); return (-error); } int linux_pci_register_driver(struct pci_driver 
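/*
 * What registration looks like from the driver side; the IDs and the
 * foo_* callbacks are hypothetical. probe() is invoked from
 * linux_pci_attach() above once a BSD device matches the id_table.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1003,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 *	static struct pci_driver foo_driver = {
 *		.name = "foo",
 *		.id_table = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&foo_driver);
 */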
*pdrv) { devclass_t dc; dc = devclass_find("pci"); if (dc == NULL) return (-ENXIO); pdrv->isdrm = false; return (_linux_pci_register_driver(pdrv, dc)); } int linux_pci_register_drm_driver(struct pci_driver *pdrv) { devclass_t dc; dc = devclass_create("vgapci"); if (dc == NULL) return (-ENXIO); pdrv->isdrm = true; pdrv->name = "drmn"; return (_linux_pci_register_driver(pdrv, dc)); } void linux_pci_unregister_driver(struct pci_driver *pdrv) { devclass_t bus; bus = devclass_find("pci"); spin_lock(&pci_lock); list_del(&pdrv->links); spin_unlock(&pci_lock); mtx_lock(&Giant); if (bus != NULL) devclass_delete_driver(bus, &pdrv->bsddriver); mtx_unlock(&Giant); } CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); struct linux_dma_obj { void *vaddr; uint64_t dma_addr; bus_dmamap_t dmamap; }; static uma_zone_t linux_dma_trie_zone; static uma_zone_t linux_dma_obj_zone; static void linux_dma_init(void *arg) { linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 0); linux_dma_obj_zone = uma_zcreate("linux_dma_object", sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); static void linux_dma_uninit(void *arg) { uma_zdestroy(linux_dma_obj_zone); uma_zdestroy(linux_dma_trie_zone); } SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); static void * linux_dma_trie_alloc(struct pctrie *ptree) { return (uma_zalloc(linux_dma_trie_zone, 0)); } static void linux_dma_trie_free(struct pctrie *ptree, void *node) { uma_zfree(linux_dma_trie_zone, node); } PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, linux_dma_trie_free); void * linux_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { struct linux_dma_priv *priv; vm_paddr_t high; size_t align; void *mem; if (dev == NULL || dev->dma_priv == NULL) { *dma_handle = 0; return (NULL); } priv = dev->dma_priv; if (priv->dma_mask) high = priv->dma_mask; else if (flag & GFP_DMA32) high = BUS_SPACE_MAXADDR_32BIT; else high = BUS_SPACE_MAXADDR; align = PAGE_SIZE << get_order(size); mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0, VM_MEMATTR_DEFAULT); if (mem != NULL) { *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size); if (*dma_handle == 0) { kmem_free((vm_offset_t)mem, size); mem = NULL; } } else { *dma_handle = 0; } return (mem); } dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) { struct linux_dma_priv *priv; struct linux_dma_obj *obj; int error, nseg; bus_dma_segment_t seg; priv = dev->dma_priv; obj = uma_zalloc(linux_dma_obj_zone, 0); DMA_PRIV_LOCK(priv); if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) { DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); return (0); } nseg = -1; if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len, BUS_DMA_NOWAIT, &seg, &nseg) != 0) { bus_dmamap_destroy(priv->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); return (0); } KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); obj->dma_addr = seg.ds_addr; error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); if (error != 0) { bus_dmamap_unload(priv->dmat, obj->dmamap); bus_dmamap_destroy(priv->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); return (0); } DMA_PRIV_UNLOCK(priv); return (obj->dma_addr); } void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) { struct 
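/*
 * linux_dma_map_phys() and linux_dma_unmap() pair up through the bus
 * address: the map side stores its bus_dmamap in priv->ptree keyed by
 * dma_addr, and the unmap side below looks it up again. A minimal
 * sketch, where "dev" and the vm_page_t "m" are hypothetical:
 *
 *	dma_addr_t addr;
 *
 *	addr = linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 *	if (addr != 0) {
 *		... program the device with addr ...
 *		linux_dma_unmap(dev, addr, PAGE_SIZE);
 *	}
 */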
linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
-	struct linux_dma_obj *obj;
-	struct scatterlist *dma_sg, *sg;
-	int dma_nents, error, nseg;
-	size_t seg_len;
-	vm_paddr_t seg_phys, prev_phys_end;
+	struct scatterlist *sg;
+	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

-	obj = uma_zalloc(linux_dma_obj_zone, 0);
-
	DMA_PRIV_LOCK(priv);
-	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
+
+	/* create common DMA map in the first S/G entry */
+	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
-		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

-	sg = sgl;
-	dma_sg = sg;
-	dma_nents = 0;
-
-	while (nents > 0) {
-		seg_phys = sg_phys(sg);
-		seg_len = sg->length;
-		while (--nents > 0) {
-			prev_phys_end = sg_phys(sg) + sg->length;
-			sg = sg_next(sg);
-			if (prev_phys_end != sg_phys(sg))
-				break;
-			seg_len += sg->length;
-		}
-
+	/* load all S/G list entries */
+	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
-		if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap,
-		    seg_phys, seg_len, BUS_DMA_NOWAIT,
+		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
+		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
-			bus_dmamap_unload(priv->dmat, obj->dmamap);
-			bus_dmamap_destroy(priv->dmat, obj->dmamap);
+			bus_dmamap_unload(priv->dmat, sgl->dma_map);
+			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
-			uma_zfree(linux_dma_obj_zone, obj);
			return (0);
		}
-		KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
+		KASSERT(nseg == 0,
+		    ("More than one segment (nseg=%d)", nseg + 1));

-		sg_dma_address(dma_sg) = seg.ds_addr;
-		sg_dma_len(dma_sg) = seg.ds_len;
-
-		dma_sg = sg_next(dma_sg);
-		dma_nents++;
-	}
-
-	obj->dma_addr = sg_dma_address(sgl);
-
-	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
-	if (error != 0) {
-		bus_dmamap_unload(priv->dmat, obj->dmamap);
-		bus_dmamap_destroy(priv->dmat, obj->dmamap);
-		DMA_PRIV_UNLOCK(priv);
-		uma_zfree(linux_dma_obj_zone, obj);
-		return (0);
+		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

-	return (dma_nents);
+
+	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
-	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
-	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, sg_dma_address(sgl));
-	if (obj == NULL) {
-		DMA_PRIV_UNLOCK(priv);
-		return;
-	}
-	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, sg_dma_address(sgl));
-	bus_dmamap_unload(priv->dmat, obj->dmamap);
-	bus_dmamap_destroy(priv->dmat, obj->dmamap);
+	bus_dmamap_unload(priv->dmat, sgl->dma_map);
+	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
-
-	uma_zfree(linux_dma_obj_zone, obj);
}

struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)
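/*
 * After the change above, dma_map_sg() keeps a 1:1 relation between
 * scatterlist entries and loaded segments: the single bus_dmamap lives in
 * sgl->dma_map, sg_dma_address()/sg_dma_len() are valid for every entry,
 * and unmapping needs only the head of the list, with no pctrie
 * bookkeeping. A sketch, assuming the usual dma_map_sg()/dma_unmap_sg()
 * wrappers around the *_attrs functions; "dev", "sgl", "nents" and
 * device_ring_post() are hypothetical:
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, n, i)
 *		device_ring_post(i, sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, n, DMA_TO_DEVICE);
 */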
mtx_unlock(&(pool)->pool_lock) static inline int dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) { struct linux_dma_obj *obj = mem; struct dma_pool *pool = arg; int error, nseg; bus_dma_segment_t seg; nseg = -1; DMA_POOL_LOCK(pool); error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, &seg, &nseg); DMA_POOL_UNLOCK(pool); if (error != 0) { return (error); } KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); obj->dma_addr = seg.ds_addr; return (0); } static void dma_pool_obj_dtor(void *mem, int size, void *arg) { struct linux_dma_obj *obj = mem; struct dma_pool *pool = arg; DMA_POOL_LOCK(pool); bus_dmamap_unload(pool->pool_dmat, obj->dmamap); DMA_POOL_UNLOCK(pool); } static int dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, int flags) { struct dma_pool *pool = arg; struct linux_dma_priv *priv; struct linux_dma_obj *obj; int error, i; priv = pool->pool_device->dma_priv; for (i = 0; i < count; i++) { obj = uma_zalloc(linux_dma_obj_zone, flags); if (obj == NULL) break; error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, BUS_DMA_NOWAIT, &obj->dmamap); if (error!= 0) { uma_zfree(linux_dma_obj_zone, obj); break; } store[i] = obj; } return (i); } static void dma_pool_obj_release(void *arg, void **store, int count) { struct dma_pool *pool = arg; struct linux_dma_priv *priv; struct linux_dma_obj *obj; int i; priv = pool->pool_device->dma_priv; for (i = 0; i < count; i++) { obj = store[i]; bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); uma_zfree(linux_dma_obj_zone, obj); } } struct dma_pool * linux_dma_pool_create(char *name, struct device *dev, size_t size, size_t align, size_t boundary) { struct linux_dma_priv *priv; struct dma_pool *pool; priv = dev->dma_priv; pool = kzalloc(sizeof(*pool), GFP_KERNEL); pool->pool_device = dev; pool->pool_entry_size = size; if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), align, boundary, /* alignment, boundary */ priv->dma_mask, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &pool->pool_dmat)) { kfree(pool); return (NULL); } pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, dma_pool_obj_release, pool, 0); mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); pctrie_init(&pool->pool_ptree); return (pool); } void linux_dma_pool_destroy(struct dma_pool *pool) { uma_zdestroy(pool->pool_zone); bus_dma_tag_destroy(pool->pool_dmat); mtx_destroy(&pool->pool_lock); kfree(pool); } void * linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { struct linux_dma_obj *obj; obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags); if (obj == NULL) return (NULL); DMA_POOL_LOCK(pool); if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { DMA_POOL_UNLOCK(pool); uma_zfree_arg(pool->pool_zone, obj, pool); return (NULL); } DMA_POOL_UNLOCK(pool); *handle = obj->dma_addr; return (obj->vaddr); } void linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) { struct linux_dma_obj *obj; DMA_POOL_LOCK(pool); obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); if (obj == NULL) { DMA_POOL_UNLOCK(pool); return; } LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); DMA_POOL_UNLOCK(pool); uma_zfree_arg(pool->pool_zone, obj, pool); } Index: head/sys/sys/param.h 
=================================================================== --- head/sys/sys/param.h (revision 347088) +++ head/sys/sys/param.h (revision 347089) @@ -1,367 +1,367 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * head/en_US.ISO8859-1/books/porters-handbook/versions/chapter.xml * * scheme is: Rxx * 'R' is in the range 0 to 4 if this is a release branch or * X.0-CURRENT before releng/X.0 is created, otherwise 'R' is * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1300021 /* Master, propagated to newvers */ +#define __FreeBSD_version 1300022 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. * * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. However, be aware that as presence of this * macro is still not widespread (e.g. 
older FreeBSD versions, 3rd party
 * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in
 * external applications without also checking for __FreeBSD__ as an
 * alternative.
 */
#undef __FreeBSD_kernel__
#define __FreeBSD_kernel__

#if defined(_KERNEL) || defined(IN_RTLD)
#define	P_OSREL_SIGWAIT			700000
#define	P_OSREL_SIGSEGV			700004
#define	P_OSREL_MAP_ANON		800104
#define	P_OSREL_MAP_FSTRICT		1100036
#define	P_OSREL_SHUTDOWN_ENOTCONN	1100077
#define	P_OSREL_MAP_GUARD		1200035
#define	P_OSREL_WRFSBASE		1200041
#define	P_OSREL_CK_CYLGRP		1200046
#define	P_OSREL_VMTOTAL64		1200054
#define	P_OSREL_CK_SUPERBLOCK		1300000
#define	P_OSREL_CK_INODE		1300005

#define	P_OSREL_MAJOR(x)		((x) / 100000)
#endif

#ifndef LOCORE
#include <sys/types.h>
#endif

/*
 * Machine-independent constants (some used in following include files).
 * Redefined constants are from POSIX 1003.1 limits file.
 *
 * MAXCOMLEN should be >= sizeof(ac_comm) (see <acct.h>)
 */
#include <sys/syslimits.h>

#define	MAXCOMLEN	19		/* max command name remembered */
#define	MAXINTERP	PATH_MAX	/* max interpreter file name length */
#define	MAXLOGNAME	33		/* max login name length (incl. NUL) */
#define	MAXUPRC		CHILD_MAX	/* max simultaneous processes */
#define	NCARGS		ARG_MAX		/* max bytes for an exec function */
#define	NGROUPS		(NGROUPS_MAX+1)	/* max number groups */
#define	NOFILE		OPEN_MAX	/* max open files per process */
#define	NOGROUP		65535		/* marker for empty group set member */
#define	MAXHOSTNAMELEN	256		/* max hostname size */
#define	SPECNAMELEN	255		/* max length of devicename */

/* More types and definitions used throughout the kernel. */
#ifdef _KERNEL
#include <sys/cdefs.h>
#include <sys/errno.h>
#ifndef LOCORE
#include <sys/time.h>
#include <sys/priority.h>
#endif

#ifndef FALSE
#define	FALSE	0
#endif
#ifndef TRUE
#define	TRUE	1
#endif
#endif

#ifndef _KERNEL
/* Signals. */
#include <sys/signal.h>
#endif

/* Machine type dependent parameters. */
#include <machine/param.h>
#ifndef _KERNEL
#include <sys/limits.h>
#endif

#ifndef DEV_BSHIFT
#define	DEV_BSHIFT	9		/* log2(DEV_BSIZE) */
#endif
#define	DEV_BSIZE	(1<<DEV_BSHIFT)

#ifndef	BLKDEV_IOSIZE
#define	BLKDEV_IOSIZE	PAGE_SIZE	/* default block device I/O size */
#endif
#ifndef	DFLTPHYS
#define	DFLTPHYS	(64 * 1024)	/* default max raw I/O transfer size */
#endif
#ifndef	MAXPHYS
#define	MAXPHYS		(128 * 1024)	/* max raw I/O transfer size */
#endif
#ifndef	MAXDUMPPGS
#define	MAXDUMPPGS	(DFLTPHYS/PAGE_SIZE)
#endif

/*
 * Constants related to network buffer management.
 * MCLBYTES must be no larger than PAGE_SIZE.
 */
#ifndef	MSIZE
#define	MSIZE		256		/* size of an mbuf */
#endif

#ifndef	MCLSHIFT
#define	MCLSHIFT	11		/* convert bytes to mbuf clusters */
#endif	/* MCLSHIFT */

#define	MCLBYTES	(1 << MCLSHIFT)	/* size of an mbuf cluster */

#if PAGE_SIZE < 2048
#define	MJUMPAGESIZE	MCLBYTES
#elif PAGE_SIZE <= 8192
#define	MJUMPAGESIZE	PAGE_SIZE
#else
#define	MJUMPAGESIZE	(8 * 1024)
#endif

#define	MJUM9BYTES	(9 * 1024)	/* jumbo cluster 9k */
#define	MJUM16BYTES	(16 * 1024)	/* jumbo cluster 16k */

/*
 * Some macros for units conversion
 */

/* clicks to bytes */
#ifndef ctob
#define	ctob(x)	((x)<<PAGE_SHIFT)
#endif

/* bytes to clicks */
#ifndef btoc
#define	btoc(x)	(((vm_offset_t)(x)+PAGE_MASK)>>PAGE_SHIFT)
#endif

/*
 * btodb() is messy and perhaps slow because `bytes' may be an off_t.  We
 * want to shift an unsigned type to avoid sign extension and we don't
 * want to widen `bytes' unnecessarily.  Assume that the result fits in
 * a daddr_t.
 */
#ifndef btodb
#define	btodb(bytes)			/* calculates (bytes / DEV_BSIZE) */ \
	(sizeof (bytes) > sizeof(long) \
	 ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
	 : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
#endif

#ifndef dbtob
#define	dbtob(db)			/* calculates (db * DEV_BSIZE) */ \
	((off_t)(db) << DEV_BSHIFT)
#endif

#define	PRIMASK	0x0ff
#define	PCATCH	0x100	/* OR'd with pri for tsleep to check signals */
#define	PDROP	0x200	/* OR'd with pri to stop re-entry of interlock mutex */

#define	NZERO	0		/* default "nice" */

#define	NBBY	8		/* number of bits in a byte */
#define	NBPW	sizeof(int)	/* number of bytes per word (integer) */

#define	CMASK	022		/* default file mask: S_IWGRP|S_IWOTH */

#define	NODEV	(dev_t)(-1)	/* non-existent device */

/*
 * File system parameters and macros.
 *
 * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes
 *	per block.  MAXBSIZE may be made larger without affecting
 *	any existing filesystems as long as it does not exceed MAXPHYS,
 *	and may be made smaller at the risk of not being able to use
 *	filesystems which require a block size exceeding MAXBSIZE.
 *
 * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache.  This must
 *	be >= MAXBSIZE and can be set differently for different
 *	architectures by defining it in <machine/param.h>.
 *	Making this larger allows NFS to do larger reads/writes.
* * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). * The default value here can be overridden on a per-architecture * basis by defining it in . * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. */ #define MAXBSIZE 65536 /* must be power of 2 */ #ifndef MAXBCACHEBUF #define MAXBCACHEBUF MAXBSIZE /* must be a power of 2 >= MAXBSIZE */ #endif #ifndef BKVASIZE #define BKVASIZE 16384 /* must be power of 2 */ #endif #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define nitems(x) (sizeof((x)) / sizeof((x)[0])) #define rounddown(x, y) (((x)/(y))*(y)) #define rounddown2(x, y) ((x)&(~((y)-1))) /* if y is power of two */ #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). * * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. 
 */
#define	FSHIFT	11		/* bits to right of fixed binary point */
#define	FSCALE	(1<<FSHIFT)

#define	dbtoc(db)			/* calculates devblks to pages */ \
	((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT))

#define	ctodb(db)			/* calculates pages to devblks */ \
	((db) << (PAGE_SHIFT - DEV_BSHIFT))

/*
 * Old spelling of __containerof().
 */
#define	member2struct(s, m, x)						\
	((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))

/*
 * Access a variable length array that has been declared as a fixed
 * length array.
 */
#define	__PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset])

#endif	/* _SYS_PARAM_H_ */
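The __FreeBSD_version bump to 1300022 gives external code a compile-time
handle on the scatterlist change above (sg_dma_len() now aliasing
sg->length, with the single busdma map carried in sgl->dma_map). One way a
third-party LinuxKPI consumer might detect it; the HAVE_LKPI_SG_DMA_MAP
macro is hypothetical:

	#include <sys/param.h>

	#if __FreeBSD_version >= 1300022
	/* dma_map_sg() loads entries in place; no coalescing. */
	#define	HAVE_LKPI_SG_DMA_MAP	1
	#endif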