diff --git a/sys/compat/linuxkpi/common/include/linux/device.h b/sys/compat/linuxkpi/common/include/linux/device.h index a6e735da92bd..a85ea0450b04 100644 --- a/sys/compat/linuxkpi/common/include/linux/device.h +++ b/sys/compat/linuxkpi/common/include/linux/device.h @@ -1,602 +1,602 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUXKPI_LINUX_DEVICE_H_ #define _LINUXKPI_LINUX_DEVICE_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* via linux/dev_printk.h */ #include #include #include struct device; struct fwnode_handle; struct class { const char *name; struct module *owner; struct kobject kobj; devclass_t bsdclass; const struct dev_pm_ops *pm; const struct attribute_group **dev_groups; void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); char * (*devnode)(struct device *dev, umode_t *mode); }; struct dev_pm_ops { int (*prepare)(struct device *dev); int (*suspend)(struct device *dev); int (*suspend_late)(struct device *dev); int (*resume)(struct device *dev); int (*resume_early)(struct device *dev); int (*freeze)(struct device *dev); int (*freeze_late)(struct device *dev); int (*thaw)(struct device *dev); int (*thaw_early)(struct device *dev); int (*poweroff)(struct device *dev); int (*poweroff_late)(struct device *dev); int (*restore)(struct device *dev); int (*restore_early)(struct device *dev); int (*runtime_suspend)(struct device *dev); int (*runtime_resume)(struct device *dev); int (*runtime_idle)(struct device *dev); }; struct device_driver { const char *name; const struct dev_pm_ops *pm; }; struct device_type { const char *name; }; struct device { struct device *parent; struct list_head irqents; device_t bsddev; /* * The following flag is used to determine if the LinuxKPI is * responsible for detaching the BSD device or not. If the * LinuxKPI got the BSD device using devclass_get_device(), it * must not try to detach or delete it, because it's already * done somewhere else. 
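 *
 * In short: device_initialize() and device_register() below leave
 * this false when they look the BSD device up via
 * devclass_get_device(), and set it true when they create the
 * device themselves via device_add_child(); only in the latter case
 * do device_unregister() and device_del() call device_delete_child().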
*/ bool bsddev_attached_here; struct device_driver *driver; struct device_type *type; dev_t devt; struct class *class; void (*release)(struct device *dev); struct kobject kobj; void *dma_priv; void *driver_data; unsigned int irq; #define LINUX_IRQ_INVALID 65535 unsigned int irq_start; unsigned int irq_end; const struct attribute_group **groups; struct fwnode_handle *fwnode; struct cdev *backlight_dev; struct backlight_device *bd; spinlock_t devres_lock; struct list_head devres_head; }; extern struct device linux_root_device; extern struct kobject linux_class_root; extern const struct kobj_type linux_dev_ktype; extern const struct kobj_type linux_class_ktype; struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); const void *(*namespace)(struct class *, const struct class_attribute *); }; #define CLASS_ATTR(_name, _mode, _show, _store) \ struct class_attribute class_attr_##_name = \ { { #_name, NULL, _mode }, _show, _store } struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); }; #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = \ __ATTR(_name, _mode, _show, _store) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) /* Simple class attribute that is just a static string */ struct class_attribute_string { struct class_attribute attr; char *str; }; static inline ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, char *buf) { struct class_attribute_string *cs; cs = container_of(attr, struct class_attribute_string, attr); return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); } /* Currently read-only only */ #define _CLASS_ATTR_STRING(_name, _mode, _str) \ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } #define CLASS_ATTR_STRING(_name, _mode, _str) \ struct class_attribute_string class_attr_##_name = \ _CLASS_ATTR_STRING(_name, _mode, _str) #define dev_err(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_crit(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_warn(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_info(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_notice(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_emerg(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_dbg(dev, fmt, ...) do { } while (0) #define dev_printk(lvl, dev, fmt, ...) \ device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_err_once(dev, ...) do { \ static bool __dev_err_once; \ if (!__dev_err_once) { \ __dev_err_once = 1; \ dev_err(dev, __VA_ARGS__); \ } \ } while (0) #define dev_err_ratelimited(dev, ...) do { \ static linux_ratelimit_t __ratelimited; \ if (linux_ratelimited(&__ratelimited)) \ dev_err(dev, __VA_ARGS__); \ } while (0) #define dev_warn_ratelimited(dev, ...) do { \ static linux_ratelimit_t __ratelimited; \ if (linux_ratelimited(&__ratelimited)) \ dev_warn(dev, __VA_ARGS__); \ } while (0) /* Public and LinuxKPI internal devres functions. 
*/ void *lkpi_devres_alloc(void(*release)(struct device *, void *), size_t, gfp_t); void lkpi_devres_add(struct device *, void *); void lkpi_devres_free(void *); void *lkpi_devres_find(struct device *, void(*release)(struct device *, void *), int (*match)(struct device *, void *, void *), void *); int lkpi_devres_destroy(struct device *, void(*release)(struct device *, void *), int (*match)(struct device *, void *, void *), void *); #define devres_alloc(_r, _s, _g) lkpi_devres_alloc(_r, _s, _g) #define devres_add(_d, _p) lkpi_devres_add(_d, _p) #define devres_free(_p) lkpi_devres_free(_p) #define devres_find(_d, _rfn, _mfn, _mp) \ lkpi_devres_find(_d, _rfn, _mfn, _mp) #define devres_destroy(_d, _rfn, _mfn, _mp) \ lkpi_devres_destroy(_d, _rfn, _mfn, _mp) void lkpi_devres_release_free_list(struct device *); void lkpi_devres_unlink(struct device *, void *); void lkpi_devm_kmalloc_release(struct device *, void *); static inline const char * dev_driver_string(const struct device *dev) { driver_t *drv; const char *str = ""; if (dev->bsddev != NULL) { drv = device_get_driver(dev->bsddev); if (drv != NULL) str = drv->name; } return (str); } static inline void * dev_get_drvdata(const struct device *dev) { return dev->driver_data; } static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } static inline struct device * get_device(struct device *dev) { if (dev) kobject_get(&dev->kobj); return (dev); } static inline char * dev_name(const struct device *dev) { return kobject_name(&dev->kobj); } #define dev_set_name(_dev, _fmt, ...) \ kobject_set_name(&(_dev)->kobj, (_fmt), ##__VA_ARGS__) static inline void put_device(struct device *dev) { if (dev) kobject_put(&dev->kobj); } struct class *class_create(struct module *owner, const char *name); static inline int class_register(struct class *class) { class->bsdclass = devclass_create(class->name); kobject_init(&class->kobj, &linux_class_ktype); kobject_set_name(&class->kobj, class->name); kobject_add(&class->kobj, &linux_class_root, class->name); return (0); } static inline void class_unregister(struct class *class) { kobject_put(&class->kobj); } static inline struct device *kobj_to_dev(struct kobject *kobj) { return container_of(kobj, struct device, kobj); } struct device *device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); struct device *device_create_groups_vargs(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args); /* * Devices are registered and created for exporting to sysfs. Create * implies register and register assumes the device fields have been * setup appropriately before being called. 
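 *
 * A minimal usage sketch (hypothetical consumer code, not part of
 * this header):
 *
 *	struct device *dev;
 *
 *	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *	dev->class = my_class;		(a class_create()d class)
 *	dev->parent = parent;		(or NULL to reuse unit 0)
 *	device_initialize(dev);		(resolves or creates dev->bsddev)
 *	error = device_add(dev);	(kobject_add plus sysfs groups)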
*/ static inline void device_initialize(struct device *dev) { device_t bsddev = NULL; int unit = -1; if (dev->devt) { unit = MINOR(dev->devt); bsddev = devclass_get_device(dev->class->bsdclass, unit); dev->bsddev_attached_here = false; } else if (dev->parent == NULL) { bsddev = devclass_get_device(dev->class->bsdclass, 0); dev->bsddev_attached_here = false; } else { dev->bsddev_attached_here = true; } if (bsddev == NULL && dev->parent != NULL) { bsddev = device_add_child(dev->parent->bsddev, dev->class->kobj.name, unit); } if (bsddev != NULL) device_set_softc(bsddev, dev); dev->bsddev = bsddev; MPASS(dev->bsddev != NULL); kobject_init(&dev->kobj, &linux_dev_ktype); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); } static inline int device_add(struct device *dev) { if (dev->bsddev != NULL) { if (dev->devt == 0) dev->devt = makedev(0, device_get_unit(dev->bsddev)); } kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev)); if (dev->groups) return (sysfs_create_groups(&dev->kobj, dev->groups)); return (0); } static inline void device_create_release(struct device *dev) { kfree(dev); } static inline struct device * device_create_with_groups(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...) { va_list vargs; struct device *dev; va_start(vargs, fmt); dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, fmt, vargs); va_end(vargs); return dev; } static inline bool device_is_registered(struct device *dev) { return (dev->bsddev != NULL); } static inline int device_register(struct device *dev) { device_t bsddev = NULL; int unit = -1; if (device_is_registered(dev)) goto done; if (dev->devt) { unit = MINOR(dev->devt); bsddev = devclass_get_device(dev->class->bsdclass, unit); dev->bsddev_attached_here = false; } else if (dev->parent == NULL) { bsddev = devclass_get_device(dev->class->bsdclass, 0); dev->bsddev_attached_here = false; } else { dev->bsddev_attached_here = true; } if (bsddev == NULL && dev->parent != NULL) { bsddev = device_add_child(dev->parent->bsddev, dev->class->kobj.name, unit); } if (bsddev != NULL) { if (dev->devt == 0) dev->devt = makedev(0, device_get_unit(bsddev)); device_set_softc(bsddev, dev); } dev->bsddev = bsddev; done: kobject_init(&dev->kobj, &linux_dev_ktype); kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev)); sysfs_create_groups(&dev->kobj, dev->class->dev_groups); return (0); } static inline void device_unregister(struct device *dev) { device_t bsddev; sysfs_remove_groups(&dev->kobj, dev->class->dev_groups); bsddev = dev->bsddev; dev->bsddev = NULL; if (bsddev != NULL && dev->bsddev_attached_here) { - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(device_get_parent(bsddev), bsddev); - mtx_unlock(&Giant); + bus_topo_unlock(); } put_device(dev); } static inline void device_del(struct device *dev) { device_t bsddev; bsddev = dev->bsddev; dev->bsddev = NULL; if (bsddev != NULL && dev->bsddev_attached_here) { - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(device_get_parent(bsddev), bsddev); - mtx_unlock(&Giant); + bus_topo_unlock(); } } static inline void device_destroy(struct class *class, dev_t devt) { device_t bsddev; int unit; unit = MINOR(devt); bsddev = devclass_get_device(class->bsdclass, unit); if (bsddev != NULL) device_unregister(device_get_softc(bsddev)); } static inline void device_release_driver(struct device *dev) { #if 0 /* This leads to panics. Disable temporarily. Keep to rework. 
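 * If this block is revived it should presumably take
 * bus_topo_lock()/bus_topo_unlock() around device_detach(), matching
 * the device_unregister()/device_del() conversion above.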
*/ /* We also need to cleanup LinuxKPI bits. What else? */ lkpi_devres_release_free_list(dev); dev_set_drvdata(dev, NULL); /* Do not call dev->release! */ mtx_lock(&Giant); if (device_is_attached(dev->bsddev)) device_detach(dev->bsddev); mtx_unlock(&Giant); #endif } static inline int device_reprobe(struct device *dev) { int error; device_release_driver(dev); mtx_lock(&Giant); error = device_probe_and_attach(dev->bsddev); mtx_unlock(&Giant); return (-error); } #define dev_pm_set_driver_flags(dev, flags) do { \ } while (0) static inline void linux_class_kfree(struct class *class) { kfree(class); } static inline void class_destroy(struct class *class) { if (class == NULL) return; class_unregister(class); } static inline int device_create_file(struct device *dev, const struct device_attribute *attr) { if (dev) return sysfs_create_file(&dev->kobj, &attr->attr); return -EINVAL; } static inline void device_remove_file(struct device *dev, const struct device_attribute *attr) { if (dev) sysfs_remove_file(&dev->kobj, &attr->attr); } static inline int class_create_file(struct class *class, const struct class_attribute *attr) { if (class) return sysfs_create_file(&class->kobj, &attr->attr); return -EINVAL; } static inline void class_remove_file(struct class *class, const struct class_attribute *attr) { if (class) sysfs_remove_file(&class->kobj, &attr->attr); } #define dev_to_node(dev) linux_dev_to_node(dev) #define of_node_to_nid(node) -1 int linux_dev_to_node(struct device *); char *kvasprintf(gfp_t, const char *, va_list); char *kasprintf(gfp_t, const char *, ...); char *lkpi_devm_kasprintf(struct device *, gfp_t, const char *, ...); #define devm_kasprintf(_dev, _gfp, _fmt, ...) \ lkpi_devm_kasprintf(_dev, _gfp, _fmt, ##__VA_ARGS__) static __inline void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) { void *p; p = lkpi_devres_alloc(lkpi_devm_kmalloc_release, size, gfp); if (p != NULL) lkpi_devres_add(dev, p); return (p); } #define devm_kzalloc(_dev, _size, _gfp) \ devm_kmalloc((_dev), (_size), (_gfp) | __GFP_ZERO) #define devm_kcalloc(_dev, _sizen, _size, _gfp) \ devm_kmalloc((_dev), ((_sizen) * (_size)), (_gfp) | __GFP_ZERO) #endif /* _LINUXKPI_LINUX_DEVICE_H_ */ diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c index 3fa6de92bce4..f5de87947c37 100644 --- a/sys/compat/linuxkpi/common/src/linux_pci.c +++ b/sys/compat/linuxkpi/common/src/linux_pci.c @@ -1,1505 +1,1505 @@ /*- * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. * All rights reserved. * Copyright (c) 2020-2022 The FreeBSD Foundation * * Portions of this software were developed by Björn Zeeb * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "backlight_if.h" #include "pcib_if.h" /* Undef the linux function macro defined in linux/pci.h */ #undef pci_get_class extern int linuxkpi_debug; SYSCTL_DECL(_compat_linuxkpi); static counter_u64_t lkpi_pci_nseg1_fail; SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); static device_probe_t linux_pci_probe; static device_attach_t linux_pci_attach; static device_detach_t linux_pci_detach; static device_suspend_t linux_pci_suspend; static device_resume_t linux_pci_resume; static device_shutdown_t linux_pci_shutdown; static pci_iov_init_t linux_pci_iov_init; static pci_iov_uninit_t linux_pci_iov_uninit; static pci_iov_add_vf_t linux_pci_iov_add_vf; static int linux_backlight_get_status(device_t dev, struct backlight_props *props); static int linux_backlight_update_status(device_t dev, struct backlight_props *props); static int linux_backlight_get_info(device_t dev, struct backlight_info *info); static device_method_t pci_methods[] = { DEVMETHOD(device_probe, linux_pci_probe), DEVMETHOD(device_attach, linux_pci_attach), DEVMETHOD(device_detach, linux_pci_detach), DEVMETHOD(device_suspend, linux_pci_suspend), DEVMETHOD(device_resume, linux_pci_resume), DEVMETHOD(device_shutdown, linux_pci_shutdown), DEVMETHOD(pci_iov_init, linux_pci_iov_init), DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit), DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf), /* backlight interface */ DEVMETHOD(backlight_update_status, linux_backlight_update_status), DEVMETHOD(backlight_get_status, linux_backlight_get_status), DEVMETHOD(backlight_get_info, linux_backlight_get_info), DEVMETHOD_END }; struct linux_dma_priv { uint64_t dma_mask; bus_dma_tag_t dmat; uint64_t dma_coherent_mask; bus_dma_tag_t dmat_coherent; struct mtx lock; struct pctrie ptree; }; #define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock) #define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock) static bool linux_is_drm(struct pci_driver *pdrv) { return (pdrv->name != NULL && strcmp(pdrv->name, "drmn") == 0); } static int linux_pdev_dma_uninit(struct pci_dev *pdev) { struct linux_dma_priv *priv; priv = pdev->dev.dma_priv; if (priv->dmat) bus_dma_tag_destroy(priv->dmat); if (priv->dmat_coherent) bus_dma_tag_destroy(priv->dmat_coherent); mtx_destroy(&priv->lock); pdev->dev.dma_priv = NULL; free(priv, M_DEVBUF); return (0); } static int linux_pdev_dma_init(struct pci_dev *pdev) { struct linux_dma_priv *priv; int error; priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO); mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF); pctrie_init(&priv->ptree); pdev->dev.dma_priv = priv; /* Create a default DMA tags. 
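 * The defaults are a 64-bit streaming tag and, following Linux, a
 * 32-bit coherent tag; a driver that needs a narrower mask is
 * expected to override them later, e.g. (hypothetical):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) != 0)
 *		return (-ENODEV);
 *
 * which ends up re-creating priv->dmat via linux_dma_tag_init().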
*/ error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64)); if (error != 0) goto err; /* Coherent is lower 32bit only by default in Linux. */ error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (error != 0) goto err; return (error); err: linux_pdev_dma_uninit(pdev); return (error); } int linux_dma_tag_init(struct device *dev, u64 dma_mask) { struct linux_dma_priv *priv; int error; priv = dev->dma_priv; if (priv->dmat) { if (priv->dma_mask == dma_mask) return (0); bus_dma_tag_destroy(priv->dmat); } priv->dma_mask = dma_mask; error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1, 0, /* alignment, boundary */ dma_mask, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ BUS_SPACE_MAXSIZE, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &priv->dmat); return (-error); } int linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) { struct linux_dma_priv *priv; int error; priv = dev->dma_priv; if (priv->dmat_coherent) { if (priv->dma_coherent_mask == dma_mask) return (0); bus_dma_tag_destroy(priv->dmat_coherent); } priv->dma_coherent_mask = dma_mask; error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1, 0, /* alignment, boundary */ dma_mask, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ BUS_SPACE_MAXSIZE, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &priv->dmat_coherent); return (-error); } static struct pci_driver * linux_pci_find(device_t dev, const struct pci_device_id **idp) { const struct pci_device_id *id; struct pci_driver *pdrv; uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; vendor = pci_get_vendor(dev); device = pci_get_device(dev); subvendor = pci_get_subvendor(dev); subdevice = pci_get_subdevice(dev); spin_lock(&pci_lock); list_for_each_entry(pdrv, &pci_drivers, node) { for (id = pdrv->id_table; id->vendor != 0; id++) { if (vendor == id->vendor && (PCI_ANY_ID == id->device || device == id->device) && (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { *idp = id; spin_unlock(&pci_lock); return (pdrv); } } } spin_unlock(&pci_lock); return (NULL); } static void lkpi_pci_dev_release(struct device *dev) { lkpi_devres_release_free_list(dev); spin_lock_destroy(&dev->devres_lock); } static void lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) { pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); pdev->vendor = pci_get_vendor(dev); pdev->device = pci_get_device(dev); pdev->subsystem_vendor = pci_get_subvendor(dev); pdev->subsystem_device = pci_get_subdevice(dev); pdev->class = pci_get_class(dev); pdev->revision = pci_get_revid(dev); pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); /* * This should be the upstream bridge; pci_upstream_bridge() * handles that case on demand as otherwise we'll shadow the * entire PCI hierarchy. 
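 * The self-reference below therefore acts as a sentinel;
 * lkpinew_pci_dev_release() relies on it and only drops a bus->self
 * reference once it no longer points back at the pci_dev itself.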
*/ pdev->bus->self = pdev; pdev->bus->number = pci_get_bus(dev); pdev->bus->domain = pci_get_domain(dev); pdev->dev.bsddev = dev; pdev->dev.parent = &linux_root_device; pdev->dev.release = lkpi_pci_dev_release; INIT_LIST_HEAD(&pdev->dev.irqents); kobject_init(&pdev->dev.kobj, &linux_dev_ktype); kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, kobject_name(&pdev->dev.kobj)); spin_lock_init(&pdev->dev.devres_lock); INIT_LIST_HEAD(&pdev->dev.devres_head); } static void lkpinew_pci_dev_release(struct device *dev) { struct pci_dev *pdev; pdev = to_pci_dev(dev); if (pdev->root != NULL) pci_dev_put(pdev->root); if (pdev->bus->self != pdev) pci_dev_put(pdev->bus->self); free(pdev->bus, M_DEVBUF); free(pdev, M_DEVBUF); } struct pci_dev * lkpinew_pci_dev(device_t dev) { struct pci_dev *pdev; pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); lkpifill_pci_dev(dev, pdev); pdev->dev.release = lkpinew_pci_dev_release; return (pdev); } struct pci_dev * lkpi_pci_get_class(unsigned int class, struct pci_dev *from) { device_t dev; device_t devfrom = NULL; struct pci_dev *pdev; if (from != NULL) devfrom = from->dev.bsddev; dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); if (dev == NULL) return (NULL); pdev = lkpinew_pci_dev(dev); return (pdev); } struct pci_dev * lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn) { device_t dev; struct pci_dev *pdev; dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (dev == NULL) return (NULL); pdev = lkpinew_pci_dev(dev); return (pdev); } static int linux_pci_probe(device_t dev) { const struct pci_device_id *id; struct pci_driver *pdrv; if ((pdrv = linux_pci_find(dev, &id)) == NULL) return (ENXIO); if (device_get_driver(dev) != &pdrv->bsddriver) return (ENXIO); device_set_desc(dev, pdrv->name); /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). 
*/ if (pdrv->bsd_probe_return == 0) return (BUS_PROBE_DEFAULT); else return (pdrv->bsd_probe_return); } static int linux_pci_attach(device_t dev) { const struct pci_device_id *id; struct pci_driver *pdrv; struct pci_dev *pdev; pdrv = linux_pci_find(dev, &id); pdev = device_get_softc(dev); MPASS(pdrv != NULL); MPASS(pdev != NULL); return (linux_pci_attach_device(dev, pdrv, id, pdev)); } int linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, const struct pci_device_id *id, struct pci_dev *pdev) { struct resource_list_entry *rle; device_t parent; uintptr_t rid; int error; bool isdrm; linux_set_current(curthread); parent = device_get_parent(dev); isdrm = pdrv != NULL && linux_is_drm(pdrv); if (isdrm) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(parent); device_set_ivars(dev, dinfo); } lkpifill_pci_dev(dev, pdev); if (isdrm) PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); else PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); pdev->devfn = rid; pdev->pdrv = pdrv; rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); if (rle != NULL) pdev->dev.irq = rle->start; else pdev->dev.irq = LINUX_IRQ_INVALID; pdev->irq = pdev->dev.irq; error = linux_pdev_dma_init(pdev); if (error) goto out_dma_init; TAILQ_INIT(&pdev->mmio); spin_lock(&pci_lock); list_add(&pdev->links, &pci_devices); spin_unlock(&pci_lock); if (pdrv != NULL) { error = pdrv->probe(pdev, id); if (error) goto out_probe; } return (0); out_probe: free(pdev->bus, M_DEVBUF); linux_pdev_dma_uninit(pdev); out_dma_init: spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); put_device(&pdev->dev); return (-error); } static int linux_pci_detach(device_t dev) { struct pci_dev *pdev; pdev = device_get_softc(dev); MPASS(pdev != NULL); device_set_desc(dev, NULL); return (linux_pci_detach_device(pdev)); } int linux_pci_detach_device(struct pci_dev *pdev) { linux_set_current(curthread); if (pdev->pdrv != NULL) pdev->pdrv->remove(pdev); if (pdev->root != NULL) pci_dev_put(pdev->root); free(pdev->bus, M_DEVBUF); linux_pdev_dma_uninit(pdev); spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); put_device(&pdev->dev); return (0); } static int lkpi_pci_disable_dev(struct device *dev) { (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY); (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT); return (0); } struct pci_devres * lkpi_pci_devres_get_alloc(struct pci_dev *pdev) { struct pci_devres *dr; dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL); if (dr == NULL) { dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr), GFP_KERNEL | __GFP_ZERO); if (dr != NULL) lkpi_devres_add(&pdev->dev, dr); } return (dr); } void lkpi_pci_devres_release(struct device *dev, void *p) { struct pci_devres *dr; struct pci_dev *pdev; int bar; pdev = to_pci_dev(dev); dr = p; if (pdev->msix_enabled) lkpi_pci_disable_msix(pdev); if (pdev->msi_enabled) lkpi_pci_disable_msi(pdev); if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0) dr->enable_io = false; if (dr->region_mask == 0) return; for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { if ((dr->region_mask & (1 << bar)) == 0) continue; pci_release_region(pdev, bar); } } struct pcim_iomap_devres * lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) { struct pcim_iomap_devres *dr; dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, NULL, NULL); if (dr == NULL) { dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, sizeof(*dr), GFP_KERNEL | __GFP_ZERO); if (dr != NULL) lkpi_devres_add(&pdev->dev, dr); } if (dr == NULL) 
device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); return (dr); } void lkpi_pcim_iomap_table_release(struct device *dev, void *p) { struct pcim_iomap_devres *dr; struct pci_dev *pdev; int bar; dr = p; pdev = to_pci_dev(dev); for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { if (dr->mmio_table[bar] == NULL) continue; pci_iounmap(pdev, dr->mmio_table[bar]); } } static int linux_pci_suspend(device_t dev) { const struct dev_pm_ops *pmops; struct pm_message pm = { }; struct pci_dev *pdev; int error; error = 0; linux_set_current(curthread); pdev = device_get_softc(dev); pmops = pdev->pdrv->driver.pm; if (pdev->pdrv->suspend != NULL) error = -pdev->pdrv->suspend(pdev, pm); else if (pmops != NULL && pmops->suspend != NULL) { error = -pmops->suspend(&pdev->dev); if (error == 0 && pmops->suspend_late != NULL) error = -pmops->suspend_late(&pdev->dev); } return (error); } static int linux_pci_resume(device_t dev) { const struct dev_pm_ops *pmops; struct pci_dev *pdev; int error; error = 0; linux_set_current(curthread); pdev = device_get_softc(dev); pmops = pdev->pdrv->driver.pm; if (pdev->pdrv->resume != NULL) error = -pdev->pdrv->resume(pdev); else if (pmops != NULL && pmops->resume != NULL) { if (pmops->resume_early != NULL) error = -pmops->resume_early(&pdev->dev); if (error == 0 && pmops->resume != NULL) error = -pmops->resume(&pdev->dev); } return (error); } static int linux_pci_shutdown(device_t dev) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); if (pdev->pdrv->shutdown != NULL) pdev->pdrv->shutdown(pdev); return (0); } static int linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) { struct pci_dev *pdev; int error; linux_set_current(curthread); pdev = device_get_softc(dev); if (pdev->pdrv->bsd_iov_init != NULL) error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); else error = EINVAL; return (error); } static void linux_pci_iov_uninit(device_t dev) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); if (pdev->pdrv->bsd_iov_uninit != NULL) pdev->pdrv->bsd_iov_uninit(dev); } static int linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) { struct pci_dev *pdev; int error; linux_set_current(curthread); pdev = device_get_softc(dev); if (pdev->pdrv->bsd_iov_add_vf != NULL) error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config); else error = EINVAL; return (error); } static int _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) { int error; linux_set_current(curthread); spin_lock(&pci_lock); list_add(&pdrv->node, &pci_drivers); spin_unlock(&pci_lock); if (pdrv->bsddriver.name == NULL) pdrv->bsddriver.name = pdrv->name; pdrv->bsddriver.methods = pci_methods; pdrv->bsddriver.size = sizeof(struct pci_dev); - mtx_lock(&Giant); + bus_topo_lock(); error = devclass_add_driver(dc, &pdrv->bsddriver, BUS_PASS_DEFAULT, &pdrv->bsdclass); - mtx_unlock(&Giant); + bus_topo_unlock(); return (-error); } int linux_pci_register_driver(struct pci_driver *pdrv) { devclass_t dc; dc = devclass_find("pci"); if (dc == NULL) return (-ENXIO); return (_linux_pci_register_driver(pdrv, dc)); } struct resource_list_entry * linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, int type, int rid) { device_t dev; struct resource *res; KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, ("trying to reserve non-BAR type %d", type)); dev = pdev->pdrv != NULL && linux_is_drm(pdev->pdrv) ? 
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 1, 1, 0); if (res == NULL) return (NULL); return (resource_list_find(rl, type, rid)); } unsigned long pci_resource_start(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; rman_res_t newstart; device_t dev; if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) return (0); dev = pdev->pdrv != NULL && linux_is_drm(pdev->pdrv) ? device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) { device_printf(pdev->dev.bsddev, "translate of %#jx failed\n", (uintmax_t)rle->start); return (0); } return (newstart); } unsigned long pci_resource_len(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) return (0); return (rle->count); } int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { struct resource *res; struct pci_devres *dr; struct pci_mmio_region *mmio; int rid; int type; type = pci_resource_type(pdev, bar); if (type < 0) return (-ENODEV); rid = PCIR_BAR(bar); res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, RF_ACTIVE|RF_SHAREABLE); if (res == NULL) { device_printf(pdev->dev.bsddev, "%s: failed to alloc " "bar %d type %d rid %d\n", __func__, bar, type, PCIR_BAR(bar)); return (-ENODEV); } /* * There is implicit devres tracking on these if the device is * managed; otherwise the resources are not automatically freed on * FreeBSD/LinuxKPI though they should be/are expected to be by Linux * drivers. */ dr = lkpi_pci_devres_find(pdev); if (dr != NULL) { dr->region_mask |= (1 << bar); dr->region_table[bar] = res; } /* Even if the device is not managed we need to track it for iomap. */ mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); mmio->rid = PCIR_BAR(bar); mmio->type = type; mmio->res = res; TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); return (0); } struct resource * _lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused) { struct pci_mmio_region *mmio, *p; int type; type = pci_resource_type(pdev, bar); if (type < 0) { device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", __func__, bar, type); return (NULL); } /* * Check for duplicate mappings. * This can happen if a driver calls pci_request_region() first.
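 * E.g. (hypothetical driver sequence):
 *
 *	pci_request_region(pdev, 0, "mydrv");	(BAR 0 added to pdev->mmio)
 *	base = pci_iomap(pdev, 0, 0);		(must return that same
 *						 resource, not map it twice)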
*/ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { return (mmio->res); } } mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); mmio->rid = PCIR_BAR(bar); mmio->type = type; mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, &mmio->rid, RF_ACTIVE|RF_SHAREABLE); if (mmio->res == NULL) { device_printf(pdev->dev.bsddev, "%s: failed to alloc " "bar %d type %d rid %d\n", __func__, bar, type, PCIR_BAR(bar)); free(mmio, M_DEVBUF); return (NULL); } TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); return (mmio->res); } int linux_pci_register_drm_driver(struct pci_driver *pdrv) { devclass_t dc; dc = devclass_create("vgapci"); if (dc == NULL) return (-ENXIO); pdrv->name = "drmn"; return (_linux_pci_register_driver(pdrv, dc)); } void linux_pci_unregister_driver(struct pci_driver *pdrv) { devclass_t bus; bus = devclass_find("pci"); spin_lock(&pci_lock); list_del(&pdrv->node); spin_unlock(&pci_lock); - mtx_lock(&Giant); + bus_topo_lock(); if (bus != NULL) devclass_delete_driver(bus, &pdrv->bsddriver); - mtx_unlock(&Giant); + bus_topo_unlock(); } void linux_pci_unregister_drm_driver(struct pci_driver *pdrv) { devclass_t bus; bus = devclass_find("vgapci"); spin_lock(&pci_lock); list_del(&pdrv->node); spin_unlock(&pci_lock); - mtx_lock(&Giant); + bus_topo_lock(); if (bus != NULL) devclass_delete_driver(bus, &pdrv->bsddriver); - mtx_unlock(&Giant); + bus_topo_unlock(); } int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv, unsigned int flags) { int error; if (flags & PCI_IRQ_MSIX) { struct msix_entry *entries; int i; entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL); if (entries == NULL) { error = -ENOMEM; goto out; } for (i = 0; i < maxv; ++i) entries[i].entry = i; error = pci_enable_msix(pdev, entries, maxv); out: kfree(entries); if (error == 0 && pdev->msix_enabled) return (pdev->dev.irq_end - pdev->dev.irq_start); } if (flags & PCI_IRQ_MSI) { error = pci_enable_msi(pdev); if (error == 0 && pdev->msi_enabled) return (pdev->dev.irq_end - pdev->dev.irq_start); } if (flags & PCI_IRQ_LEGACY) { if (pdev->irq) return (1); } return (-EINVAL); } CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); struct linux_dma_obj { void *vaddr; uint64_t dma_addr; bus_dmamap_t dmamap; bus_dma_tag_t dmat; }; static uma_zone_t linux_dma_trie_zone; static uma_zone_t linux_dma_obj_zone; static void linux_dma_init(void *arg) { linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 0); linux_dma_obj_zone = uma_zcreate("linux_dma_object", sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); } SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); static void linux_dma_uninit(void *arg) { counter_u64_free(lkpi_pci_nseg1_fail); uma_zdestroy(linux_dma_obj_zone); uma_zdestroy(linux_dma_trie_zone); } SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); static void * linux_dma_trie_alloc(struct pctrie *ptree) { return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); } static void linux_dma_trie_free(struct pctrie *ptree, void *node) { uma_zfree(linux_dma_trie_zone, node); } PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, linux_dma_trie_free); #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) static dma_addr_t linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, bus_dma_tag_t dmat) { struct linux_dma_priv *priv; 
struct linux_dma_obj *obj; int error, nseg; bus_dma_segment_t seg; priv = dev->dma_priv; /* * If the resultant mapping will be entirely 1:1 with the * physical address, short-circuit the remainder of the * bus_dma API. This avoids tracking collisions in the pctrie * with the additional benefit of reducing overhead. */ if (bus_dma_id_mapped(dmat, phys, len)) return (phys); obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); if (obj == NULL) { return (0); } obj->dmat = dmat; DMA_PRIV_LOCK(priv); if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); return (0); } nseg = -1; if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, BUS_DMA_NOWAIT, &seg, &nseg) != 0) { bus_dmamap_destroy(obj->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); counter_u64_add(lkpi_pci_nseg1_fail, 1); if (linuxkpi_debug) dump_stack(); return (0); } KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); obj->dma_addr = seg.ds_addr; error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); if (error != 0) { bus_dmamap_unload(obj->dmat, obj->dmamap); bus_dmamap_destroy(obj->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); return (0); } DMA_PRIV_UNLOCK(priv); return (obj->dma_addr); } #else static dma_addr_t linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, size_t len __unused, bus_dma_tag_t dmat __unused) { return (phys); } #endif dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) { struct linux_dma_priv *priv; priv = dev->dma_priv; return (linux_dma_map_phys_common(dev, phys, len, priv->dmat)); } #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) { struct linux_dma_priv *priv; struct linux_dma_obj *obj; priv = dev->dma_priv; if (pctrie_is_empty(&priv->ptree)) return; DMA_PRIV_LOCK(priv); obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); if (obj == NULL) { DMA_PRIV_UNLOCK(priv); return; } LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); bus_dmamap_unload(obj->dmat, obj->dmamap); bus_dmamap_destroy(obj->dmat, obj->dmamap); DMA_PRIV_UNLOCK(priv); uma_zfree(linux_dma_obj_zone, obj); } #else void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) { } #endif void * linux_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { struct linux_dma_priv *priv; vm_paddr_t high; size_t align; void *mem; if (dev == NULL || dev->dma_priv == NULL) { *dma_handle = 0; return (NULL); } priv = dev->dma_priv; if (priv->dma_coherent_mask) high = priv->dma_coherent_mask; else /* Coherent is lower 32bit only by default in Linux. */ high = BUS_SPACE_MAXADDR_32BIT; align = PAGE_SIZE << get_order(size); /* Always zero the allocation. 
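 * Linux's dma_alloc_coherent() is documented to return zeroed
 * memory, so M_ZERO is forced here regardless of the caller's flags.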
*/ flag |= M_ZERO; mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, align, 0, VM_MEMATTR_DEFAULT); if (mem != NULL) { *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, priv->dmat_coherent); if (*dma_handle == 0) { kmem_free((vm_offset_t)mem, size); mem = NULL; } } else { *dma_handle = 0; } return (mem); } void linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, bus_dmasync_op_t op) { struct linux_dma_priv *priv; struct linux_dma_obj *obj; priv = dev->dma_priv; if (pctrie_is_empty(&priv->ptree)) return; DMA_PRIV_LOCK(priv); obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); if (obj == NULL) { DMA_PRIV_UNLOCK(priv); return; } bus_dmamap_sync(obj->dmat, obj->dmamap, op); DMA_PRIV_UNLOCK(priv); } int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, unsigned long attrs __unused) { struct linux_dma_priv *priv; struct scatterlist *sg; int i, nseg; bus_dma_segment_t seg; priv = dev->dma_priv; DMA_PRIV_LOCK(priv); /* create common DMA map in the first S/G entry */ if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { DMA_PRIV_UNLOCK(priv); return (0); } /* load all S/G list entries */ for_each_sg(sgl, sg, nents, i) { nseg = -1; if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, sg_phys(sg), sg->length, BUS_DMA_NOWAIT, &seg, &nseg) != 0) { bus_dmamap_unload(priv->dmat, sgl->dma_map); bus_dmamap_destroy(priv->dmat, sgl->dma_map); DMA_PRIV_UNLOCK(priv); return (0); } KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1)); sg_dma_address(sg) = seg.ds_addr; } switch (direction) { case DMA_BIDIRECTIONAL: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); break; case DMA_TO_DEVICE: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); break; case DMA_FROM_DEVICE: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); break; default: break; } DMA_PRIV_UNLOCK(priv); return (nents); } void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents __unused, enum dma_data_direction direction, unsigned long attrs __unused) { struct linux_dma_priv *priv; priv = dev->dma_priv; DMA_PRIV_LOCK(priv); switch (direction) { case DMA_BIDIRECTIONAL: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); break; case DMA_TO_DEVICE: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); break; case DMA_FROM_DEVICE: bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); break; default: break; } bus_dmamap_unload(priv->dmat, sgl->dma_map); bus_dmamap_destroy(priv->dmat, sgl->dma_map); DMA_PRIV_UNLOCK(priv); } struct dma_pool { struct device *pool_device; uma_zone_t pool_zone; struct mtx pool_lock; bus_dma_tag_t pool_dmat; size_t pool_entry_size; struct pctrie pool_ptree; }; #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) static inline int dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) { struct linux_dma_obj *obj = mem; struct dma_pool *pool = arg; int error, nseg; bus_dma_segment_t seg; nseg = -1; DMA_POOL_LOCK(pool); error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, &seg, &nseg); DMA_POOL_UNLOCK(pool); if (error != 0) { return (error); } KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); obj->dma_addr = seg.ds_addr; return (0); } static void dma_pool_obj_dtor(void *mem, 
int size, void *arg) { struct linux_dma_obj *obj = mem; struct dma_pool *pool = arg; DMA_POOL_LOCK(pool); bus_dmamap_unload(pool->pool_dmat, obj->dmamap); DMA_POOL_UNLOCK(pool); } static int dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, int flags) { struct dma_pool *pool = arg; struct linux_dma_obj *obj; int error, i; for (i = 0; i < count; i++) { obj = uma_zalloc(linux_dma_obj_zone, flags); if (obj == NULL) break; error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, BUS_DMA_NOWAIT, &obj->dmamap); if (error!= 0) { uma_zfree(linux_dma_obj_zone, obj); break; } store[i] = obj; } return (i); } static void dma_pool_obj_release(void *arg, void **store, int count) { struct dma_pool *pool = arg; struct linux_dma_obj *obj; int i; for (i = 0; i < count; i++) { obj = store[i]; bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); uma_zfree(linux_dma_obj_zone, obj); } } struct dma_pool * linux_dma_pool_create(char *name, struct device *dev, size_t size, size_t align, size_t boundary) { struct linux_dma_priv *priv; struct dma_pool *pool; priv = dev->dma_priv; pool = kzalloc(sizeof(*pool), GFP_KERNEL); pool->pool_device = dev; pool->pool_entry_size = size; if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), align, boundary, /* alignment, boundary */ priv->dma_mask, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &pool->pool_dmat)) { kfree(pool); return (NULL); } pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, dma_pool_obj_release, pool, 0); mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); pctrie_init(&pool->pool_ptree); return (pool); } void linux_dma_pool_destroy(struct dma_pool *pool) { uma_zdestroy(pool->pool_zone); bus_dma_tag_destroy(pool->pool_dmat); mtx_destroy(&pool->pool_lock); kfree(pool); } void lkpi_dmam_pool_destroy(struct device *dev, void *p) { struct dma_pool *pool; pool = *(struct dma_pool **)p; LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); linux_dma_pool_destroy(pool); } void * linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { struct linux_dma_obj *obj; obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); if (obj == NULL) return (NULL); DMA_POOL_LOCK(pool); if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { DMA_POOL_UNLOCK(pool); uma_zfree_arg(pool->pool_zone, obj, pool); return (NULL); } DMA_POOL_UNLOCK(pool); *handle = obj->dma_addr; return (obj->vaddr); } void linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) { struct linux_dma_obj *obj; DMA_POOL_LOCK(pool); obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); if (obj == NULL) { DMA_POOL_UNLOCK(pool); return; } LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); DMA_POOL_UNLOCK(pool); uma_zfree_arg(pool->pool_zone, obj, pool); } static int linux_backlight_get_status(device_t dev, struct backlight_props *props) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); props->brightness = pdev->dev.bd->props.brightness; props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; props->nlevels = 0; return (0); } static int linux_backlight_get_info(device_t dev, struct backlight_info *info) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); info->type = BACKLIGHT_TYPE_PANEL; 
strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); return (0); } static int linux_backlight_update_status(device_t dev, struct backlight_props *props) { struct pci_dev *pdev; linux_set_current(curthread); pdev = device_get_softc(dev); pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * props->brightness / 100; pdev->dev.bd->props.power = props->brightness == 0 ? 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); } struct backlight_device * linux_backlight_device_register(const char *name, struct device *dev, void *data, const struct backlight_ops *ops, struct backlight_properties *props) { dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); dev->bd->ops = ops; dev->bd->props.type = props->type; dev->bd->props.max_brightness = props->max_brightness; dev->bd->props.brightness = props->brightness; dev->bd->props.power = props->power; dev->bd->data = data; dev->bd->dev = dev; dev->bd->name = strdup(name, M_DEVBUF); dev->backlight_dev = backlight_register(name, dev->bsddev); return (dev->bd); } void linux_backlight_device_unregister(struct backlight_device *bd) { backlight_destroy(bd->dev->backlight_dev); free(bd->name, M_DEVBUF); free(bd, M_DEVBUF); } diff --git a/sys/compat/linuxkpi/common/src/linux_usb.c b/sys/compat/linuxkpi/common/src/linux_usb.c index 9474aa6be9ea..05fb63b93142 100644 --- a/sys/compat/linuxkpi/common/src/linux_usb.c +++ b/sys/compat/linuxkpi/common/src/linux_usb.c @@ -1,1718 +1,1720 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved. * Copyright (c) 2007 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ struct usb_linux_softc { LIST_ENTRY(usb_linux_softc) sc_attached_list; device_t sc_fbsd_dev; struct usb_device *sc_fbsd_udev; struct usb_interface *sc_ui; struct usb_driver *sc_udrv; }; /* prototypes */ static device_probe_t usb_linux_probe; static device_attach_t usb_linux_attach; static device_detach_t usb_linux_detach; static device_suspend_t usb_linux_suspend; static device_resume_t usb_linux_resume; static usb_callback_t usb_linux_isoc_callback; static usb_callback_t usb_linux_non_isoc_callback; static usb_complete_t usb_linux_wait_complete; static uint16_t usb_max_isoc_frames(struct usb_device *); static int usb_start_wait_urb(struct urb *, usb_timeout_t, uint16_t *); static const struct usb_device_id *usb_linux_lookup_id( const struct usb_device_id *, struct usb_attach_arg *); static struct usb_driver *usb_linux_get_usb_driver(struct usb_linux_softc *); static int usb_linux_create_usb_device(struct usb_device *, device_t); static void usb_linux_cleanup_interface(struct usb_device *, struct usb_interface *); static void usb_linux_complete(struct usb_xfer *); static int usb_unlink_urb_sub(struct urb *, uint8_t); /*------------------------------------------------------------------------* * FreeBSD USB interface *------------------------------------------------------------------------*/ static LIST_HEAD(, usb_linux_softc) usb_linux_attached_list; static LIST_HEAD(, usb_driver) usb_linux_driver_list; static device_method_t usb_linux_methods[] = { /* Device interface */ DEVMETHOD(device_probe, usb_linux_probe), DEVMETHOD(device_attach, usb_linux_attach), DEVMETHOD(device_detach, usb_linux_detach), DEVMETHOD(device_suspend, usb_linux_suspend), DEVMETHOD(device_resume, usb_linux_resume), DEVMETHOD_END }; static driver_t usb_linux_driver = { .name = "usb_linux", .methods = usb_linux_methods, .size = sizeof(struct usb_linux_softc), }; static devclass_t usb_linux_devclass; DRIVER_MODULE(usb_linux, uhub, usb_linux_driver, usb_linux_devclass, NULL, 0); MODULE_VERSION(usb_linux, 1); /*------------------------------------------------------------------------* * usb_linux_lookup_id * * This function takes an array of "struct usb_device_id" and tries * to match the entries with the information in "struct usb_attach_arg". * If it finds a match, the matching entry will be returned. * Else "NULL" will be returned.
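 *
 * Example of a table this walks (hypothetical):
 *
 *	static const struct usb_device_id mydrv_id_table[] = {
 *		{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
 *		    USB_DEVICE_ID_MATCH_PRODUCT,
 *		  .idVendor = 0x1234, .idProduct = 0x5678 },
 *		{ }	(zero match_flags terminates the array)
 *	};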
*------------------------------------------------------------------------*/ static const struct usb_device_id * usb_linux_lookup_id(const struct usb_device_id *id, struct usb_attach_arg *uaa) { if (id == NULL) { goto done; } /* * Keep on matching array entries until we find one with * "match_flags" equal to zero, which indicates the end of the * array: */ for (; id->match_flags; id++) { if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && (id->idVendor != uaa->info.idVendor)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) && (id->idProduct != uaa->info.idProduct)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) && (id->bcdDevice_lo > uaa->info.bcdDevice)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) && (id->bcdDevice_hi < uaa->info.bcdDevice)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) && (id->bDeviceClass != uaa->info.bDeviceClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) && (id->bDeviceSubClass != uaa->info.bDeviceSubClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) && (id->bDeviceProtocol != uaa->info.bDeviceProtocol)) { continue; } if ((uaa->info.bDeviceClass == 0xFF) && !(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && (id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL))) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) && (id->bInterfaceClass != uaa->info.bInterfaceClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) && (id->bInterfaceSubClass != uaa->info.bInterfaceSubClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) && (id->bInterfaceProtocol != uaa->info.bInterfaceProtocol)) { continue; } /* we found a match! */ return (id); } done: return (NULL); } /*------------------------------------------------------------------------* * usb_linux_probe * * This function is the FreeBSD probe callback. It is called from the * FreeBSD USB stack through the "device_probe_and_attach()" function. *------------------------------------------------------------------------*/ static int usb_linux_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_driver *udrv; int err = ENXIO; if (uaa->usb_mode != USB_MODE_HOST) { return (ENXIO); } mtx_lock(&Giant); LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) { if (usb_linux_lookup_id(udrv->id_table, uaa)) { err = BUS_PROBE_DEFAULT; break; } } mtx_unlock(&Giant); return (err); } /*------------------------------------------------------------------------* * usb_linux_get_usb_driver * * This function returns the pointer to the "struct usb_driver" where * the Linux USB device driver "struct usb_device_id" match was found. * We apply a lock before reading out the pointer to avoid races. *------------------------------------------------------------------------*/ static struct usb_driver * usb_linux_get_usb_driver(struct usb_linux_softc *sc) { struct usb_driver *udrv; mtx_lock(&Giant); udrv = sc->sc_udrv; mtx_unlock(&Giant); return (udrv); } /*------------------------------------------------------------------------* * usb_linux_attach * * This function is the FreeBSD attach callback. It is called from the * FreeBSD USB stack through the "device_probe_and_attach()" function. * This function is called when "usb_linux_probe()" returns zero. 
*------------------------------------------------------------------------*/ static int usb_linux_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv; const struct usb_device_id *id = NULL; mtx_lock(&Giant); LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) { id = usb_linux_lookup_id(udrv->id_table, uaa); if (id) break; } mtx_unlock(&Giant); if (id == NULL) { return (ENXIO); } if (usb_linux_create_usb_device(uaa->device, dev) != 0) return (ENOMEM); device_set_usb_desc(dev); sc->sc_fbsd_udev = uaa->device; sc->sc_fbsd_dev = dev; sc->sc_udrv = udrv; sc->sc_ui = usb_ifnum_to_if(uaa->device, uaa->info.bIfaceNum); if (sc->sc_ui == NULL) { return (EINVAL); } if (udrv->probe) { if ((udrv->probe) (sc->sc_ui, id)) { return (ENXIO); } } mtx_lock(&Giant); LIST_INSERT_HEAD(&usb_linux_attached_list, sc, sc_attached_list); mtx_unlock(&Giant); /* success */ return (0); }
/*------------------------------------------------------------------------* * usb_linux_detach * * This function is the FreeBSD detach callback. It is called from the * FreeBSD USB stack through the "device_detach()" function. *------------------------------------------------------------------------*/ static int usb_linux_detach(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = NULL; mtx_lock(&Giant); if (sc->sc_attached_list.le_prev) { LIST_REMOVE(sc, sc_attached_list); sc->sc_attached_list.le_prev = NULL; udrv = sc->sc_udrv; sc->sc_udrv = NULL; } mtx_unlock(&Giant); if (udrv && udrv->disconnect) { (udrv->disconnect) (sc->sc_ui); } /* * Make sure that we free all FreeBSD USB transfers belonging to * this Linux "usb_interface", since they will most likely not be * needed any more. */ usb_linux_cleanup_interface(sc->sc_fbsd_udev, sc->sc_ui); return (0); }
/*------------------------------------------------------------------------* * usb_linux_suspend * * This function is the FreeBSD suspend callback. Usually it does nothing. *------------------------------------------------------------------------*/ static int usb_linux_suspend(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = usb_linux_get_usb_driver(sc); int err; err = 0; if (udrv && udrv->suspend) err = (udrv->suspend) (sc->sc_ui, 0); return (-err); }
/*------------------------------------------------------------------------* * usb_linux_resume * * This function is the FreeBSD resume callback. Usually it does nothing. *------------------------------------------------------------------------*/ static int usb_linux_resume(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = usb_linux_get_usb_driver(sc); int err; err = 0; if (udrv && udrv->resume) err = (udrv->resume) (sc->sc_ui); return (-err); }
/*------------------------------------------------------------------------* * Linux emulation layer *------------------------------------------------------------------------*/
/*------------------------------------------------------------------------* * usb_max_isoc_frames * * The following function returns the maximum number of isochronous * frames that we support per URB. It is not part of the Linux USB API.
*------------------------------------------------------------------------*/ static uint16_t usb_max_isoc_frames(struct usb_device *dev) { ; /* indent fix */ switch (usbd_get_speed(dev)) { case USB_SPEED_LOW: case USB_SPEED_FULL: return (USB_MAX_FULL_SPEED_ISOC_FRAMES); default: return (USB_MAX_HIGH_SPEED_ISOC_FRAMES); } }
/*------------------------------------------------------------------------* * usb_submit_urb * * This function is used to queue a URB after it has been * initialized. If it returns non-zero, it means that the URB was not * queued. *------------------------------------------------------------------------*/ int usb_submit_urb(struct urb *urb, uint16_t mem_flags) { struct usb_host_endpoint *uhe; uint8_t do_unlock; int err; if (urb == NULL) return (-EINVAL); do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); if (urb->endpoint == NULL) { err = -EINVAL; goto done; } /* * Check to see if the urb is in the process of being killed * and stop a urb that is in the process of being killed from * being re-submitted (e.g. from its completion callback * function). */ if (urb->kill_count != 0) { err = -EPERM; goto done; } uhe = urb->endpoint; /* * Check that we have got a FreeBSD USB transfer that will dequeue * the URB structure and do the real transfer. If there are no USB * transfers, then we return an error. */ if (uhe->bsd_xfer[0] || uhe->bsd_xfer[1]) { /* we are ready! */ TAILQ_INSERT_TAIL(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->status = -EINPROGRESS; usbd_transfer_start(uhe->bsd_xfer[0]); usbd_transfer_start(uhe->bsd_xfer[1]); err = 0; } else { /* no pipes have been setup yet! */ urb->status = -EINVAL; err = -EINVAL; } done: if (do_unlock) mtx_unlock(&Giant); return (err); }
/*------------------------------------------------------------------------* * usb_unlink_urb * * This function is used to stop a URB after it has been * submitted, but before the "complete" callback has been called. On * success, zero is returned. *------------------------------------------------------------------------*/ int usb_unlink_urb(struct urb *urb) { return (usb_unlink_urb_sub(urb, 0)); }
static void usb_unlink_bsd(struct usb_xfer *xfer, struct urb *urb, uint8_t drain) { if (xfer == NULL) return; if (!usbd_transfer_pending(xfer)) return; if (xfer->priv_fifo == (void *)urb) { if (drain) { mtx_unlock(&Giant); usbd_transfer_drain(xfer); mtx_lock(&Giant); } else { usbd_transfer_stop(xfer); } usbd_transfer_start(xfer); } }
static int usb_unlink_urb_sub(struct urb *urb, uint8_t drain) { struct usb_host_endpoint *uhe; uint16_t x; uint8_t do_unlock; int err; if (urb == NULL) return (-EINVAL); do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); if (drain) urb->kill_count++; if (urb->endpoint == NULL) { err = -EINVAL; goto done; } uhe = urb->endpoint; if (urb->bsd_urb_list.tqe_prev) { /* not started yet, just remove it from the queue */ TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->bsd_urb_list.tqe_prev = NULL; urb->status = -ECONNRESET; urb->actual_length = 0; for (x = 0; x < urb->number_of_packets; x++) { urb->iso_frame_desc[x].actual_length = 0; } if (urb->complete) { (urb->complete) (urb); } } else { /* * If the URB is not on the URB list, then check if one of * the FreeBSD USB transfers is processing the current URB.
* If so, re-start that transfer, which will lead to the * termination of that URB: */ usb_unlink_bsd(uhe->bsd_xfer[0], urb, drain); usb_unlink_bsd(uhe->bsd_xfer[1], urb, drain); } err = 0; done: if (drain) urb->kill_count--; if (do_unlock) mtx_unlock(&Giant); return (err); }
/*------------------------------------------------------------------------* * usb_clear_halt * * This function must always be used to clear a stall. A stall occurs * when a USB endpoint returns a stall message to the USB host controller. * Until the stall is cleared, no data can be transferred. *------------------------------------------------------------------------*/ int usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe) { struct usb_config cfg[1]; struct usb_endpoint *ep; uint8_t type; uint8_t addr; if (uhe == NULL) return (-EINVAL); type = uhe->desc.bmAttributes & UE_XFERTYPE; addr = uhe->desc.bEndpointAddress; memset(cfg, 0, sizeof(cfg)); cfg[0].type = type; cfg[0].endpoint = addr & UE_ADDR; cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN); ep = usbd_get_endpoint(dev, uhe->bsd_iface_index, cfg); if (ep == NULL) return (-EINVAL); usbd_clear_data_toggle(dev, ep); return (usb_control_msg(dev, &dev->ep0, UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT, UF_ENDPOINT_HALT, addr, NULL, 0, 1000)); }
/*------------------------------------------------------------------------* * usb_start_wait_urb * * This is an internal function that is used to perform synchronous * Linux USB transfers. *------------------------------------------------------------------------*/ static int usb_start_wait_urb(struct urb *urb, usb_timeout_t timeout, uint16_t *p_actlen) { int err; uint8_t do_unlock; /* you must have a timeout! */ if (timeout == 0) { timeout = 1; } urb->complete = &usb_linux_wait_complete; urb->timeout = timeout; urb->transfer_flags |= URB_WAIT_WAKEUP; urb->transfer_flags &= ~URB_IS_SLEEPING; do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); err = usb_submit_urb(urb, 0); if (err) goto done; /* * the URB might have completed before we get here, so check that by * using some flags! */ while (urb->transfer_flags & URB_WAIT_WAKEUP) { urb->transfer_flags |= URB_IS_SLEEPING; cv_wait(&urb->cv_wait, &Giant); urb->transfer_flags &= ~URB_IS_SLEEPING; } err = urb->status; done: if (do_unlock) mtx_unlock(&Giant); if (p_actlen != NULL) { if (err) *p_actlen = 0; else *p_actlen = urb->actual_length; } return (err); }
/*------------------------------------------------------------------------* * usb_control_msg * * The following function performs a control transfer sequence on any * control endpoint, specified by "uhe". A control * transfer means that you transfer an 8-byte header first followed by * a data-phase as indicated by the 8-byte header. The "timeout" is * given in milliseconds.
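 *
 * Example (editor's sketch, not part of the sources; the buffer and
 * timeout are hypothetical): fetching the first bytes of the device
 * descriptor over the default control endpoint, with a one second
 * timeout:
 *
 *	uint8_t buf[8];
 *	int len;
 *
 *	len = usb_control_msg(dev, &dev->ep0, UR_GET_DESCRIPTOR,
 *	    UT_READ_DEVICE, UDESC_DEVICE << 8, 0, buf, sizeof(buf),
 *	    1000);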
* * Return values: * 0: Success * < 0: Failure * > 0: Actual length *------------------------------------------------------------------------*/ int usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe, uint8_t request, uint8_t requesttype, uint16_t value, uint16_t index, void *data, uint16_t size, usb_timeout_t timeout) { struct usb_device_request req; struct urb *urb; int err; uint16_t actlen; uint8_t type; uint8_t addr; req.bmRequestType = requesttype; req.bRequest = request; USETW(req.wValue, value); USETW(req.wIndex, index); USETW(req.wLength, size); if (uhe == NULL) { return (-EINVAL); } type = (uhe->desc.bmAttributes & UE_XFERTYPE); addr = (uhe->desc.bEndpointAddress & UE_ADDR); if (type != UE_CONTROL) { return (-EINVAL); } if (addr == 0) { /* * The FreeBSD USB stack supports standard control * transfers on control endpoint zero: */ err = usbd_do_request_flags(dev, NULL, &req, data, USB_SHORT_XFER_OK, &actlen, timeout); if (err) { err = -EPIPE; } else { err = actlen; } return (err); } if (dev->flags.usb_mode != USB_MODE_HOST) { /* not supported */ return (-EINVAL); } err = usb_setup_endpoint(dev, uhe, 1 /* dummy */ ); /* * NOTE: we need to allocate real memory here so that we don't * transfer data to/from the stack! * * 0xFFFF is a FreeBSD specific magic value. */ urb = usb_alloc_urb(0xFFFF, size); urb->dev = dev; urb->endpoint = uhe; memcpy(urb->setup_packet, &req, sizeof(req)); if (size && (!(req.bmRequestType & UT_READ))) { /* move the data to a real buffer */ memcpy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)), data, size); } err = usb_start_wait_urb(urb, timeout, &actlen); if (req.bmRequestType & UT_READ) { if (actlen) { bcopy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)), data, actlen); } } usb_free_urb(urb); if (err == 0) { err = actlen; } return (err); } /*------------------------------------------------------------------------* * usb_set_interface * * The following function will select which alternate setting of an * USB interface you plan to use. By default alternate setting with * index zero is selected. Note that "iface_no" is not the interface * index, but rather the value of "bInterfaceNumber". *------------------------------------------------------------------------*/ int usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index) { struct usb_interface *p_ui = usb_ifnum_to_if(dev, iface_no); int err; if (p_ui == NULL) return (-EINVAL); if (alt_index >= p_ui->num_altsetting) return (-EINVAL); usb_linux_cleanup_interface(dev, p_ui); err = -usbd_set_alt_interface_index(dev, p_ui->bsd_iface_index, alt_index); if (err == 0) { p_ui->cur_altsetting = p_ui->altsetting + alt_index; } return (err); } /*------------------------------------------------------------------------* * usb_setup_endpoint * * The following function is an extension to the Linux USB API that * allows you to set a maximum buffer size for a given USB endpoint. * The maximum buffer size is per URB. If you don't call this function * to set a maximum buffer size, the endpoint will not be functional. * Note that for isochronous endpoints the maximum buffer size must be * a non-zero dummy, hence this function will base the maximum buffer * size on "wMaxPacketSize". 
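 *
 * Example (editor's sketch): preparing a bulk endpoint for 4096 byte
 * URBs, which mirrors what "usb_bulk_msg()" below does internally
 * before submitting its URB:
 *
 *	err = usb_setup_endpoint(udev, uhe, 4096);
 *	if (err)
 *		return (err);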
*------------------------------------------------------------------------*/ int usb_setup_endpoint(struct usb_device *dev, struct usb_host_endpoint *uhe, usb_size_t bufsize) { struct usb_config cfg[2]; uint8_t type = uhe->desc.bmAttributes & UE_XFERTYPE; uint8_t addr = uhe->desc.bEndpointAddress; if (uhe->fbsd_buf_size == bufsize) { /* optimize */ return (0); } usbd_transfer_unsetup(uhe->bsd_xfer, 2); uhe->fbsd_buf_size = bufsize; if (bufsize == 0) { return (0); } memset(cfg, 0, sizeof(cfg)); if (type == UE_ISOCHRONOUS) { /* * Isochronous transfers are special in that they don't fit * into the BULK/INTR/CONTROL transfer model. */ cfg[0].type = type; cfg[0].endpoint = addr & UE_ADDR; cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN); cfg[0].callback = &usb_linux_isoc_callback; cfg[0].bufsize = 0; /* use wMaxPacketSize */ cfg[0].frames = usb_max_isoc_frames(dev); cfg[0].flags.proxy_buffer = 1; #if 0 /* * The Linux USB API allows non back-to-back * isochronous frames which we do not support. If the * isochronous frames are not back-to-back we need to * do a copy, and then we need a buffer for * that. Enable this at your own risk. */ cfg[0].flags.ext_buffer = 1; #endif cfg[0].flags.short_xfer_ok = 1; bcopy(cfg, cfg + 1, sizeof(*cfg)); /* Allocate and setup two generic FreeBSD USB transfers */ if (usbd_transfer_setup(dev, &uhe->bsd_iface_index, uhe->bsd_xfer, cfg, 2, uhe, &Giant)) { return (-EINVAL); } } else { if (bufsize > (1 << 22)) { /* limit buffer size */ bufsize = (1 << 22); } /* Allocate and setup one generic FreeBSD USB transfer */ cfg[0].type = type; cfg[0].endpoint = addr & UE_ADDR; cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN); cfg[0].callback = &usb_linux_non_isoc_callback; cfg[0].bufsize = bufsize; cfg[0].flags.ext_buffer = 1; /* enable zero-copy */ cfg[0].flags.proxy_buffer = 1; cfg[0].flags.short_xfer_ok = 1; if (usbd_transfer_setup(dev, &uhe->bsd_iface_index, uhe->bsd_xfer, cfg, 1, uhe, &Giant)) { return (-EINVAL); } } return (0); }
/*------------------------------------------------------------------------* * usb_linux_create_usb_device * * The following function is used to build up a per USB device * structure tree that mimics the Linux one. Zero is returned * upon success. *------------------------------------------------------------------------*/ static int usb_linux_create_usb_device(struct usb_device *udev, device_t dev) { struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev); struct usb_descriptor *desc; struct usb_interface_descriptor *id; struct usb_endpoint_descriptor *ed; struct usb_interface *p_ui = NULL; struct usb_host_interface *p_uhi = NULL; struct usb_host_endpoint *p_uhe = NULL; usb_size_t size; uint16_t niface_total; uint16_t nedesc; uint16_t iface_no_curr; uint16_t iface_index; uint8_t pass; uint8_t iface_no; /* * We do two passes. One pass for computing the necessary memory size * and one pass to initialize all the allocated memory structures. */ for (pass = 0; pass < 2; pass++) { iface_no_curr = 0xFFFF; niface_total = 0; iface_index = 0; nedesc = 0; desc = NULL; /* * Iterate over all the USB descriptors. Use the USB config * descriptor pointer provided by the FreeBSD USB stack.
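		 * A typical configuration lists its descriptors in bus
		 * order, e.g. INTERFACE (alt 0), its ENDPOINTs,
		 * INTERFACE (alt 1), its ENDPOINTs, and so on, which is
		 * why alternate settings sharing one "bInterfaceNumber"
		 * end up grouped under a single "usb_interface" below
		 * (editor's note).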
*/ while ((desc = usb_desc_foreach(cd, desc))) { /* * Build up a tree according to the descriptors we * find: */ switch (desc->bDescriptorType) { case UDESC_DEVICE: break; case UDESC_ENDPOINT: ed = (void *)desc; if ((ed->bLength < sizeof(*ed)) || (iface_index == 0)) break; if (p_uhe) { bcopy(ed, &p_uhe->desc, sizeof(p_uhe->desc)); p_uhe->bsd_iface_index = iface_index - 1; TAILQ_INIT(&p_uhe->bsd_urb_list); p_uhe++; } if (p_uhi) { (p_uhi - 1)->desc.bNumEndpoints++; } nedesc++; break; case UDESC_INTERFACE: id = (void *)desc; if (id->bLength < sizeof(*id)) break; if (p_uhi) { bcopy(id, &p_uhi->desc, sizeof(p_uhi->desc)); p_uhi->desc.bNumEndpoints = 0; p_uhi->endpoint = p_uhe; p_uhi->string = ""; p_uhi->bsd_iface_index = iface_index; p_uhi++; } iface_no = id->bInterfaceNumber; niface_total++; if (iface_no_curr != iface_no) { if (p_ui) { p_ui->altsetting = p_uhi - 1; p_ui->cur_altsetting = p_uhi - 1; p_ui->bsd_iface_index = iface_index; p_ui->linux_udev = udev; p_ui++; } iface_no_curr = iface_no; iface_index++; } break; default: break; } } if (pass == 0) { size = (sizeof(*p_uhe) * nedesc) + (sizeof(*p_ui) * iface_index) + (sizeof(*p_uhi) * niface_total); p_uhe = malloc(size, M_USBDEV, M_WAITOK | M_ZERO); p_ui = (void *)(p_uhe + nedesc); p_uhi = (void *)(p_ui + iface_index); udev->linux_iface_start = p_ui; udev->linux_iface_end = p_ui + iface_index; udev->linux_endpoint_start = p_uhe; udev->linux_endpoint_end = p_uhe + nedesc; udev->devnum = device_get_unit(dev); bcopy(&udev->ddesc, &udev->descriptor, sizeof(udev->descriptor)); bcopy(udev->ctrl_ep.edesc, &udev->ep0.desc, sizeof(udev->ep0.desc)); } } return (0); }
/*------------------------------------------------------------------------* * usb_alloc_urb * * This function should always be used when you allocate a URB for * use with the USB Linux stack. In case of an isochronous transfer * you must specify the maximum number of "iso_packets" which you * plan to transfer per URB. This function is always blocking, and * "mem_flags" is not used the way it is on Linux. *------------------------------------------------------------------------*/ struct urb * usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags) { struct urb *urb; usb_size_t size; if (iso_packets == 0xFFFF) { /* * FreeBSD specific magic value to ask for control transfer * memory allocation: */ size = sizeof(*urb) + sizeof(struct usb_device_request) + mem_flags; } else { size = sizeof(*urb) + (iso_packets * sizeof(urb->iso_frame_desc[0])); } urb = malloc(size, M_USBDEV, M_WAITOK | M_ZERO); cv_init(&urb->cv_wait, "URBWAIT"); if (iso_packets == 0xFFFF) { urb->setup_packet = (void *)(urb + 1); urb->transfer_buffer = (void *)(urb->setup_packet + sizeof(struct usb_device_request)); } else { urb->number_of_packets = iso_packets; } return (urb); }
/*------------------------------------------------------------------------* * usb_find_host_endpoint * * The following function will return the Linux USB host endpoint * structure that matches the given endpoint type and endpoint * value. If no match is found, NULL is returned. This function is not * part of the Linux USB API and is only used internally.
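 *
 * Example (editor's sketch; the endpoint address 0x81 is
 * hypothetical): looking up a bulk IN endpoint:
 *
 *	uhe = usb_find_host_endpoint(dev, UE_BULK, 0x81);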
*------------------------------------------------------------------------*/ struct usb_host_endpoint * usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep) { struct usb_host_endpoint *uhe; struct usb_host_endpoint *uhe_end; struct usb_host_interface *uhi; struct usb_interface *ui; uint8_t ea; uint8_t at; uint8_t mask; if (dev == NULL) { return (NULL); } if (type == UE_CONTROL) { mask = UE_ADDR; } else { mask = (UE_DIR_IN | UE_DIR_OUT | UE_ADDR); } ep &= mask; /* * Iterate over all the interfaces searching the selected alternate * setting only, and all belonging endpoints. */ for (ui = dev->linux_iface_start; ui != dev->linux_iface_end; ui++) { uhi = ui->cur_altsetting; if (uhi) { uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints; for (uhe = uhi->endpoint; uhe != uhe_end; uhe++) { ea = uhe->desc.bEndpointAddress; at = uhe->desc.bmAttributes; if (((ea & mask) == ep) && ((at & UE_XFERTYPE) == type)) { return (uhe); } } } } if ((type == UE_CONTROL) && ((ep & UE_ADDR) == 0)) { return (&dev->ep0); } return (NULL); }
/*------------------------------------------------------------------------* * usb_altnum_to_altsetting * * The following function returns a pointer to an alternate setting by * index given a "usb_interface" pointer. If the alternate setting by * index does not exist, NULL is returned. An alternate setting is a * variant of an interface, usually with slightly different * characteristics. *------------------------------------------------------------------------*/ struct usb_host_interface * usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index) { if (alt_index >= intf->num_altsetting) { return (NULL); } return (intf->altsetting + alt_index); }
/*------------------------------------------------------------------------* * usb_ifnum_to_if * * The following function looks up a USB interface by * "bInterfaceNumber". If no match is found, NULL is returned. *------------------------------------------------------------------------*/ struct usb_interface * usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no) { struct usb_interface *p_ui; for (p_ui = dev->linux_iface_start; p_ui != dev->linux_iface_end; p_ui++) { if ((p_ui->num_altsetting > 0) && (p_ui->altsetting->desc.bInterfaceNumber == iface_no)) { return (p_ui); } } return (NULL); }
/*------------------------------------------------------------------------* * usb_buffer_alloc *------------------------------------------------------------------------*/ void * usb_buffer_alloc(struct usb_device *dev, usb_size_t size, uint16_t mem_flags, uint8_t *dma_addr) { return (malloc(size, M_USBDEV, M_WAITOK | M_ZERO)); }
/*------------------------------------------------------------------------* * usbd_get_intfdata *------------------------------------------------------------------------*/ void * usbd_get_intfdata(struct usb_interface *intf) { return (intf->bsd_priv_sc); }
/*------------------------------------------------------------------------* * usb_linux_register * * The following function is used by the "USB_DRIVER_EXPORT()" macro, * and is used to register a Linux USB driver, so that its * "usb_device_id" structures get searched at probe time. This * function is not part of the Linux USB API, and is for internal use * only.
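 *
 * A Linux-style driver typically reaches this function through
 * "USB_DRIVER_EXPORT()" with a structure along these lines (editor's
 * sketch; the callback names are hypothetical):
 *
 *	static struct usb_driver my_driver = {
 *		.probe = &my_probe,
 *		.disconnect = &my_disconnect,
 *		.id_table = my_id_table,
 *	};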
*------------------------------------------------------------------------*/ void usb_linux_register(void *arg) { struct usb_driver *drv = arg; mtx_lock(&Giant); LIST_INSERT_HEAD(&usb_linux_driver_list, drv, linux_driver_list); mtx_unlock(&Giant); usb_needs_explore_all(); }
/*------------------------------------------------------------------------* * usb_linux_deregister * * The following function is used by the "USB_DRIVER_EXPORT()" macro, * and is used to deregister a Linux USB driver. This function will * ensure that all driver instances belonging to the Linux USB device * driver in question get detached before the driver is * unloaded. This function is not part of the Linux USB API, and is * for internal use only. *------------------------------------------------------------------------*/ void usb_linux_deregister(void *arg) { struct usb_driver *drv = arg; struct usb_linux_softc *sc; repeat: mtx_lock(&Giant); LIST_FOREACH(sc, &usb_linux_attached_list, sc_attached_list) { if (sc->sc_udrv == drv) { mtx_unlock(&Giant); + bus_topo_lock(); device_detach(sc->sc_fbsd_dev); + bus_topo_unlock(); goto repeat; } } LIST_REMOVE(drv, linux_driver_list); mtx_unlock(&Giant); }
/*------------------------------------------------------------------------* * usb_linux_free_device * * The following function is only used by the FreeBSD USB stack, to * clean up and free the memory that was allocated when a Linux USB * device was attached. *------------------------------------------------------------------------*/ void usb_linux_free_device(struct usb_device *dev) { struct usb_host_endpoint *uhe; struct usb_host_endpoint *uhe_end; uhe = dev->linux_endpoint_start; uhe_end = dev->linux_endpoint_end; while (uhe != uhe_end) { usb_setup_endpoint(dev, uhe, 0); uhe++; } usb_setup_endpoint(dev, &dev->ep0, 0); free(dev->linux_endpoint_start, M_USBDEV); }
/*------------------------------------------------------------------------* * usb_buffer_free *------------------------------------------------------------------------*/ void usb_buffer_free(struct usb_device *dev, usb_size_t size, void *addr, uint8_t dma_addr) { free(addr, M_USBDEV); }
/*------------------------------------------------------------------------* * usb_free_urb *------------------------------------------------------------------------*/ void usb_free_urb(struct urb *urb) { if (urb == NULL) { return; } /* make sure that the current URB is not active */ usb_kill_urb(urb); /* destroy condition variable */ cv_destroy(&urb->cv_wait); /* just free it */ free(urb, M_USBDEV); }
/*------------------------------------------------------------------------* * usb_init_urb * * The following function can be used to initialize a custom URB. It * is not recommended to use this function. Use "usb_alloc_urb()" * instead. *------------------------------------------------------------------------*/ void usb_init_urb(struct urb *urb) { if (urb == NULL) { return; } memset(urb, 0, sizeof(*urb)); }
/*------------------------------------------------------------------------* * usb_kill_urb *------------------------------------------------------------------------*/ void usb_kill_urb(struct urb *urb) { usb_unlink_urb_sub(urb, 1); }
/*------------------------------------------------------------------------* * usb_set_intfdata * * The following function sets the per Linux USB interface private * data pointer. It is used by most Linux USB device drivers.
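 *
 * It pairs with "usbd_get_intfdata()" above; a driver typically does
 * the following (editor's sketch, "sc" being the driver's softc):
 *
 *	usb_set_intfdata(intf, sc);	(in the probe callback)
 *	sc = usbd_get_intfdata(intf);	(later, e.g. in disconnect)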
*------------------------------------------------------------------------*/ void usb_set_intfdata(struct usb_interface *intf, void *data) { intf->bsd_priv_sc = data; }
/*------------------------------------------------------------------------* * usb_linux_cleanup_interface * * The following function will release all FreeBSD USB transfers * associated with a Linux USB interface. It is for internal use only. *------------------------------------------------------------------------*/ static void usb_linux_cleanup_interface(struct usb_device *dev, struct usb_interface *iface) { struct usb_host_interface *uhi; struct usb_host_interface *uhi_end; struct usb_host_endpoint *uhe; struct usb_host_endpoint *uhe_end; uhi = iface->altsetting; uhi_end = iface->altsetting + iface->num_altsetting; while (uhi != uhi_end) { uhe = uhi->endpoint; uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints; while (uhe != uhe_end) { usb_setup_endpoint(dev, uhe, 0); uhe++; } uhi++; } }
/*------------------------------------------------------------------------* * usb_linux_wait_complete * * The following function is used by "usb_start_wait_urb()" to wake it * up when a USB transfer has finished. *------------------------------------------------------------------------*/ static void usb_linux_wait_complete(struct urb *urb) { if (urb->transfer_flags & URB_IS_SLEEPING) { cv_signal(&urb->cv_wait); } urb->transfer_flags &= ~URB_WAIT_WAKEUP; }
/*------------------------------------------------------------------------* * usb_linux_complete *------------------------------------------------------------------------*/ static void usb_linux_complete(struct usb_xfer *xfer) { struct urb *urb; urb = usbd_xfer_get_priv(xfer); usbd_xfer_set_priv(xfer, NULL); if (urb->complete) { (urb->complete) (urb); } }
/*------------------------------------------------------------------------* * usb_linux_isoc_callback * * The following is the FreeBSD isochronous USB callback. Isochronous * frames are USB packets transferred 1000 or 8000 times per second, * depending on whether a full- or high-speed USB transfer is * used.
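 * (Editor's note: the two rates correspond to the 1 millisecond
 * full-speed frame and the 125 microsecond high-speed microframe
 * defined by the USB specification.)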
*------------------------------------------------------------------------*/ static void usb_linux_isoc_callback(struct usb_xfer *xfer, usb_error_t error) { usb_frlength_t max_frame = xfer->max_frame_size; usb_frlength_t offset; usb_frcount_t x; struct urb *urb = usbd_xfer_get_priv(xfer); struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer); struct usb_iso_packet_descriptor *uipd; DPRINTF("\n"); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: if (urb->bsd_isread) { /* copy in data with regard to the URB */ offset = 0; for (x = 0; x < urb->number_of_packets; x++) { uipd = urb->iso_frame_desc + x; if (uipd->length > xfer->frlengths[x]) { if (urb->transfer_flags & URB_SHORT_NOT_OK) { /* XXX should be EREMOTEIO */ uipd->status = -EPIPE; } else { uipd->status = 0; } } else { uipd->status = 0; } uipd->actual_length = xfer->frlengths[x]; if (!xfer->flags.ext_buffer) { usbd_copy_out(xfer->frbuffers, offset, USB_ADD_BYTES(urb->transfer_buffer, uipd->offset), uipd->actual_length); } offset += max_frame; } } else { for (x = 0; x < urb->number_of_packets; x++) { uipd = urb->iso_frame_desc + x; uipd->actual_length = xfer->frlengths[x]; uipd->status = 0; } } urb->actual_length = xfer->actlen; /* check for short transfer */ if (xfer->actlen < xfer->sumlen) { /* short transfer */ if (urb->transfer_flags & URB_SHORT_NOT_OK) { /* XXX should be EREMOTEIO */ urb->status = -EPIPE; } else { urb->status = 0; } } else { /* success */ urb->status = 0; } /* call callback */ usb_linux_complete(xfer); case USB_ST_SETUP: tr_setup: if (xfer->priv_fifo == NULL) { /* get next transfer */ urb = TAILQ_FIRST(&uhe->bsd_urb_list); if (urb == NULL) { /* nothing to do */ return; } TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->bsd_urb_list.tqe_prev = NULL; x = xfer->max_frame_count; if (urb->number_of_packets > x) { /* XXX simply truncate the transfer */ urb->number_of_packets = x; } } else { DPRINTF("Already got a transfer\n"); /* already got a transfer (should not happen) */ urb = usbd_xfer_get_priv(xfer); } urb->bsd_isread = (uhe->desc.bEndpointAddress & UE_DIR_IN) ? 
1 : 0; if (xfer->flags.ext_buffer) { /* set virtual address to load */ usbd_xfer_set_frame_data(xfer, 0, urb->transfer_buffer, 0); } if (!(urb->bsd_isread)) { /* copy out data with regard to the URB */ offset = 0; for (x = 0; x < urb->number_of_packets; x++) { uipd = urb->iso_frame_desc + x; usbd_xfer_set_frame_len(xfer, x, uipd->length); if (!xfer->flags.ext_buffer) { usbd_copy_in(xfer->frbuffers, offset, USB_ADD_BYTES(urb->transfer_buffer, uipd->offset), uipd->length); } offset += uipd->length; } } else { /* * compute the transfer length into the "offset" * variable */ offset = urb->number_of_packets * max_frame; /* setup "frlengths" array */ for (x = 0; x < urb->number_of_packets; x++) { uipd = urb->iso_frame_desc + x; usbd_xfer_set_frame_len(xfer, x, max_frame); } } usbd_xfer_set_priv(xfer, urb); xfer->flags.force_short_xfer = 0; xfer->timeout = urb->timeout; xfer->nframes = urb->number_of_packets; usbd_transfer_submit(xfer); return; default: /* Error */ if (xfer->error == USB_ERR_CANCELLED) { urb->status = -ECONNRESET; } else { urb->status = -EPIPE; /* stalled */ } /* Set zero for "actual_length" */ urb->actual_length = 0; /* Set zero for "actual_length" */ for (x = 0; x < urb->number_of_packets; x++) { urb->iso_frame_desc[x].actual_length = 0; urb->iso_frame_desc[x].status = urb->status; } /* call callback */ usb_linux_complete(xfer); if (xfer->error == USB_ERR_CANCELLED) { /* we need to return in this case */ return; } goto tr_setup; } } /*------------------------------------------------------------------------* * usb_linux_non_isoc_callback * * The following is the FreeBSD BULK/INTERRUPT and CONTROL USB * callback. It dequeues Linux USB stack compatible URB's, transforms * the URB fields into a FreeBSD USB transfer, and defragments the USB * transfer as required. When the transfer is complete the "complete" * callback is called. *------------------------------------------------------------------------*/ static void usb_linux_non_isoc_callback(struct usb_xfer *xfer, usb_error_t error) { enum { REQ_SIZE = sizeof(struct usb_device_request) }; struct urb *urb = usbd_xfer_get_priv(xfer); struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer); uint8_t *ptr; usb_frlength_t max_bulk = usbd_xfer_max_len(xfer); uint8_t data_frame = xfer->flags_int.control_xfr ? 
1 : 0; DPRINTF("\n"); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: if (xfer->flags_int.control_xfr) { /* don't transfer the setup packet again: */ usbd_xfer_set_frame_len(xfer, 0, 0); } if (urb->bsd_isread && (!xfer->flags.ext_buffer)) { /* copy in data with regard to the URB */ usbd_copy_out(xfer->frbuffers + data_frame, 0, urb->bsd_data_ptr, xfer->frlengths[data_frame]); } urb->bsd_length_rem -= xfer->frlengths[data_frame]; urb->bsd_data_ptr += xfer->frlengths[data_frame]; urb->actual_length += xfer->frlengths[data_frame]; /* check for short transfer */ if (xfer->actlen < xfer->sumlen) { urb->bsd_length_rem = 0; /* short transfer */ if (urb->transfer_flags & URB_SHORT_NOT_OK) { urb->status = -EPIPE; } else { urb->status = 0; } } else { /* check remainder */ if (urb->bsd_length_rem > 0) { goto setup_bulk; } /* success */ urb->status = 0; } /* call callback */ usb_linux_complete(xfer); case USB_ST_SETUP: tr_setup: /* get next transfer */ urb = TAILQ_FIRST(&uhe->bsd_urb_list); if (urb == NULL) { /* nothing to do */ return; } TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->bsd_urb_list.tqe_prev = NULL; usbd_xfer_set_priv(xfer, urb); xfer->flags.force_short_xfer = 0; xfer->timeout = urb->timeout; if (xfer->flags_int.control_xfr) { /* * USB control transfers need special handling. * First copy in the header, then copy in data! */ if (!xfer->flags.ext_buffer) { usbd_copy_in(xfer->frbuffers, 0, urb->setup_packet, REQ_SIZE); usbd_xfer_set_frame_len(xfer, 0, REQ_SIZE); } else { /* set virtual address to load */ usbd_xfer_set_frame_data(xfer, 0, urb->setup_packet, REQ_SIZE); } ptr = urb->setup_packet; /* setup data transfer direction and length */ urb->bsd_isread = (ptr[0] & UT_READ) ? 1 : 0; urb->bsd_length_rem = ptr[6] | (ptr[7] << 8); } else { /* setup data transfer direction */ urb->bsd_length_rem = urb->transfer_buffer_length; urb->bsd_isread = (uhe->desc.bEndpointAddress & UE_DIR_IN) ? 
1 : 0; } urb->bsd_data_ptr = urb->transfer_buffer; urb->actual_length = 0; setup_bulk: if (max_bulk > urb->bsd_length_rem) { max_bulk = urb->bsd_length_rem; } /* check if we need to force a short transfer */ if ((max_bulk == urb->bsd_length_rem) && (urb->transfer_flags & URB_ZERO_PACKET) && (!xfer->flags_int.control_xfr)) { xfer->flags.force_short_xfer = 1; } /* check if we need to copy in data */ if (xfer->flags.ext_buffer) { /* set virtual address to load */ usbd_xfer_set_frame_data(xfer, data_frame, urb->bsd_data_ptr, max_bulk); } else if (!urb->bsd_isread) { /* copy out data with regard to the URB */ usbd_copy_in(xfer->frbuffers + data_frame, 0, urb->bsd_data_ptr, max_bulk); usbd_xfer_set_frame_len(xfer, data_frame, max_bulk); } if (xfer->flags_int.control_xfr) { if (max_bulk > 0) { xfer->nframes = 2; } else { xfer->nframes = 1; } } else { xfer->nframes = 1; } usbd_transfer_submit(xfer); return; default: if (xfer->error == USB_ERR_CANCELLED) { urb->status = -ECONNRESET; } else { urb->status = -EPIPE; } /* Set zero for "actual_length" */ urb->actual_length = 0; /* call callback */ usb_linux_complete(xfer); if (xfer->error == USB_ERR_CANCELLED) { /* we need to return in this case */ return; } goto tr_setup; } } /*------------------------------------------------------------------------* * usb_fill_bulk_urb *------------------------------------------------------------------------*/ void usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev, struct usb_host_endpoint *uhe, void *buf, int length, usb_complete_t callback, void *arg) { urb->dev = udev; urb->endpoint = uhe; urb->transfer_buffer = buf; urb->transfer_buffer_length = length; urb->complete = callback; urb->context = arg; } /*------------------------------------------------------------------------* * usb_bulk_msg * * NOTE: This function can also be used for interrupt endpoints! * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ int usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe, void *data, int len, uint16_t *pactlen, usb_timeout_t timeout) { struct urb *urb; int err; if (uhe == NULL) return (-EINVAL); if (len < 0) return (-EINVAL); err = usb_setup_endpoint(udev, uhe, 4096 /* bytes */); if (err) return (err); urb = usb_alloc_urb(0, 0); usb_fill_bulk_urb(urb, udev, uhe, data, len, usb_linux_wait_complete, NULL); err = usb_start_wait_urb(urb, timeout, pactlen); usb_free_urb(urb); return (err); } MODULE_DEPEND(linuxkpi, usb, 1, 1, 1); static void usb_linux_init(void *arg) { /* register our function */ usb_linux_free_device_p = &usb_linux_free_device; } SYSINIT(usb_linux_init, SI_SUB_LOCK, SI_ORDER_FIRST, usb_linux_init, NULL); SYSUNINIT(usb_linux_unload, SI_SUB_LOCK, SI_ORDER_ANY, usb_linux_unload, NULL); diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c index 6e3cca084fe0..c783c390872b 100644 --- a/sys/dev/aac/aac.c +++ b/sys/dev/aac/aac.c @@ -1,3815 +1,3815 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. */ #define AAC_DRIVERNAME "aac" #include "opt_aac.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aac_startup(void *arg); static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f); static void aac_get_bus_info(struct aac_softc *sc); static void aac_daemon(void *arg); /* Command Processing */ static void aac_timeout(struct aac_softc *sc); static void aac_complete(void *context, int pending); static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); static void aac_bio_complete(struct aac_command *cm); static int aac_wait_command(struct aac_command *cm); static void aac_command_thread(struct aac_softc *sc); /* Command Buffer Management */ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static int aac_alloc(struct aac_softc *sc); static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_check_firmware(struct aac_softc *sc); static int aac_init(struct aac_softc *sc); static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp); static int aac_setup_intr(struct aac_softc *sc); static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm); static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr); static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib); /* StrongARM interface */ static int aac_sa_get_fwstatus(struct aac_softc *sc); static void aac_sa_qnotify(struct aac_softc *sc, int qbit); static int aac_sa_get_istatus(struct aac_softc *sc); static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); static void aac_sa_set_interrupts(struct aac_softc *sc, int 
enable); const struct aac_interface aac_sa_interface = { aac_sa_get_fwstatus, aac_sa_qnotify, aac_sa_get_istatus, aac_sa_clear_istatus, aac_sa_set_mailbox, aac_sa_get_mailbox, aac_sa_set_interrupts, NULL, NULL, NULL }; /* i960Rx interface */ static int aac_rx_get_fwstatus(struct aac_softc *sc); static void aac_rx_qnotify(struct aac_softc *sc, int qbit); static int aac_rx_get_istatus(struct aac_softc *sc); static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rx_get_outb_queue(struct aac_softc *sc); static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rx_interface = { aac_rx_get_fwstatus, aac_rx_qnotify, aac_rx_get_istatus, aac_rx_clear_istatus, aac_rx_set_mailbox, aac_rx_get_mailbox, aac_rx_set_interrupts, aac_rx_send_command, aac_rx_get_outb_queue, aac_rx_set_outb_queue }; /* Rocket/MIPS interface */ static int aac_rkt_get_fwstatus(struct aac_softc *sc); static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); static int aac_rkt_get_istatus(struct aac_softc *sc); static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask); static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rkt_get_outb_queue(struct aac_softc *sc); static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rkt_interface = { aac_rkt_get_fwstatus, aac_rkt_qnotify, aac_rkt_get_istatus, aac_rkt_clear_istatus, aac_rkt_set_mailbox, aac_rkt_get_mailbox, aac_rkt_set_interrupts, aac_rkt_send_command, aac_rkt_get_outb_queue, aac_rkt_set_outb_queue }; /* Debugging and Diagnostics */ static void aac_describe_controller(struct aac_softc *sc); static const char *aac_describe_code(const struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_ioctl_t aac_ioctl; static d_poll_t aac_poll; static void aac_cdevpriv_dtor(void *arg); static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_open_aif(struct aac_softc *sc, caddr_t arg); static int aac_close_aif(struct aac_softc *sc, caddr_t arg); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg); static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid); static struct cdevsw aac_cdevsw = { .d_version = D_VERSION, .d_flags = 
0, .d_open = aac_open, .d_ioctl = aac_ioctl, .d_poll = aac_poll, .d_name = "aac", }; static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "AAC driver parameters"); /* * Device Interface */ /* * Initialize the controller and softc */ int aac_attach(struct aac_softc *sc) { int error, unit; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Initialize per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); aac_initq_bio(sc); /* * Initialize command-completion task. */ TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Check that the firmware on the card is supported. */ if ((error = aac_check_firmware(sc)) != 0) return(error); /* * Initialize locks */ mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF); mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF); mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF); TAILQ_INIT(&sc->aac_container_tqh); TAILQ_INIT(&sc->aac_ev_cmfree); /* Initialize the clock daemon callout. */ callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); /* * Initialize the adapter. */ if ((error = aac_alloc(sc)) != 0) return(error); if ((error = aac_init(sc)) != 0) return(error); /* * Allocate and connect our interrupt. */ if ((error = aac_setup_intr(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Add sysctls. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)), OID_AUTO, "firmware_build", CTLFLAG_RD, &sc->aac_revision.buildNumber, 0, "firmware build number"); /* * Register to probe our containers later. */ sc->aac_ich.ich_func = aac_startup; sc->aac_ich.ich_arg = sc; if (config_intrhook_establish(&sc->aac_ich) != 0) { device_printf(sc->aac_dev, "can't establish configuration hook\n"); return(ENXIO); } /* * Make the control device. 
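	 * The node is published as /dev/aac<unit>; the "afa" and "hpn"
	 * aliases created below are compatibility names, presumably for
	 * management tools that expect the older Adaptec ("afa") and HP
	 * ("hpn") device names (editor's note).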
*/ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "aac%d", unit); (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ if (kproc_create((void(*)(void *))aac_command_thread, sc, &sc->aifthread, 0, 0, "aac%daif", unit)) panic("Could not create AIF thread"); /* Register the shutdown method to only be called post-dump */ if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); /* Register with CAM for the non-DASD devices */ if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) { TAILQ_INIT(&sc->aac_sim_tqh); aac_get_bus_info(sc); } mtx_lock(&sc->aac_io_lock); callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); mtx_unlock(&sc->aac_io_lock); return(0); } static void aac_daemon(void *arg) { struct timeval tv; struct aac_softc *sc; struct aac_fib *fib; sc = arg; mtx_assert(&sc->aac_io_lock, MA_OWNED); if (callout_pending(&sc->aac_daemontime) || callout_active(&sc->aac_daemontime) == 0) return; getmicrotime(&tv); aac_alloc_sync_fib(sc, &fib); *(uint32_t *)fib->data = tv.tv_sec; aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t)); aac_release_sync_fib(sc); callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); } void aac_add_event(struct aac_softc *sc, struct aac_event *event) { switch (event->ev_type & AAC_EVENT_MASK) { case AAC_EVENT_CMFREE: TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); break; default: device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", event->ev_type); break; } } /* * Request information of container #cid */ static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid) { struct aac_mntinfo *mi; mi = (struct aac_mntinfo *)&fib->data[0]; /* use 64-bit LBA if enabled */ mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ? VM_NameServe64 : VM_NameServe; mi->MntType = FT_FILESYS; mi->MntCount = cid; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_mntinfo))) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); return (NULL); } return ((struct aac_mntinforesp *)&fib->data[0]); } /* * Probe for containers, create disks. */ static void aac_startup(void *arg) { struct aac_softc *sc; struct aac_fib *fib; struct aac_mntinforesp *mir; int count = 0, i = 0; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); /* loop over possible containers */ do { if ((mir = aac_get_container_info(sc, fib, i)) == NULL) continue; if (i == 0) count = mir->MntRespCount; aac_add_container(sc, mir, 0); i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* poke the bus to actually attach the child devices */ if (bus_generic_attach(sc->aac_dev)) device_printf(sc->aac_dev, "bus_generic_attach failed\n"); /* disconnect ourselves from the intrhook chain */ config_intrhook_disestablish(&sc->aac_ich); /* enable interrupts now */ AAC_UNMASK_INTERRUPTS(sc); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) { struct aac_container *co; device_t child; /* * Check container volume type for validity. 
Note that many of * the possible types may never show up. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { co = (struct aac_container *)malloc(sizeof *co, M_AACBUF, M_NOWAIT | M_ZERO); if (co == NULL) panic("Out of memory?!"); fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", mir->MntTable[0].ObjectId, mir->MntTable[0].FileSystemName, mir->MntTable[0].Capacity, mir->MntTable[0].VolType); if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) device_printf(sc->aac_dev, "device_add_child failed\n"); else device_set_ivars(child, co); device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); co->co_disk = child; co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); mtx_lock(&sc->aac_container_lock); TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); } }
/* * Allocate resources associated with (sc) */ static int aac_alloc(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_sectors << 9, /* maxsize */ sc->aac_sg_tablesize, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->aac_io_lock, /* lockfuncarg */ &sc->aac_buffer_dmat)) { device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for mapping FIBs into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsize */ 1, /* nsegments */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_fib_dmat)) { device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for the common structure and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 8192 + sizeof(struct aac_common), /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return (ENOMEM); } /* * Work around a bug in the 2120 and 2200 that cannot DMA commands * below address 8192 in physical memory. * XXX If the padding is not needed, can it be put to use instead * of ignored?
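	 * (Editor's note: the map callback below records the bus address;
	 * if the allocation happens to land below physical address 8192,
	 * both the pointer and the bus address are simply bumped past the
	 * problem region and the padding is left unused.)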
*/ (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, 8192 + sizeof(*sc->aac_common), aac_common_map, sc, 0); if (sc->aac_common_busaddr < 8192) { sc->aac_common = (struct aac_common *) ((uint8_t *)sc->aac_common + 8192); sc->aac_common_busaddr += 8192; } bzero(sc->aac_common, sizeof(*sc->aac_common)); /* Allocate some FIBs and associated command structs */ TAILQ_INIT(&sc->aac_fibmap_tqh); sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), M_AACBUF, M_WAITOK|M_ZERO); while (sc->total_fibs < sc->aac_max_fibs) { if (aac_alloc_commands(sc) != 0) break; } if (sc->total_fibs == 0) return (ENOMEM); return (0); } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void aac_free(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* remove the control device */ if (sc->aac_dev_t != NULL) destroy_dev(sc->aac_dev_t); /* throw away any FIB buffers, discard the FIB DMA tag */ aac_free_commands(sc); if (sc->aac_fib_dmat) bus_dma_tag_destroy(sc->aac_fib_dmat); free(sc->aac_commands, M_AACBUF); /* destroy the common area */ if (sc->aac_common) { bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, sc->aac_common_dmamap); } if (sc->aac_common_dmat) bus_dma_tag_destroy(sc->aac_common_dmat); /* disconnect the interrupt handler */ if (sc->aac_intr) bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); if (sc->aac_irq != NULL) { bus_release_resource(sc->aac_dev, SYS_RES_IRQ, rman_get_rid(sc->aac_irq), sc->aac_irq); pci_release_msi(sc->aac_dev); } /* destroy data-transfer DMA tag */ if (sc->aac_buffer_dmat) bus_dma_tag_destroy(sc->aac_buffer_dmat); /* destroy the parent DMA tag */ if (sc->aac_parent_dmat) bus_dma_tag_destroy(sc->aac_parent_dmat); /* release the register window mapping */ if (sc->aac_regs_res0 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0); if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1); } /* * Disconnect from the controller completely, in preparation for unload. 
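 * (Editor's note on ordering, taken from the code below: the clock
 * callout and the AIF kernel thread are stopped first, then the
 * container and CAM SIM children are deleted, and only then is the
 * controller shut down and its resources freed.)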
*/ int aac_detach(device_t dev) { struct aac_softc *sc; struct aac_container *co; struct aac_sim *sim; int error; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); callout_drain(&sc->aac_daemontime); mtx_lock(&sc->aac_io_lock); while (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_EXIT; wakeup(sc->aifthread); msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0); } mtx_unlock(&sc->aac_io_lock); KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0, ("%s: invalid detach state", __func__)); /* Remove the child containers */ while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { error = device_delete_child(dev, co->co_disk); if (error) return (error); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACBUF); } /* Remove the CAM SIMs */ while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); error = device_delete_child(dev, sim->sim_dev); if (error) return (error); free(sim, M_AACBUF); } if ((error = aac_shutdown(dev))) return(error); EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); aac_free(sc); mtx_destroy(&sc->aac_aifq_lock); mtx_destroy(&sc->aac_io_lock); mtx_destroy(&sc->aac_container_lock); return(0); } /* * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach or system shutdown. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int aac_shutdown(device_t dev) { struct aac_softc *sc; struct aac_fib *fib; struct aac_close_command *cc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; /* * Send a Container shutdown followed by a HostShutdown FIB to the * controller to convince it that we don't want to talk to it anymore. * We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); cc = (struct aac_close_command *)&fib->data[0]; bzero(cc, sizeof(struct aac_close_command)); cc->Command = VM_CloseAll; cc->ContainerId = 0xffffffff; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_close_command))) printf("FAILED.\n"); else printf("done\n"); #if 0 else { fib->data[0] = 0; /* * XXX Issuing this command to the controller makes it shut down * but also keeps it from coming back up without a reset of the * PCI bus. This is not desirable if you are just unloading the * driver module with the intent to reload it later. */ if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, fib, 1)) { printf("FAILED.\n"); } else { printf("done.\n"); } } #endif AAC_MASK_INTERRUPTS(sc); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aac_suspend(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; AAC_MASK_INTERRUPTS(sc); return(0); } /* * Bring the controller back to a state ready for operation. */ int aac_resume(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_UNMASK_INTERRUPTS(sc); return(0); } /* * Interrupt handler for NEW_COMM interface. 
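 * (Editor's note: each handle read from the outbound queue encodes its
 * meaning in the low bits; bit 1 set marks an adapter-initiated FIB
 * (AIF), otherwise "handle >> 2" indexes "sc->aac_commands" and bit 0
 * marks a "fast" completion whose FIB status is patched inline.)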
*/ void aac_new_intr(void *arg) { struct aac_softc *sc; u_int32_t index, fast; struct aac_command *cm; struct aac_fib *fib; int i; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); while (1) { index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) break; if (index & 2) { if (index == 0xfffffffe) { /* XXX This means that the controller wants * more work. Ignore it for now. */ continue; } /* AIF */ fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF, M_NOWAIT | M_ZERO); if (fib == NULL) { /* If we're really this short on memory, * hopefully breaking out of the handler will * allow something to get freed. This * actually sucks a whole lot. */ break; } index &= ~2; for (i = 0; i < sizeof(struct aac_fib)/4; ++i) ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4); aac_handle_aif(sc, fib); free(fib, M_AACBUF); /* * AIF memory is owned by the adapter, so let it * know that we are done with it. */ AAC_SET_OUTB_QUEUE(sc, index); AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); } else { fast = index & 1; cm = sc->aac_commands + (index >> 2); fib = cm->cm_fib; if (fast) { fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL; } aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; } } /* see if we can start some more I/O */ if ((sc->flags & AAC_QUEUE_FRZN) == 0) aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Interrupt filter for !NEW_COMM interface. */ int aac_filter(void *arg) { struct aac_softc *sc; u_int16_t reason; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Read the status register directly. This is faster than taking the * driver lock and reading the queues directly. It also saves having * to turn parts of the driver lock into a spin mutex, which would be * ugly. */ reason = AAC_GET_ISTATUS(sc); AAC_CLEAR_ISTATUS(sc, reason); /* handle completion processing */ if (reason & AAC_DB_RESPONSE_READY) taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete); /* controller wants to talk to us */ if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) { /* * XXX Make sure that we don't get fooled by strange messages * that start with a NULL. */ if ((reason & AAC_DB_PRINTF) && (sc->aac_common->ac_printf[0] == 0)) sc->aac_common->ac_printf[0] = 32; /* * This might miss doing the actual wakeup. However, the * msleep that this is waking up has a timeout, so it will * wake up eventually. AIFs and printfs are low enough * priority that they can handle hanging out for a few seconds * if needed. */ wakeup(sc->aifthread); } return (FILTER_HANDLED); } /* * Command Processing */ /* * Start as much queued I/O as possible on the controller */ void aac_startio(struct aac_softc *sc) { struct aac_command *cm; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); for (;;) { /* * This flag might be set if the card is out of resources. * Checking it here prevents an infinite loop of deferrals. */ if (sc->flags & AAC_QUEUE_FRZN) break; /* * Try to get a command that's been put off for lack of * resources */ cm = aac_dequeue_ready(sc); /* * Try to build a command off the bio queue (ignore error * return) */ if (cm == NULL) aac_bio_command(sc, &cm); /* nothing to do? 
*/ if (cm == NULL) break; /* don't map more than once */ if (cm->cm_flags & AAC_CMD_MAPPED) panic("aac: command %p already mapped", cm); /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. */ if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_REQ_BIO) error = bus_dmamap_load_bio( sc->aac_buffer_dmat, cm->cm_datamap, (struct bio *)cm->cm_private, aac_map_command_sg, cm, 0); else error = bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aac_map_command_sg, cm, 0); if (error == EINPROGRESS) { fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n"); sc->flags |= AAC_QUEUE_FRZN; } else if (error != 0) panic("aac_startio: unexpected error %d from " "busdma", error); } else aac_map_command_sg(cm, NULL, 0, 0); } } /* * Handle notification of one or more FIBs coming from the controller. */ static void aac_command_thread(struct aac_softc *sc) { struct aac_fib *fib; u_int32_t fib_size; int size, retval; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); sc->aifflags = AAC_AIFFLAGS_RUNNING; while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { retval = 0; if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, "aifthd", AAC_PERIODIC_INTERVAL * hz); /* * First see if any FIBs need to be allocated. This needs * to be called without the driver lock because contigmalloc * can sleep. */ if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { mtx_unlock(&sc->aac_io_lock); aac_alloc_commands(sc); mtx_lock(&sc->aac_io_lock); sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; aac_startio(sc); } /* * While we're here, check to see if any commands are stuck. * This is pretty low-priority, so it's ok if it doesn't * always fire. */ if (retval == EWOULDBLOCK) aac_timeout(sc); /* Check the hardware printf message buffer */ if (sc->aac_common->ac_printf[0] != 0) aac_print_printf(sc); /* Also check to see if the adapter has a command for us. */ if (sc->flags & AAC_FLAGS_NEW_COMM) continue; for (;;) { if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, &fib_size, &fib)) break; AAC_PRINT_FIB(sc, fib); switch (fib->Header.Command) { case AifRequest: aac_handle_aif(sc, fib); break; default: device_printf(sc->aac_dev, "unknown command " "from controller\n"); break; } if ((fib->Header.XferState == 0) || (fib->Header.StructType != AAC_FIBTYPE_TFIB)) { break; } /* Return the AIF to the controller. */ if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; *(AAC_FSAStatus*)fib->data = ST_OK; /* XXX Compute the Size field? */ size = fib->Header.Size; if (size > sizeof(struct aac_fib)) { size = sizeof(struct aac_fib); fib->Header.Size = size; } /* * Since we did not generate this command, it * cannot go through the normal * enqueue->startio chain. */ aac_enqueue_response(sc, AAC_ADAP_NORM_RESP_QUEUE, fib); } } } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aac_dev); kproc_exit(0); } /* * Process completed commands. 
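Responses are matched back to their commands through fib->Header.SenderData, which the mapping path stores as the command's index in sc->aac_commands; no pointer-sized field is needed to find the owner of a completed FIB.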
*/ static void aac_complete(void *context, int pending) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; u_int32_t fib_size; sc = (struct aac_softc *)context; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); /* pull completed commands off the queue */ for (;;) { /* look for completed FIBs on our queue */ if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, &fib)) break; /* nothing to do */ /* get the command, unmap and hand off for processing */ cm = sc->aac_commands + fib->Header.SenderData; if (cm == NULL) { AAC_PRINT_FIB(sc, fib); break; } if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0) device_printf(sc->aac_dev, "COMMAND %p COMPLETED AFTER %d SECONDS\n", cm, (int)(time_uptime-cm->cm_timestamp)); aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } } /* see if we can start some more I/O */ sc->flags &= ~AAC_QUEUE_FRZN; aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Handle a bio submitted from a disk device. */ void aac_submit_bio(struct bio *bp) { struct aac_disk *ad; struct aac_softc *sc; ad = (struct aac_disk *)bp->bio_disk->d_drv1; sc = ad->ad_controller; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* queue the BIO and try to get some work done */ aac_enqueue_bio(sc, bp); aac_startio(sc); } /* * Get a bio and build a command to go with it. */ static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; struct aac_fib *fib; struct aac_disk *ad; struct bio *bp; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the resources we will need */ cm = NULL; bp = NULL; if (aac_alloc_command(sc, &cm)) /* get a command */ goto fail; if ((bp = aac_dequeue_bio(sc)) == NULL) goto fail; /* fill out the command */ cm->cm_datalen = bp->bio_bcount; cm->cm_complete = aac_bio_complete; cm->cm_flags = AAC_REQ_BIO; cm->cm_private = bp; cm->cm_timestamp = time_uptime; /* build the FIB */ fib = cm->cm_fib; fib->Header.Size = sizeof(struct aac_fib_header); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; /* build the read/write request */ ad = (struct aac_disk *)bp->bio_disk->d_drv1; if (sc->flags & AAC_FLAGS_RAW_IO) { struct aac_raw_io *raw; raw = (struct aac_raw_io *)&fib->data[0]; fib->Header.Command = RawIo; raw->BlockNumber = (u_int64_t)bp->bio_pblkno; raw->ByteCount = bp->bio_bcount; raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; raw->BpTotal = 0; raw->BpComplete = 0; fib->Header.Size += sizeof(struct aac_raw_io); cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; if (bp->bio_cmd == BIO_READ) { raw->Flags = 1; cm->cm_flags |= AAC_CMD_DATAIN; } else { raw->Flags = 0; cm->cm_flags |= AAC_CMD_DATAOUT; } } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { fib->Header.Command = ContainerCommand; if (bp->bio_cmd == BIO_READ) { struct aac_blockread *br; br = (struct aac_blockread *)&fib->data[0]; br->Command = VM_CtBlockRead; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->BlockNumber = bp->bio_pblkno; br->ByteCount = bp->bio_bcount; fib->Header.Size += sizeof(struct aac_blockread); cm->cm_sgtable = &br->SgMap; cm->cm_flags |= AAC_CMD_DATAIN; } else { struct aac_blockwrite *bw; bw = (struct aac_blockwrite *)&fib->data[0]; 
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			mtx_unlock(&sc->aac_io_lock);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
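 *
 * A minimal sketch of the life cycle these helpers assume (all names are
 * from this file):
 *
 *	aac_alloc_command(sc, &cm);	-- take a FIB off the freelist
 *	... fill in cm->cm_fib and the cm_data/cm_complete fields ...
 *	aac_enqueue_ready(cm);
 *	aac_startio(sc);		-- map buffers, hand to the adapter
 *	... completion fires cm->cm_complete or wakes a sleeper ...
 *	aac_release_command(cm);	-- reinitialise and refile below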
*/ void aac_release_command(struct aac_command *cm) { struct aac_event *event; struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* (re)initialize the command/FIB */ cm->cm_datalen = 0; cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Flags = 0; cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialized here for debugging purposes only. */ cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; cm->cm_fib->Header.SenderData = 0; aac_enqueue_free(cm); if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); event->ev_callback(sc, event, event->ev_arg); } } /* * Map helper for command/FIB allocation. */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint64_t *fibphys; fibphys = (uint64_t *)arg; *fibphys = segs[0].ds_addr; } /* * Allocate and initialize commands/FIBs for this adapter. */ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; struct aac_fibmap *fm; uint64_t fibphys; int i, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) return (ENOMEM); fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO); if (fm == NULL) return (ENOMEM); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, BUS_DMA_NOWAIT, &fm->aac_fibmap)) { device_printf(sc->aac_dev, "Not enough contiguous memory available.\n"); free(fm, M_AACBUF); return (ENOMEM); } /* Ignore errors since this doesn't bounce */ (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size, aac_map_command_helper, &fibphys, 0); /* initialize constant fields in the command structure */ bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size); for (i = 0; i < sc->aac_max_fibs_alloc; i++) { cm = sc->aac_commands + sc->total_fibs; fm->aac_commands = cm; cm->cm_sc = sc; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size); cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size; cm->cm_index = sc->total_fibs; if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) != 0) break; mtx_lock(&sc->aac_io_lock); aac_release_command(cm); sc->total_fibs++; mtx_unlock(&sc->aac_io_lock); } if (i > 0) { mtx_lock(&sc->aac_io_lock); TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); mtx_unlock(&sc->aac_io_lock); return (0); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); return (ENOMEM); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { struct aac_fibmap *fm; struct aac_command *cm; int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); /* * We check against total_fibs to handle partially * allocated blocks. 
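The post-decrement of sc->total_fibs in the loop condition below is deliberate: it stops the loop either after aac_max_fibs_alloc entries or when the global FIB count is exhausted, whichever comes first, so a partially populated fibmap only destroys the data maps that were actually created.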
*/ for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { cm = fm->aac_commands + i; bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); } } /* * Command-mapping helper function - populate this command's s/g table. */ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; int i; cm = (struct aac_command *)arg; sc = cm->cm_sc; fib = cm->cm_fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* copy into the FIB */ if (cm->cm_sgtable != NULL) { if (fib->Header.Command == RawIo) { struct aac_sg_tableraw *sg; sg = (struct aac_sg_tableraw *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; sg->SgEntryRaw[i].Next = 0; sg->SgEntryRaw[i].Prev = 0; sg->SgEntryRaw[i].Flags = 0; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { struct aac_sg_table *sg; sg = cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry); } else { struct aac_sg_table64 *sg; sg = (struct aac_sg_table64 *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry64[i].SgAddress = segs[i].ds_addr; sg->SgEntry64[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); } } /* Fix up the address values in the FIB. Use the command array index * instead of a pointer since these fields are only 32 bits. Shift * the SenderFibAddress over to make room for the fast response bit * and for the AIF bit */ cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; /* save a pointer to the command for speedy reverse-lookup */ cm->cm_fib->Header.SenderData = cm->cm_index; if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); cm->cm_flags |= AAC_CMD_MAPPED; if (sc->flags & AAC_FLAGS_NEW_COMM) { int count = 10000000L; while (AAC_SEND_COMMAND(sc, cm) != 0) { if (--count == 0) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } DELAY(5); /* wait 5 usec. */ } } else { /* Put the FIB on the outbound queue */ if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } } } /* * Unmap a command from controller-visible space. 
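This is the inverse of the mapping above: the bus_dmamap_sync() calls flip from the PRE to the POST operations (POSTREAD for data-in, POSTWRITE for data-out) before the map is unloaded, and AAC_CMD_MAPPED is cleared so a command is never unmapped twice.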
*/ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialize the adapter. */ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_check_firmware(struct aac_softc *sc) { u_int32_t code, major, minor, options = 0, atu_size = 0; int rid, status; time_t then; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Wait for the adapter to come ready. */ then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (code & AAC_SELF_TEST_FAILED) { device_printf(sc->aac_dev, "FATAL: selftest failed\n"); return(ENXIO); } if (code & AAC_KERNEL_PANIC) { device_printf(sc->aac_dev, "FATAL: controller kernel panic"); return(ENXIO); } if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING)); /* * Retrieve the firmware version numbers. Dell PERC2/QC cards with * firmware version 1.x are not compatible with this driver. */ if (sc->flags & AAC_FLAGS_PERC2QC) { if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "Error reading firmware version\n"); return (EIO); } /* These numbers are stored as ASCII! */ major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; if (major == 1) { device_printf(sc->aac_dev, "Firmware version %d.%d is not supported.\n", major, minor); return (EINVAL); } } /* * Retrieve the capabilities/supported options word so we know what * work-arounds to enable. Some firmware revs don't support this * command. */ if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { if (status != AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return (EIO); } } else { options = AAC_GET_MAILBOX(sc, 1); atu_size = AAC_GET_MAILBOX(sc, 2); sc->supported_options = options; if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && (sc->flags & AAC_FLAGS_NO4GB) == 0) sc->flags |= AAC_FLAGS_4GB_WINDOW; if (options & AAC_SUPPORTED_NONDASD) sc->flags |= AAC_FLAGS_ENABLE_CAM; if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 && (sizeof(bus_addr_t) > 4)) { device_printf(sc->aac_dev, "Enabling 64-bit address support\n"); sc->flags |= AAC_FLAGS_SG_64BIT; } if ((options & AAC_SUPPORTED_NEW_COMM) && sc->aac_if->aif_send_command) sc->flags |= AAC_FLAGS_NEW_COMM; if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) sc->flags |= AAC_FLAGS_ARRAY_64BIT; } /* Check for broken hardware that does a lower number of commands */ sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); /* Remap mem. 
resource, if required */ if ((sc->flags & AAC_FLAGS_NEW_COMM) && atu_size > rman_get_size(sc->aac_regs_res1)) { rid = rman_get_rid(sc->aac_regs_res1); bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid, sc->aac_regs_res1); sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev, SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { sc->aac_regs_res1 = bus_alloc_resource_any( sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); return (ENXIO); } sc->flags &= ~AAC_FLAGS_NEW_COMM; } sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); if (sc->aac_hwif == AAC_HWIF_NARK) { sc->aac_regs_res0 = sc->aac_regs_res1; sc->aac_btag0 = sc->aac_btag1; sc->aac_bhandle0 = sc->aac_bhandle1; } } /* Read preferred settings */ sc->aac_max_fib_size = sizeof(struct aac_fib); sc->aac_max_sectors = 128; /* 64KB */ if (sc->flags & AAC_FLAGS_SG_64BIT) sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite64)) / sizeof(struct aac_sg_entry64); else sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite)) / sizeof(struct aac_sg_entry); if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { options = AAC_GET_MAILBOX(sc, 1); sc->aac_max_fib_size = (options & 0xFFFF); sc->aac_max_sectors = (options >> 16) << 1; options = AAC_GET_MAILBOX(sc, 2); sc->aac_sg_tablesize = (options >> 16); options = AAC_GET_MAILBOX(sc, 3); sc->aac_max_fibs = (options & 0xFFFF); } if (sc->aac_max_fib_size > PAGE_SIZE) sc->aac_max_fib_size = PAGE_SIZE; sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { sc->flags |= AAC_FLAGS_RAW_IO; device_printf(sc->aac_dev, "Enable Raw I/O\n"); } if ((sc->flags & AAC_FLAGS_RAW_IO) && (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { sc->flags |= AAC_FLAGS_LBA_64BIT; device_printf(sc->aac_dev, "Enable 64-bit array\n"); } return (0); } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; u_int32_t qoffset; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. */ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; sc->flags |= AAC_FLAGS_RAW_IO; } ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = 0; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; /* * The adapter assumes that pages are 4K in size, except on some * broken firmware versions that do the page->byte conversion twice, * therefore 'assuming' that this value is in 16MB units (2^24). * Round up since the granularity is so high. 
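As a worked example (assuming AAC_PAGE_SIZE is 4096): a host with 4GB of RAM reports ctob(physmem) / 4096 = 1048576 pages; on AAC_FLAGS_BROKEN_MEMMAP firmware that value is converted again below, (1048576 + 4096) / 4096 = 257, which the adapter then reads as roughly 257 * 16MB = 4GB.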
*/ ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { ip->HostPhysMemPages = (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; } ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ ip->InitFlags = 0; if (sc->flags & AAC_FLAGS_NEW_COMM) { ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED; device_printf(sc->aac_dev, "New comm. interface enabled\n"); } ip->MaxIoCommands = sc->aac_max_fibs; ip->MaxIoSize = sc->aac_max_sectors << 9; ip->MaxFibSize = sc->aac_max_fib_size; /* * Initialize FIB queues. Note that it appears that the layout of the * indexes and the segmentation of the entries may be mandated by the * adapter, which is only told about the base of the queue index fields. * * The initial values of the indices are assumed to inform the adapter * of the sizes of the respective queues, and theoretically it could * work out the entire layout of the queue structures from this. We * take the easy route and just lay this area out like everyone else * does. * * The Linux driver uses a much more complex scheme whereby several * header records are kept for each queue. We use a couple of generic * list manipulation functions which 'know' the size of each list by * virtue of a table. */ qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; qoffset &= ~(AAC_QUEUE_ALIGN - 1); sc->aac_queues = (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = &sc->aac_queues->qt_HostNormCmdQueue[0]; sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_HostHighCmdQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = &sc->aac_queues->qt_AdapNormCmdQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_AdapHighCmdQueue[0]; sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = &sc->aac_queues->qt_HostNormRespQueue[0]; 
sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_HostHighRespQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = &sc->aac_queues->qt_AdapNormRespQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_AdapHighRespQueue[0]; /* * Do controller-type-specific initialisation */ switch (sc->aac_hwif) { case AAC_HWIF_I960RX: AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); break; case AAC_HWIF_RKT: AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); break; default: break; } /* * Give the init structure to the controller. */ if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); error = EIO; goto out; } error = 0; out: return(error); } static int aac_setup_intr(struct aac_softc *sc) { if (sc->flags & AAC_FLAGS_NEW_COMM) { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, aac_new_intr, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt\n"); return (EINVAL); } } else { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_TYPE_BIO, aac_filter, NULL, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt filter\n"); return (EINVAL); } } return (0); } /* * Send a synchronous command to the controller and wait for a result. * Indicate if the controller completed the command with an error status. */ static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp) { time_t then; u_int32_t status; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); /* spin waiting for the command to complete */ then = time_uptime; do { if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOX(sc, 0); if (sp != NULL) *sp = status; if (status != AAC_SRB_STS_SUCCESS) return (-1); return(0); } int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib_header) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = 0; /* Not needed */ fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); /* * Give the FIB to the controller, wait for a response. */ if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); return(EIO); } return (0); } /* * Adapter-space FIB queue manipulation * * Note that the queue implementation here is a little funky; neither the PI or * CI will ever be zero. 
This behaviour is a controller feature. */ static const struct { int size; int notify; } aac_qinfo[] = { {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, {AAC_HOST_HIGH_CMD_ENTRIES, 0}, {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, {AAC_HOST_HIGH_RESP_ENTRIES, 0}, {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, {AAC_ADAP_HIGH_RESP_ENTRIES, 0} }; /* * Atomically insert an entry into the nominated queue, returns 0 on success or * EBUSY if the queue is full. * * Note: it would be more efficient to defer notifying the controller in * the case where we may be inserting several entries in rapid succession, * but implementing this usefully may be difficult (it would involve a * separate queue/notify interface). */ static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) { u_int32_t pi, ci; int error; u_int32_t fib_size; u_int32_t fib_addr; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fib_size = cm->cm_fib->Header.Size; fib_addr = cm->cm_fib->Header.ReceiverFibAddress; /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* * To avoid a race with its completion interrupt, place this command on * the busy queue prior to advertising it to the controller. */ aac_enqueue_busy(cm); /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Atomically remove one entry from the nominated queue, returns 0 on * success or ENOENT if the queue is empty. */ static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr) { u_int32_t pi, ci; u_int32_t fib_index; int error; int notify; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* check for queue empty */ if (ci == pi) { error = ENOENT; goto out; } /* wrap the pi so the following test works */ if (pi >= aac_qinfo[queue].size) pi = 0; notify = 0; if (ci == pi + 1) notify++; /* wrap the queue? */ if (ci >= aac_qinfo[queue].size) ci = 0; /* fetch the entry */ *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; switch (queue) { case AAC_HOST_NORM_CMD_QUEUE: case AAC_HOST_HIGH_CMD_QUEUE: /* * The aq_fib_addr is only 32 bits wide so it can't be counted * on to hold an address. For AIF's, the adapter assumes * that it's giving us an address into the array of AIF fibs. * Therefore, we have to convert it to an index. */ fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / sizeof(struct aac_fib); *fib_addr = &sc->aac_common->ac_fibs[fib_index]; break; case AAC_HOST_NORM_RESP_QUEUE: case AAC_HOST_HIGH_RESP_QUEUE: { struct aac_command *cm; /* * As above, an index is used instead of an actual address. * Gotta shift the index to account for the fast response * bit. 
		 * No other correction is needed since this value was
		 * originally provided by the driver via the SenderFibAddress
		 * field.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;
out:
	return(error);
}

/*
 * Put our response to an Adapter Initiated FIB on the response queue.
 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Tell the adapter where the FIB is */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;
out:
	return(error);
}

/*
 * Check for commands that have been outstanding for a suspiciously long time,
 * and complain about them.
 */
static void
aac_timeout(struct aac_softc *sc)
{
	struct aac_command *cm;
	time_t deadline;
	int timedout, code;

	/*
	 * Traverse the busy command list, bitch about late commands once
	 * only.
	 */
	timedout = 0;
	deadline = time_uptime - AAC_CMD_TIMEOUT;
	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
		if ((cm->cm_timestamp < deadline) &&
		    !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
			cm->cm_flags |= AAC_CMD_TIMEDOUT;
			device_printf(sc->aac_dev,
			    "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
			    cm, cm->cm_fib->Header.Command,
			    (int)(time_uptime-cm->cm_timestamp));
			AAC_PRINT_FIB(sc, cm->cm_fib);
			timedout++;
		}
	}
	if (timedout) {
		code = AAC_GET_FWSTATUS(sc);
		if (code != AAC_UP_AND_RUNNING) {
			device_printf(sc->aac_dev, "WARNING! Controller is no "
			    "longer running! code= 0x%x\n", code);
		}
	}
}

/*
 * Interface Function Vectors
 */

/*
 * Read the current firmware status word.
 */
static int
aac_sa_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
}

static int
aac_rx_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
	    AAC_RX_OMR0 : AAC_RX_FWSTATUS));
}

static int
aac_rkt_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); } /* * Notify the controller of a change in a given queue */ static void aac_sa_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); } static void aac_rx_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); } static void aac_rkt_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); } /* * Get the interrupt reason bits */ static int aac_sa_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); } static int aac_rx_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); } static int aac_rkt_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); } /* * Clear some interrupt reason bits */ static void aac_sa_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); } static void aac_rx_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); } static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); } /* * Populate the mailbox and set the command word */ static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); } static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); } static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_sa_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); } static int aac_rx_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); } static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); } /* * Set/clear interrupt masks */ static 
void aac_sa_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); } } static void aac_rx_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); } } static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); } } /* * New comm. interface: Send command functions */ static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); return 0; } static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); return 0; } /* * New comm. interface: get, set outbound queue index */ static int aac_rx_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); } static int aac_rkt_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); } static void aac_rx_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); } static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); } /* * Debugging and Diagnostics */ /* * Print some information about the controller. 
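The description is obtained with a synchronous RequestAdapterInfo FIB under the I/O lock; the reply carries the CPU variant, clock speed, memory sizes and firmware kernel revision, and adapters that support it also answer RequestSupplementAdapterInfo with a printable adapter name.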
*/ static void aac_describe_controller(struct aac_softc *sc) { struct aac_fib *fib; struct aac_adapter_info *info; char *adapter_type = "Adaptec RAID controller"; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); fib->data[0] = 0; if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } /* save the kernel revision structure for later use */ info = (struct aac_adapter_info *)&fib->data[0]; sc->aac_revision = info->KernelRevision; if (bootverbose) { device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " "(%dMB cache, %dMB execution), %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->TotalMem / (1024 * 1024), info->BufferMem / (1024 * 1024), info->ExecutionMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); device_printf(sc->aac_dev, "Supported Options=%b\n", sc->supported_options, "\20" "\1SNAPSHOT" "\2CLUSTERS" "\3WCACHE" "\4DATA64" "\5HOSTTIME" "\6RAID50" "\7WINDOW4GB" "\10SCSIUPGD" "\11SOFTERR" "\12NORECOND" "\13SGMAP64" "\14ALARM" "\15NONDASD" "\16SCSIMGT" "\17RAIDSCSI" "\21ADPTINFO" "\22NEWCOMM" "\23ARRAY64BIT" "\24HEATSENSOR"); } if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { fib->data[0] = 0; if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); else adapter_type = ((struct aac_supplement_adapter_info *) &fib->data[0])->AdapterTypeText; } device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", adapter_type, AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); } /* * Look up a text description of a numeric error code and return a pointer to * same. 
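The return of table[i + 1].string after the scan is not an off-by-one: the lookup tables this is used with are laid out with a NULL-string sentinel followed by one extra catch-all entry, and the loop can only fall through while positioned on the sentinel.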
 */
static const char *
aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
{
	int i;

	for (i = 0; table[i].string != NULL; i++)
		if (table[i].code == code)
			return(table[i].string);
	return(table[i + 1].string);
}

/*
 * Management Interface
 */

static int
aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	device_busy(sc->aac_dev);
	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
	return 0;
}

static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			    sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
		break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust userland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller.
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B,
		    "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}

static int
aac_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct aac_softc *sc;
	struct aac_fib_context *ctx;
	int revents;

	sc = dev->si_drv1;
	revents = 0;

	mtx_lock(&sc->aac_aifq_lock);
	if ((poll_events &
(POLLRDNORM | POLLIN)) != 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { revents |= poll_events & (POLLIN | POLLRDNORM); break; } } } mtx_unlock(&sc->aac_aifq_lock); if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) selrecord(td, &sc->rcv_select); } return (revents); } static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) { switch (event->ev_type) { case AAC_EVENT_CMFREE: mtx_assert(&sc->aac_io_lock, MA_OWNED); if (aac_alloc_command(sc, (struct aac_command **)arg)) { aac_add_event(sc, event); return; } free(event, M_AACBUF); wakeup(arg); break; default: break; } } /* * Send a FIB supplied from userspace */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; /* * Get a command */ mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); } mtx_unlock(&sc->aac_io_lock); /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_uptime; /* * Pass the FIB to the controller, wait for it to complete. */ mtx_lock(&sc->aac_io_lock); error = aac_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (error != 0) { device_printf(sc->aac_dev, "aac_wait_command return %d\n", error); goto out; } /* * Copy the FIB and data back out to the caller. 
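The size reported back by the adapter is clamped to aac_max_fib_size once more on the way out, mirroring the inbound check above, so a corrupt header cannot push the copyout past the FIB buffer.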
*/ size = cm->cm_fib->Header.Size; if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { mtx_lock(&sc->aac_io_lock); aac_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Send a passthrough FIB supplied from userspace */ static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) { struct aac_command *cm; struct aac_event *event; struct aac_fib *fib; struct aac_srb *srbcmd, *user_srb; struct aac_sg_entry *sge; struct aac_sg_entry64 *sge64; void *srb_sg_address, *ureply; uint32_t fibsize, srb_sg_bytecount; int error, transfer_data; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; transfer_data = 0; fibsize = 0; user_srb = (struct aac_srb *)arg; mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); } mtx_unlock(&sc->aac_io_lock); cm->cm_data = NULL; fib = cm->cm_fib; srbcmd = (struct aac_srb *)fib->data; error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); if (error != 0) goto out; if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { error = EINVAL; goto out; } error = copyin(user_srb, srbcmd, fibsize); if (error != 0) goto out; srbcmd->function = 0; srbcmd->retry_limit = 0; if (srbcmd->sg_map.SgCount > 1) { error = EINVAL; goto out; } /* Retrieve correct SG entries. */ if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { struct aac_sg_entry sg; sge = srbcmd->sg_map.SgEntry; sge64 = NULL; if ((error = copyin(sge, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (void *)(uintptr_t)sg.SgAddress; } #ifdef __amd64__ else if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { struct aac_sg_entry64 sg; sge = NULL; sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; if ((error = copyin(sge64, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (void *)sg.SgAddress; if (sge64->SgAddress > 0xffffffffull && (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { error = EINVAL; goto out; } } #endif else { error = EINVAL; goto out; } ureply = (char *)arg + fibsize; srbcmd->data_len = srb_sg_bytecount; if (srbcmd->sg_map.SgCount == 1) transfer_data = 1; cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; if (transfer_data) { cm->cm_datalen = srb_sg_bytecount; cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); if (cm->cm_data == NULL) { error = ENOMEM; goto out; } if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) cm->cm_flags |= AAC_CMD_DATAIN; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { cm->cm_flags |= AAC_CMD_DATAOUT; error = copyin(srb_sg_address, cm->cm_data, cm->cm_datalen); if (error != 0) goto out; } } fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 
	    ScsiPortCommandU64 : ScsiPortCommand;

	mtx_lock(&sc->aac_io_lock);
	aac_wait_command(cm);
	mtx_unlock(&sc->aac_io_lock);

	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
		error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
		if (error != 0)
			goto out;
	}
	error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
out:
	if (cm != NULL) {
		if (cm->cm_data != NULL)
			free(cm->cm_data, M_AACBUF);
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		mtx_unlock(&sc->aac_io_lock);
	}
	return(error);
}

/*
 * cdevpriv interface private destructor.
 */
static void
aac_cdevpriv_dtor(void *arg)
{
	struct aac_softc *sc;

	sc = arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	device_unbusy(sc->aac_dev);
}

/*
 * Handle an AIF sent to us by the controller; queue it for later reference.
 * If the queue fills up, then drop the older entries.
 */
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
	struct aac_aif_command *aif;
	struct aac_container *co, *co_next;
	struct aac_fib_context *ctx;
	struct aac_mntinforesp *mir;
	int next, current, found;
	int count = 0, added = 0, i = 0;
	uint32_t channel;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	aif = (struct aac_aif_command*)&fib->data[0];
	aac_print_aif(sc, aif);

	/* Is it an event that we should care about? */
	switch (aif->command) {
	case AifCmdEventNotify:
		switch (aif->data.EN.type) {
		case AifEnAddContainer:
		case AifEnDeleteContainer:
			/*
			 * A container was added or deleted, but the message
			 * doesn't tell us anything else! Re-enumerate the
			 * containers and sort things out.
			 */
			aac_alloc_sync_fib(sc, &fib);
			do {
				/*
				 * Ask the controller for its containers one at
				 * a time.
				 * XXX What if the controller's list changes
				 * midway through this enumeration?
				 * XXX This should be done async.
				 */
				if ((mir = aac_get_container_info(sc, fib,
				    i)) == NULL)
					continue;
				if (i == 0)
					count = mir->MntRespCount;
				/*
				 * Check the container against our list.
				 * co->co_found was already set to 0 in a
				 * previous run.
				 */
				if ((mir->Status == ST_OK) &&
				    (mir->MntTable[0].VolType != CT_NONE)) {
					found = 0;
					TAILQ_FOREACH(co,
					    &sc->aac_container_tqh, co_link) {
						if (co->co_mntobj.ObjectId ==
						    mir->MntTable[0].ObjectId) {
							co->co_found = 1;
							found = 1;
							break;
						}
					}
					/*
					 * If the container matched, continue
					 * in the list.
					 */
					if (found) {
						i++;
						continue;
					}

					/*
					 * This is a new container. Do all the
					 * appropriate things to set it up.
					 */
					aac_add_container(sc, mir, 1);
					added = 1;
				}
				i++;
			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
			aac_release_sync_fib(sc);

			/*
			 * Go through our list of containers and see which ones
			 * were not marked 'found'. Since the controller didn't
			 * list them they must have been deleted. Do the
			 * appropriate steps to destroy the device. Also reset
			 * the co->co_found field.
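			 *
			 * In outline, the sweep below is a conventional
			 * mark-and-sweep pass (illustrative pseudocode; the
			 * real loop must drop aac_io_lock around
			 * device_delete_child(), which is why it re-walks
			 * the list by hand):
			 *
			 *	for each co on aac_container_tqh:
			 *		if co->co_found == 0:
			 *			delete disk child, unlink, free
			 *		else:
			 *			co->co_found = 0 for next scan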
*/ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { mtx_unlock(&sc->aac_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(sc->aac_dev, co->co_disk); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->aac_io_lock); co_next = TAILQ_NEXT(co, co_link); mtx_lock(&sc->aac_container_lock); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); free(co, M_AACBUF); co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (added) { mtx_unlock(&sc->aac_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); bus_generic_attach(sc->aac_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->aac_io_lock); } break; case AifEnEnclosureManagement: switch (aif->data.EN.data.EEE.eventType) { case AIF_EM_DRIVE_INSERTION: case AIF_EM_DRIVE_REMOVAL: channel = aif->data.EN.data.EEE.unitID; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, (channel & 0xFFFF)); break; } break; case AifEnAddJBOD: case AifEnDeleteJBOD: channel = aif->data.EN.data.ECE.container; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, AAC_CAM_TARGET_WILDCARD); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ mtx_lock(&sc->aac_aifq_lock); current = sc->aifq_idx; next = (current + 1) % AAC_AIFQ_LENGTH; if (next == 0) sc->aifq_filled = 1; bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); /* modify AIF contexts */ if (sc->aifq_filled) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (next == ctx->ctx_idx) ctx->ctx_wrap = 1; else if (current == ctx->ctx_idx && ctx->ctx_wrap) ctx->ctx_idx = next; } } sc->aifq_idx = next; /* On the off chance that someone is sleeping for an aif... */ if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); /* Wakeup any poll()ers */ selwakeuppri(&sc->rcv_select, PRIBIO); mtx_unlock(&sc->aac_aifq_lock); } /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. */ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. 
*/ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.comp.major = AAC_DRIVER_MAJOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.minor = AAC_DRIVER_MINOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.type = AAC_DRIVER_TYPE; rev_check_resp.adapterSWRevision.external.comp.dash = AAC_DRIVER_BUGFIX_LEVEL; rev_check_resp.adapterSWRevision.buildNumber = AAC_DRIVER_BUILD; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the fib context to the caller */ static int aac_open_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *fibctx, *ctx; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); if (fibctx == NULL) return (ENOMEM); mtx_lock(&sc->aac_aifq_lock); /* all elements are already 0, add to queue */ if (sc->fibctx == NULL) sc->fibctx = fibctx; else { for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) ; ctx->next = fibctx; fibctx->prev = ctx; } /* evaluate unique value */ fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); ctx = sc->fibctx; while (ctx != fibctx) { if (ctx->unique == fibctx->unique) { fibctx->unique++; ctx = sc->fibctx; } else { ctx = ctx->next; } } mtx_unlock(&sc->aac_aifq_lock); error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); if (error) aac_close_aif(sc, (caddr_t)ctx); return error; } /* * Close the caller's fib context */ static int aac_close_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *ctx; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_aifq_lock); for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->unique == *(uint32_t *)&arg) { if (ctx == sc->fibctx) sc->fibctx = NULL; else { ctx->prev->next = ctx->next; if (ctx->next) ctx->next->prev = ctx->prev; } break; } } mtx_unlock(&sc->aac_aifq_lock); if (ctx) free(ctx, M_AACBUF); return 0; } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; struct aac_fib_context *ctx; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct get_adapter_fib_ioctl32 agf32; error = copyin(arg, &agf32, sizeof(agf32)); if (error == 0) { agf.AdapterFibContext = agf32.AdapterFibContext; agf.Wait = agf32.Wait; agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; } } else #endif error = copyin(arg, &agf, sizeof(agf)); if (error == 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (agf.AdapterFibContext == ctx->unique) break; } if (!ctx) return (EFAULT); error = aac_return_aif(sc, ctx, agf.AifFib); if (error == EAGAIN && agf.Wait) { fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); if (error == 0) error = aac_return_aif(sc, ctx, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } } return(error); } /* * Hand the next AIF off the top of the queue out to userspace. 
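 *
 * The AIF queue is a fixed ring of AAC_AIFQ_LENGTH FIBs. A reader is
 * empty exactly when its ctx_idx has caught up with the writer's
 * aifq_idx and ctx_wrap is clear; when aac_handle_aif() laps a slow
 * reader it sets ctx_wrap and advances ctx_idx, so the reader resumes
 * at the oldest surviving entry instead of appearing empty forever.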
 */
static int
aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
{
	int current, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_aifq_lock);
	current = ctx->ctx_idx;
	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
		/* empty */
		mtx_unlock(&sc->aac_aifq_lock);
		return (EAGAIN);
	}
	error = copyout(&sc->aac_aifq[current], (void *)uptr,
	    sizeof(struct aac_fib));
	if (error)
		device_printf(sc->aac_dev,
		    "aac_return_aif: copyout returned %d\n", error);
	else {
		ctx->ctx_wrap = 0;
		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
	}
	mtx_unlock(&sc->aac_aifq_lock);
	return(error);
}

static int
aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_pci_info {
		u_int32_t bus;
		u_int32_t slot;
	} pciinf;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	pciinf.bus = pci_get_bus(sc->aac_dev);
	pciinf.slot = pci_get_slot(sc->aac_dev);

	error = copyout((caddr_t)&pciinf, uptr, sizeof(struct aac_pci_info));
	return (error);
}

static int
aac_supported_features(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_features f;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
		return (error);

	/*
	 * When the management driver receives the FSACTL_GET_FEATURES ioctl
	 * with featuresState all zero, the driver returns the current state
	 * of all the supported features; the data field is not valid.
	 * When it receives the ioctl with a specific bit set in
	 * featuresState, the driver returns the current state of that
	 * feature and any data associated with it in the data field, or
	 * performs whatever action the data field indicates.
	 */
	if (f.feat.fValue == 0) {
		f.feat.fBits.largeLBA =
		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: In the future, add other features state here as well */
	} else {
		if (f.feat.fBits.largeLBA)
			f.feat.fBits.largeLBA =
			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: Add other features state and data in the future */
	}

	error = copyout(&f, uptr, sizeof (f));
	return (error);
}

/*
 * Give the userland some information about the container. The AAC arch
 * expects the driver to be a SCSI passthrough type driver, so it expects
 * the containers to have b:t:l numbers. Fake it.
 */
static int
aac_query_disk(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_query_disk query_disk;
	struct aac_container *co;
	struct aac_disk *disk;
	int error, id;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	disk = NULL;

	error = copyin(uptr, (caddr_t)&query_disk,
	    sizeof(struct aac_query_disk));
	if (error)
		return (error);

	id = query_disk.ContainerNumber;
	if (id == -1)
		return (EINVAL);

	mtx_lock(&sc->aac_container_lock);
	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
		if (co->co_mntobj.ObjectId == id)
			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;	/* XXX is this right? */
	} else {
		disk = device_get_softc(co->co_disk);
		query_disk.Valid = 1;
		query_disk.Locked = (disk->ad_flags & AAC_DISK_OPEN) ?
1 : 0; query_disk.Deleted = 0; query_disk.Bus = device_get_unit(sc->aac_dev); query_disk.Target = disk->unit; query_disk.Lun = 0; query_disk.UnMapped = 0; sprintf(&query_disk.diskDeviceName[0], "%s%d", disk->ad_disk->d_name, disk->ad_disk->d_unit); } mtx_unlock(&sc->aac_container_lock); error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); return (error); } static void aac_get_bus_info(struct aac_softc *sc) { struct aac_fib *fib; struct aac_ctcfg *c_cmd; struct aac_ctcfg_resp *c_resp; struct aac_vmioctl *vmi; struct aac_vmi_businf_resp *vmi_resp; struct aac_getbusinf businfo; struct aac_sim *caminf; device_t child; int i, found, error; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); c_cmd = (struct aac_ctcfg *)&fib->data[0]; bzero(c_cmd, sizeof(struct aac_ctcfg)); c_cmd->Command = VM_ContainerConfig; c_cmd->cmd = CT_GET_SCSI_METHOD; c_cmd->param = 0; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_ctcfg)); if (error) { device_printf(sc->aac_dev, "Error %d sending " "VM_ContainerConfig command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; if (c_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", c_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } sc->scsi_method_id = c_resp->param; vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = GetBusInfo; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmi_businf_resp)); if (error) { device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; if (vmi_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); found = 0; for (i = 0; i < businfo.BusCount; i++) { if (businfo.BusValid[i] != AAC_BUS_VALID) continue; caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), M_AACBUF, M_NOWAIT | M_ZERO); if (caminf == NULL) { device_printf(sc->aac_dev, "No memory to add passthrough bus %d\n", i); break; } child = device_add_child(sc->aac_dev, "aacp", -1); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for passthrough bus %d\n", i); free(caminf, M_AACBUF); break; } caminf->TargetsPerBus = businfo.TargetsPerBus; caminf->BusNumber = i; caminf->InitiatorBusId = businfo.InitiatorBusId[i]; caminf->aac_sc = sc; caminf->sim_dev = child; device_set_ivars(child, caminf); device_set_desc(child, "SCSI Passthrough Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); found = 1; } if (found) bus_generic_attach(sc->aac_dev); } diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 1242b7ce9ce1..70e196399e0f 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4531 +1,4530 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; static char *pcilink_ids[] = { "PNP0C0F", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. 
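 * Indexed by sleep state number. acpi_attach() marks S0 unconditionally
 * and marks S1-S5 only when the corresponding _Sx package evaluates and
 * AcpiGetSleepTypeData() succeeds for that state.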
*/ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static int acpi_probe(device_t dev); static int acpi_attach(device_t dev); static int acpi_suspend(device_t dev); static int acpi_resume(device_t dev); static int acpi_shutdown(device_t dev); static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit); static int acpi_print_child(device_t bus, device_t child); static void acpi_probe_nomatch(device_t bus, device_t child); static void acpi_driver_added(device_t dev, driver_t *driver); static void acpi_child_deleted(device_t dev, device_t child); static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static struct resource_list *acpi_get_rlist(device_t dev, device_t child); static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count); static struct resource *acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static void acpi_delete_resource(device_t bus, device_t child, int type, int rid); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match); static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret); static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg); static ACPI_STATUS acpi_find_dsd(device_t bus, device_t dev); static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void 
acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static int acpi_child_location_str_method(device_t acdev, device_t child, char *buf, size_t buflen); static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child, char *buf, size_t buflen); static void acpi_enable_pcie(void); static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method), DEVMETHOD(bus_child_location_str, acpi_child_location_str_method), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), DEVMETHOD(bus_get_property, acpi_bus_get_prop), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; static devclass_t acpi_devclass; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. */ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. 
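 * Filled in by acpi_identify() from the RSDT/XSDT OEM ID and OEM table
 * ID, and reported through device_set_desc() in acpi_probe().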
*/ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. 
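 * Quirks can also be overridden from the loader: the "debug.acpi.quirks"
 * tunable replaces the table-derived bitmap, and (as checked just below)
 * a machine blacklisted with ACPI_Q_BROKEN can still be force-enabled by
 * putting
 *
 *	hint.acpi.0.disabled="0"
 *
 * in loader.conf(5).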
*/ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. */ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. */ acpi_reset_interfaces(dev); /* Load ACPI name space. 
*/ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when you close the lid.");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state",
	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	&sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state",
	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	&sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	"sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
	"sleep delay in seconds");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	"s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	"verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	"disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0,
	"Disable ACPI when rebooting/halting system");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	"handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0,
	"Use ACPI Reset Register to reboot");

    /*
     * Default to 1 second before sleeping to give some machines time to
     * stabilize.
     */
    sc->acpi_sleep_delay = 1;
    if (bootverbose)
	sc->acpi_verbose = 1;
    if ((env = kern_getenv("hw.acpi.verbose")) != NULL) {
	if (strcmp(env, "0") != 0)
	    sc->acpi_verbose = 1;
	freeenv(env);
    }

    /* Only enable reboot by default if the FADT says it is available. */
    if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
	sc->acpi_handle_reboot = 1;

#if !ACPI_REDUCED_HARDWARE
    /* Only enable S4BIOS by default if the FACS says it is available. */
    if (AcpiGbl_FACS != NULL &&
	AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
	sc->acpi_s4bios = 1;
#endif

    /* Probe all supported sleep states. */
    acpi_sleep_states[ACPI_STATE_S0] = TRUE;
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
	if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
	    __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
	    ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
	    acpi_sleep_states[state] = TRUE;

    /*
     * Dispatch the default sleep state to devices. The lid switch is set
     * to UNKNOWN by default to avoid surprising users.
     */
    sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
	ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
    sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
    sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
	ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
    sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
	ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;

    /* Pick the first valid sleep state for the sleep button default. */
    sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
    for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
	if (acpi_sleep_states[state]) {
	    sc->acpi_sleep_button_sx = state;
	    break;
	}

    acpi_enable_fixed_events(sc);

    /*
     * Scan the namespace and attach/initialise children.
     */

    /* Register our shutdown handler. */
    EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
	SHUTDOWN_PRI_LAST);

    /*
     * Register our acpi event handlers.
     * XXX should be configurable, e.g. via userland policy manager.
     */
    EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
	sc, ACPI_EVENT_PRI_LAST);
    EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
	sc, ACPI_EVENT_PRI_LAST);

    /* Flag our initial states.
*/ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; GIANT_REQUIRED; error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { GIANT_REQUIRED; acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { GIANT_REQUIRED; /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most). */ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. * * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. 
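 *
 * When ACPI_ENABLE_POWERDOWN_NODRIVER is defined, an unclaimed child is
 * dropped to D3 here and acpi_driver_added() below returns it to D0
 * before re-probing, powering it off again if the probe still fails.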
*/ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_str_method(device_t cbdev, device_t child, char *buf, size_t buflen) { struct acpi_device *dinfo = device_get_ivars(child); char buf2[32]; int pxm; if (dinfo->ad_handle) { snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { snprintf(buf2, 32, " _PXM=%d", pxm); strlcat(buf, buf2, buflen); } } else { snprintf(buf, buflen, ""); } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo_str(ACPI_HANDLE handle, char *buf, size_t buflen) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { snprintf(buf, buflen, "unknown"); return (0); } snprintf(buf, buflen, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, size_t buflen) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo_str(dinfo->ad_handle, buf, buflen)); } /* * Handle device deletion. 
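 * Detach the handle-to-device_t back-pointer so that a later
 * acpi_get_device() on the same ACPI handle cannot return the deleted
 * child.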
*/ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { const char *s; long value; int line, matches, unit; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0)) continue; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. */ matches = 0; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 
0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. */ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches++; else continue; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches++; else continue; } if (matches > 0) goto matched; if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches++; else continue; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches++; else continue; } matched: if (matches > 0) { /* We have a winner! */ *unitp = unit; break; } } } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned. */ static int acpi_parse_pxm(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_parse_pxm(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int acpi_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_parse_pxm(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct resource *res; struct resource_list *rl; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); STAILQ_FOREACH(rle, rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. 
*/ switch (rle->type) { case SYS_RES_IOPORT: rm = &acpi_rman_io; break; case SYS_RES_MEMORY: rm = &acpi_rman_mem; break; default: continue; } /* Pre-allocate resource and add to our rman pool. */ res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for devices found during attach once system * resources have been allocated. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; struct acpi_softc *sc; device_t *children; int child_count, i; sc = device_get_softc(dev); if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); sc->acpi_resources_reserved = 1; } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_softc *sc = device_get_softc(dev); struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; ACPI_DEVICE_INFO *devinfo; rman_res_t end; int allow; /* Ignore IRQ resources for PCI link devices. */ if (type == SYS_RES_IRQ && ACPI_ID_PROBE(dev, child, pcilink_ids, NULL) <= 0) return (0); /* * Ignore most resources for PCI root bridges. Some BIOSes * incorrectly enumerate the memory ranges they decode as plain * memory resources instead of as ResourceProducer ranges. Other * BIOSes incorrectly list system resource entries for I/O ranges * under the PCI bridge. Do allow the one known-correct case on * x86 of a PCI bridge claiming the I/O ports used for PCI config * access. */ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) { if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) { #if defined(__i386__) || defined(__amd64__) allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT); #else allow = 0; #endif if (!allow) { AcpiOsFree(devinfo); return (0); } } AcpiOsFree(devinfo); } } #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. 
*/ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); /* Don't reserve resources until the system resources are allocated. */ if (!sc->acpi_resources_reserved) return (0); /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, child, sysres_ids, NULL) <= 0) return (0); /* * Don't reserve IRQ resources. There are many sticky things to * get right otherwise (e.g. IRQs for psm, atkbd, and HPET when * using legacy routing). */ if (type == SYS_RES_IRQ) return (0); /* * Don't reserve resources for CPU devices. Some of these * resources need to be allocated as shareable, but reservations * are always non-shareable. */ if (device_get_devclass(child) == devclass_find("cpu")) return (0); /* * Reserve the resource. * * XXX: Ignores failure for now. Failure here is probably a * BIOS/firmware bug? */ resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved. */ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. 
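 *
 * For example (hypothetical ranges): if a PNP0C01/PNP0C02 sysresource
 * device claimed I/O ports 0x400-0x4ff into acpi_rman_io at attach time,
 * a child now requesting exactly 0x420-0x42f is carved out of that
 * managed region by acpi_alloc_sysres() rather than failing in the
 * parent bus.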
*/ if (res == NULL && start + count - 1 == end) res = acpi_alloc_sysres(child, type, rid, start, end, count, flags); return (res); } /* * Attempt to allocate a specific resource range from the system * resource ranges. Note that we only handle memory and I/O port * system resources. */ struct resource * acpi_alloc_sysres(device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct rman *rm; struct resource *res; switch (type) { case SYS_RES_IOPORT: rm = &acpi_rman_io; break; case SYS_RES_MEMORY: rm = &acpi_rman_mem; break; default: return (NULL); } KASSERT(start + count - 1 == end, ("wildcard resource range")); res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); /* If requested, activate the resource using the parent's method. */ if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } return (res); } static int acpi_is_resource_managed(int type, struct resource *r) { /* We only handle memory and IO resources through rman. */ switch (type) { case SYS_RES_IOPORT: return (rman_is_region_manager(r, &acpi_rman_io)); case SYS_RES_MEMORY: return (rman_is_region_manager(r, &acpi_rman_mem)); } return (0); } static int acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(type, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, type, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int ret; /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. */ if (acpi_is_resource_managed(type, r)) { if (rman_get_flags(r) & RF_ACTIVE) { ret = bus_deactivate_resource(child, type, rid, r); if (ret != 0) return (ret); } return (rman_release_resource(r)); } return (bus_generic_rl_release_resource(bus, child, type, rid, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. 
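 * (For illustration, an acpi_timer(4)-style caller allocates the PM
 * timer register block from its FADT-provided GAS roughly as below;
 * error handling is trimmed.)
 */
#if 0	/* Illustrative sketch only. */
	struct resource *res;
	int rid, rtype;

	rid = 0;
	if (acpi_bus_alloc_gas(dev, &rtype, &rid,
	    &AcpiGbl_FADT.XPmTimerBlock, &res, 0) == 0) {
		/* res is now an active I/O port or memory resource. */
	}
#endif
/*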
*/ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. */ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? 
BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(bus, dev); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(device_t bus, device_t dev) { const ACPI_OBJECT *dsd, *guid, *pkg; struct acpi_device *ad; ACPI_STATUS status; ad = device_get_ivars(dev); ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = ACPI_EVALUATE_OBJECT(bus, dev, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. 
* The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. */ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. 
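 * (For example, a bus driver typically calls
 * acpi_set_powerstate(child, ACPI_STATE_D3) on suspend and
 * acpi_set_powerstate(child, ACPI_STATE_D0) on resume; both succeed
 * even when the firmware provides no _PSx/_PRx methods for the child.)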
*/ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. */ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { if (alloc->PciSegment == 0) { pcie_cfgregopen(alloc->Address, alloc->StartBusNumber, alloc->EndBusNumber); return; } alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) 
*/ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Reserve resources already allocated to children. */ acpi_reserve_resources(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, -1); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. 
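 * (The calls below create the two-way mapping; it can then be queried
 * from either side, e.g.:)
 */
#if 0	/* Illustrative sketch only. */
	device_t dev = acpi_get_device(handle);	/* handle -> device_t */
	ACPI_HANDLE h = acpi_get_handle(dev);	/* device_t -> handle */
#endif
/*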
*/ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources. */ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. 
*/ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 from _STA for FreeBSD * because the firmware only returns non-zero for the OS string * "Windows 2015". Force them to always be treated as present. * Beta firmware versions were worse: they always returned 0. */ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle. Returns: * ACPI_MATCHHID_HID if the _HID matches, * ACPI_MATCHHID_CID if a _CID matches and the _HID does not, * ACPI_MATCHHID_NOMATCH (0) if neither matches. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; int ret, i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, i.e. that of (parent) * or one of its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here?
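 * (As written, any failure other than AE_NOT_FOUND stops the upward walk
 * but still reports AE_OK without setting *result. A typical caller
 * resolves a relative name against enclosing scopes, e.g. this
 * hypothetical lookup:)
 */
#if 0	/* Illustrative sketch only. */
	ACPI_HANDLE dep;

	if (ACPI_SUCCESS(acpi_GetHandleInScope(handle, "_EJD", &dep))) {
		/* dep is the first _EJD found walking toward the root. */
	}
#endif
/*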
*/ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer. */ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX A possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ for (i = 0, comp = pkg->Package.Elements; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set.
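 * (For illustration, a caller can index the resources returned by _CRS;
 * "handle" below is a caller-supplied handle and error handling is
 * trimmed.)
 */
#if 0	/* Illustrative sketch only. */
	ACPI_BUFFER crs;
	ACPI_RESOURCE *res;
	int i;

	crs.Pointer = NULL;
	crs.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_SUCCESS(AcpiGetCurrentResources(handle, &crs))) {
		for (i = 0; ACPI_SUCCESS(acpi_FindIndexedResource(&crs,
		    i, &res)) && res->Type != ACPI_RESOURCE_TYPE_END_TAG;
		    i++)
			printf("resource %d has type %u\n", i, res->Type);
		AcpiOsFree(crs.Pointer);
	}
#endif
/*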
*/ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator. * This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). 
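 * (A caller normally probes the query bitfield before invoking a
 * specific function; hypothetical sketch, where "uuid" is the interface
 * UUID the caller already knows and function index 4 is arbitrary:)
 */
#if 0	/* Illustrative sketch only. */
	ACPI_BUFFER out;

	/* Bit N of the query result advertises support for function N. */
	if ((acpi_DSMQuery(handle, uuid, 1) & (1ULL << 4)) != 0 &&
	    ACPI_SUCCESS(acpi_EvaluateDSM(handle, uuid, 1, 4, NULL, &out)))
		AcpiOsFree(out.Pointer);
#endif
/*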
*/ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); } AcpiOsFree(obj); return (ret); } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. */ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries.
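 * (Hypothetical sketch of walking the MADT this way; it assumes the
 * acpi_subtable_handler callback type from acpivar.h.)
 */
#if 0	/* Illustrative sketch only. */
static void
madt_count_entry(ACPI_SUBTABLE_HEADER *entry, void *arg)
{

	(*(int *)arg)++;
}

static int
madt_count(void)
{
	ACPI_TABLE_MADT *madt;
	int count;

	count = 0;
	if (ACPI_SUCCESS(AcpiGetTable(ACPI_SIG_MADT, 1,
	    (ACPI_TABLE_HEADER **)&madt)))
		acpi_walk_subtables(madt + 1,
		    (char *)madt + madt->Header.Length,
		    madt_count_entry, &count);
	return (count);
}
#endif
/*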
*/ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. */ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. 
*/ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. */ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* - * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE - * drivers need this. + * Be sure to hold the bus topology lock across DEVICE_SUSPEND/RESUME */ - mtx_lock(&Giant); + bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a two-pass approach with a 'veto' pass followed by * a "real thing" pass would be better, but the current bus * interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be * restored by the ACPI platform (BIOS, firmware) to its pre-sleep * state. Unfortunately some BIOSes fail to do that, which leads to * unexpected and serious consequences during wakeup, like a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent misinterpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing of the power button status after wakeup is mandated * by the ACPI specification in section "Fixed Power Button". * * XXX As of ACPICA 20121114, AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* Call acpi_wakeup_machdep() again with interrupts enabled. */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4.
*/ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process. This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; - mtx_unlock(&Giant); + bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. 
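 * (Worked example: a NIC whose _PRW reports lowest_wake = S3 keeps its
 * wake GPE armed for a suspend to S1-S3 but has it disabled for a
 * suspend to S4, because the GPE cannot wake the system from there.)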
*/ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. 
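 * (A typical firmware declaration this parses is, in ASL,
 * Name (_PRW, Package () { 0x0D, 0x03 }): wake GPE bit 0x0D in the
 * FADT-described GPE blocks, with wake supported down to S3.)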
*/ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. 
We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted unless the caller has the device * open for writing, since they change system state. The hook * ioctls dispatched above currently only fetch information and do * not change system behavior, so they carry no such restriction. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls.
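 * * As an illustrative sketch (not part of this change), acpiconf(8) * reaches the ACPIIO_REQSLPSTATE case below roughly as follows: * * int fd = open("/dev/acpi", O_WRONLY); * int state = ACPI_STATE_S3; * ioctl(fd, ACPIIO_REQSLPSTATE, &state); * * which in turn calls acpi_ReqSleepState() to start the suspend request.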
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
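 * * For example, in loader.conf(5): * * debug.acpi.layer="ACPI_EC ACPI_EVENTS" * debug.acpi.level="ACPI_LV_INFO !ACPI_LV_DEBUG_OBJECT" * * enables EC and event tracing at the info level while masking * debug-object output.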
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_dock.c b/sys/dev/acpica/acpi_dock.c index 211994640031..2b7a451bfe87 100644 --- a/sys/dev/acpica/acpi_dock.c +++ b/sys/dev/acpica/acpi_dock.c @@ -1,550 +1,550 @@ /*- * Copyright (c) 2005-2006 Mitsuru IWASAKI * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include #include #include #include /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_DOCK ACPI_MODULE_NAME("DOCK") /* For Docking status */ #define ACPI_DOCK_STATUS_UNKNOWN -1 #define ACPI_DOCK_STATUS_UNDOCKED 0 #define ACPI_DOCK_STATUS_DOCKED 1 #define ACPI_DOCK_UNLOCK 0 /* Allow device to be ejected */ #define ACPI_DOCK_LOCK 1 /* Prevent dev from being removed */ #define ACPI_DOCK_ISOLATE 0 /* Isolate from dock connector */ #define ACPI_DOCK_CONNECT 1 /* Connect to dock */ struct acpi_dock_softc { int _sta; int _bdn; int _uid; int status; struct sysctl_ctx_list *sysctl_ctx; struct sysctl_oid *sysctl_tree; }; ACPI_SERIAL_DECL(dock, "ACPI Docking Station"); static char *acpi_dock_pnp_ids[] = {"PNP0C15", NULL}; /* * Utility functions */ static void acpi_dock_get_info(device_t dev) { struct acpi_dock_softc *sc; ACPI_HANDLE h; sc = device_get_softc(dev); h = acpi_get_handle(dev); if (ACPI_FAILURE(acpi_GetInteger(h, "_STA", &sc->_sta))) sc->_sta = ACPI_DOCK_STATUS_UNKNOWN; if (ACPI_FAILURE(acpi_GetInteger(h, "_BDN", &sc->_bdn))) sc->_bdn = ACPI_DOCK_STATUS_UNKNOWN; if (ACPI_FAILURE(acpi_GetInteger(h, "_UID", &sc->_uid))) sc->_uid = ACPI_DOCK_STATUS_UNKNOWN; ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev), "_STA: %04x, _BDN: %04x, _UID: %04x\n", sc->_sta, sc->_bdn, sc->_uid); } static int acpi_dock_execute_dck(device_t dev, int dock) { ACPI_HANDLE h; ACPI_OBJECT argobj; ACPI_OBJECT_LIST args; ACPI_BUFFER buf; ACPI_OBJECT retobj; ACPI_STATUS status; h = acpi_get_handle(dev); argobj.Type = ACPI_TYPE_INTEGER; argobj.Integer.Value = dock; args.Count = 1; args.Pointer = &argobj; buf.Pointer = &retobj; buf.Length = sizeof(retobj); status = AcpiEvaluateObject(h, "_DCK", &args, &buf); /* * When _DCK is called with 0, OSPM will ignore the return value. */ if (dock == ACPI_DOCK_ISOLATE) return (0); /* If _DCK returned 1, the request succeeded. */ if (ACPI_SUCCESS(status) && retobj.Type == ACPI_TYPE_INTEGER && retobj.Integer.Value == 1) return (0); return (-1); } /* Lock devices while docked to prevent surprise removal. */ static void acpi_dock_execute_lck(device_t dev, int lock) { ACPI_HANDLE h; h = acpi_get_handle(dev); acpi_SetInteger(h, "_LCK", lock); } /* Eject a device (i.e., motorized). */ static int acpi_dock_execute_ejx(device_t dev, int eject, int state) { ACPI_HANDLE h; ACPI_STATUS status; char ejx[5]; h = acpi_get_handle(dev); snprintf(ejx, sizeof(ejx), "_EJ%d", state); status = acpi_SetInteger(h, ejx, eject); if (ACPI_SUCCESS(status)) return (0); return (-1); } /* Find dependent devices. When their parent is removed, so are they. 
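 * A dependent device declares this relationship with _EJD, which returns * the name of the dock it depends on, e.g. in ASL: * * Name (_EJD, "\\_SB.PCI0.DOCK") * * acpi_dock_is_ejd_device() below compares that reference against the * dock's own handle.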
*/ static int acpi_dock_is_ejd_device(ACPI_HANDLE dock_handle, ACPI_HANDLE handle) { int ret; ACPI_STATUS ret_status; ACPI_BUFFER ejd_buffer; ACPI_OBJECT *obj; ret = 0; ejd_buffer.Pointer = NULL; ejd_buffer.Length = ACPI_ALLOCATE_BUFFER; ret_status = AcpiEvaluateObject(handle, "_EJD", NULL, &ejd_buffer); if (ACPI_FAILURE(ret_status)) goto out; obj = (ACPI_OBJECT *)ejd_buffer.Pointer; if (dock_handle == acpi_GetReference(NULL, obj)) ret = 1; out: if (ejd_buffer.Pointer != NULL) AcpiOsFree(ejd_buffer.Pointer); return (ret); } /* * Docking functions */ static void acpi_dock_attach_later(void *context) { device_t dev; dev = (device_t)context; if (!device_is_enabled(dev)) device_enable(dev); - mtx_lock(&Giant); + bus_topo_lock(); device_probe_and_attach(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); } static ACPI_STATUS acpi_dock_insert_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { device_t dock_dev, dev; ACPI_HANDLE dock_handle; dock_dev = (device_t)context; dock_handle = acpi_get_handle(dock_dev); if (!acpi_dock_is_ejd_device(dock_handle, handle)) goto out; ACPI_VPRINT(dock_dev, acpi_device_get_parent_softc(dock_dev), "inserting device for %s\n", acpi_name(handle)); #if 0 /* * If the system boots up without a dock, the devices under the dock * are left uninitialized, and control methods such as _INI and _STA * have not been executed. * Normal devices are initialized at boot by calling * AcpiInitializeObjects(); the devices under the dock, however, would * need to be initialized here following the ACPICA scheme. */ ACPI_INIT_WALK_INFO Info; AcpiNsWalkNamespace(ACPI_TYPE_ANY, handle, 100, TRUE, AcpiNsInitOneDevice, NULL, &Info, NULL); #endif dev = acpi_get_device(handle); if (dev == NULL) { device_printf(dock_dev, "error: %s has no associated device\n", acpi_name(handle)); goto out; } AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_dock_attach_later, dev); out: return (AE_OK); } static void acpi_dock_insert_children(device_t dev) { ACPI_STATUS status; ACPI_HANDLE sb_handle; status = AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle); if (ACPI_SUCCESS(status)) { AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_dock_insert_child, NULL, dev, NULL); } } static void acpi_dock_insert(device_t dev) { struct acpi_dock_softc *sc; ACPI_HANDLE h; ACPI_SERIAL_ASSERT(dock); sc = device_get_softc(dev); h = acpi_get_handle(dev); if (sc->status == ACPI_DOCK_STATUS_UNDOCKED || sc->status == ACPI_DOCK_STATUS_UNKNOWN) { acpi_dock_execute_lck(dev, ACPI_DOCK_LOCK); if (acpi_dock_execute_dck(dev, ACPI_DOCK_CONNECT) != 0) { device_printf(dev, "_DCK failed\n"); return; } if (!cold) { acpi_dock_insert_children(dev); acpi_UserNotify("Dock", h, 1); } sc->status = ACPI_DOCK_STATUS_DOCKED; } } /* * Undock */ static ACPI_STATUS acpi_dock_eject_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { device_t dock_dev, dev; ACPI_HANDLE dock_handle; dock_dev = *(device_t *)context; dock_handle = acpi_get_handle(dock_dev); if (!acpi_dock_is_ejd_device(dock_handle, handle)) goto out; ACPI_VPRINT(dock_dev, acpi_device_get_parent_softc(dock_dev), "ejecting device for %s\n", acpi_name(handle)); dev = acpi_get_device(handle); if (dev != NULL && device_is_attached(dev)) { - mtx_lock(&Giant); + bus_topo_lock(); device_detach(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); } acpi_SetInteger(handle, "_EJ0", 0); out: return (AE_OK); } static void acpi_dock_eject_children(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; status = AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle); if
(ACPI_SUCCESS(status)) { AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_dock_eject_child, NULL, &dev, NULL); } } static void acpi_dock_removal(device_t dev) { struct acpi_dock_softc *sc; ACPI_HANDLE h; ACPI_SERIAL_ASSERT(dock); sc = device_get_softc(dev); h = acpi_get_handle(dev); if (sc->status == ACPI_DOCK_STATUS_DOCKED || sc->status == ACPI_DOCK_STATUS_UNKNOWN) { acpi_dock_eject_children(dev); if (acpi_dock_execute_dck(dev, ACPI_DOCK_ISOLATE) != 0) return; acpi_dock_execute_lck(dev, ACPI_DOCK_UNLOCK); if (acpi_dock_execute_ejx(dev, 1, 0) != 0) { device_printf(dev, "_EJ0 failed\n"); return; } acpi_UserNotify("Dock", h, 0); sc->status = ACPI_DOCK_STATUS_UNDOCKED; } acpi_dock_get_info(dev); if (sc->_sta != 0) device_printf(dev, "mechanical failure (%#x).\n", sc->_sta); } /* * Device/Bus check */ static void acpi_dock_device_check(device_t dev) { struct acpi_dock_softc *sc; ACPI_SERIAL_ASSERT(dock); sc = device_get_softc(dev); acpi_dock_get_info(dev); /* * If the _STA method indicates 'present' and 'functioning', the * system is docked. If _STA does not exist for this device, it * is always present. */ if (sc->_sta == ACPI_DOCK_STATUS_UNKNOWN || ACPI_DEVICE_PRESENT(sc->_sta)) acpi_dock_insert(dev); else if (sc->_sta == 0) acpi_dock_removal(dev); } /* * Notify Handler */ static void acpi_dock_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t dev; dev = (device_t) context; ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev), "got notification %#x\n", notify); ACPI_SERIAL_BEGIN(dock); switch (notify) { case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: acpi_dock_device_check(dev); break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_dock_removal(dev); break; default: device_printf(dev, "unknown notify %#x\n", notify); break; } ACPI_SERIAL_END(dock); } static int acpi_dock_status_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_dock_softc *sc; device_t dev; int status, err; dev = (device_t)arg1; sc = device_get_softc(dev); status = sc->status; ACPI_SERIAL_BEGIN(dock); err = sysctl_handle_int(oidp, &status, 0, req); if (err != 0 || req->newptr == NULL) goto out; if (status != ACPI_DOCK_STATUS_UNDOCKED && status != ACPI_DOCK_STATUS_DOCKED) { err = EINVAL; goto out; } if (status == sc->status) goto out; switch (status) { case ACPI_DOCK_STATUS_UNDOCKED: acpi_dock_removal(dev); break; case ACPI_DOCK_STATUS_DOCKED: acpi_dock_device_check(dev); break; default: err = EINVAL; break; } out: ACPI_SERIAL_END(dock); return (err); } static int acpi_dock_probe(device_t dev) { ACPI_HANDLE h, tmp; h = acpi_get_handle(dev); if (acpi_disabled("dock") || ACPI_FAILURE(AcpiGetHandle(h, "_DCK", &tmp))) return (ENXIO); device_set_desc(dev, "ACPI Docking Station"); /* * XXX Somewhere else in the kernel panics on "sysctl kern" if we * return a negative value here (reprobe ok). 
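 * (Returning 0 here is BUS_PROBE_SPECIFIC, the strongest claim on the * device, rather than the more conventional BUS_PROBE_DEFAULT.)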
*/ return (0); } static int acpi_dock_attach(device_t dev) { struct acpi_dock_softc *sc; ACPI_HANDLE h; sc = device_get_softc(dev); h = acpi_get_handle(dev); if (sc == NULL || h == NULL) return (ENXIO); sc->status = ACPI_DOCK_STATUS_UNKNOWN; AcpiEvaluateObject(h, "_INI", NULL, NULL); ACPI_SERIAL_BEGIN(dock); acpi_dock_device_check(dev); /* Get the sysctl tree */ sc->sysctl_ctx = device_get_sysctl_ctx(dev); sc->sysctl_tree = device_get_sysctl_tree(dev); SYSCTL_ADD_INT(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "_sta", CTLFLAG_RD, &sc->_sta, 0, "Dock _STA"); SYSCTL_ADD_INT(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "_bdn", CTLFLAG_RD, &sc->_bdn, 0, "Dock _BDN"); SYSCTL_ADD_INT(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "_uid", CTLFLAG_RD, &sc->_uid, 0, "Dock _UID"); SYSCTL_ADD_PROC(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, acpi_dock_status_sysctl, "I", "Dock/Undock operation"); ACPI_SERIAL_END(dock); AcpiInstallNotifyHandler(h, ACPI_ALL_NOTIFY, acpi_dock_notify_handler, dev); return (0); } static device_method_t acpi_dock_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_dock_probe), DEVMETHOD(device_attach, acpi_dock_attach), DEVMETHOD_END }; static driver_t acpi_dock_driver = { "acpi_dock", acpi_dock_methods, sizeof(struct acpi_dock_softc), }; static devclass_t acpi_dock_devclass; DRIVER_MODULE(acpi_dock, acpi, acpi_dock_driver, acpi_dock_devclass, 0, 0); MODULE_DEPEND(acpi_dock, acpi, 1, 1, 1); ACPI_PNP_INFO(acpi_dock_pnp_ids); diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c index c8d37268f466..36ea1a267853 100644 --- a/sys/dev/acpica/acpi_pci.c +++ b/sys/dev/acpica/acpi_pci.c @@ -1,485 +1,485 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" /* Hooks for the ACPI CA debugging infrastructure. 
*/ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI") struct acpi_pci_devinfo { struct pci_devinfo ap_dinfo; ACPI_HANDLE ap_handle; int ap_flags; }; ACPI_SERIAL_DECL(pci_powerstate, "ACPI PCI power methods"); /* Be sure that ACPI and PCI power states are equivalent. */ CTASSERT(ACPI_STATE_D0 == PCI_POWERSTATE_D0); CTASSERT(ACPI_STATE_D1 == PCI_POWERSTATE_D1); CTASSERT(ACPI_STATE_D2 == PCI_POWERSTATE_D2); CTASSERT(ACPI_STATE_D3 == PCI_POWERSTATE_D3); static struct pci_devinfo *acpi_pci_alloc_devinfo(device_t dev); static int acpi_pci_attach(device_t dev); static void acpi_pci_child_deleted(device_t dev, device_t child); static int acpi_pci_child_location_str_method(device_t cbdev, device_t child, char *buf, size_t buflen); static int acpi_pci_detach(device_t dev); static int acpi_pci_probe(device_t dev); static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state); static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child); static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pci_probe), DEVMETHOD(device_attach, acpi_pci_attach), DEVMETHOD(device_detach, acpi_pci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, acpi_pci_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pci_write_ivar), DEVMETHOD(bus_child_deleted, acpi_pci_child_deleted), DEVMETHOD(bus_child_location_str, acpi_pci_child_location_str_method), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pci_get_dma_tag), DEVMETHOD(bus_get_domain, acpi_get_domain), /* PCI interface */ DEVMETHOD(pci_alloc_devinfo, acpi_pci_alloc_devinfo), DEVMETHOD(pci_child_added, acpi_pci_child_added), DEVMETHOD(pci_set_powerstate, acpi_pci_set_powerstate_method), DEVMETHOD_END }; static devclass_t pci_devclass; DEFINE_CLASS_1(pci, acpi_pci_driver, acpi_pci_methods, sizeof(struct pci_softc), pci_driver); DRIVER_MODULE(acpi_pci, pcib, acpi_pci_driver, pci_devclass, 0, 0); MODULE_DEPEND(acpi_pci, acpi, 1, 1, 1); MODULE_DEPEND(acpi_pci, pci, 1, 1, 1); MODULE_VERSION(acpi_pci, 1); static struct pci_devinfo * acpi_pci_alloc_devinfo(device_t dev) { struct acpi_pci_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (&dinfo->ap_dinfo); } static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)dinfo->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)dinfo->ap_flags; return (0); } return (pci_read_ivar(dev, child, which, result)); } static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: dinfo->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: dinfo->ap_flags = (int)value; return (0); } return (pci_write_ivar(dev, child, which, value)); } static void acpi_pci_child_deleted(device_t dev, device_t child) { struct acpi_pci_devinfo *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ap_handle) == child) AcpiDetachData(dinfo->ap_handle, 
acpi_fake_objhandler); pci_child_deleted(dev, child); } static int acpi_pci_child_location_str_method(device_t cbdev, device_t child, char *buf, size_t buflen) { struct acpi_pci_devinfo *dinfo = device_get_ivars(child); int pxm; char buf2[32]; pci_child_location_str_method(cbdev, child, buf, buflen); if (dinfo->ap_handle) { strlcat(buf, " handle=", buflen); strlcat(buf, acpi_name(dinfo->ap_handle), buflen); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ap_handle, "_PXM", &pxm))) { snprintf(buf2, 32, " _PXM=%d", pxm); strlcat(buf, buf2, buflen); } } return (0); } /* * PCI power management */ static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; int old_state, error; error = 0; if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3) return (EINVAL); /* * We set the state using PCI Power Management outside of setting * the ACPI state. This means that when powering down a device, we * first shut it down using PCI, and then using ACPI, which lets ACPI * try to power down any Power Resources that are now no longer used. * When powering up a device, we let ACPI set the state first so that * it can enable any needed Power Resources before changing the PCI * power state. */ ACPI_SERIAL_BEGIN(pci_powerstate); old_state = pci_get_powerstate(child); if (old_state < state && pci_do_power_suspend) { error = pci_set_powerstate_method(dev, child, state); if (error) goto out; } h = acpi_get_handle(child); status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(dev, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(dev, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); if (old_state > state && pci_do_power_resume) error = pci_set_powerstate_method(dev, child, state); out: ACPI_SERIAL_END(pci_powerstate); return (error); } static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child) { ACPI_STATUS status; device_t child; /* * Occasionally a PCI device may show up as an ACPI device * with a _HID. (For example, the TabletPC TC1000 has a * second PCI-ISA bridge that has a _HID for an * acpi_sysresource device.) In that case, leave ACPI-CA's * device data pointing at the ACPI-enumerated device. */ child = acpi_get_device(handle); if (child != NULL) { KASSERT(device_get_parent(child) == devclass_get_device(devclass_find("acpi"), 0), ("%s: child (%s)'s parent is not acpi0", __func__, acpi_name(handle))); return; } /* * Update ACPI-CA to use the PCI enumerated device_t for this handle.
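 * acpi_get_device() later recovers this pointer via AcpiGetData(), so * notify handlers can map an ACPI handle back to the PCI-enumerated * device_t.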
*/ status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child); if (ACPI_FAILURE(status)) printf("WARNING: Unable to attach object data to %s - %s\n", acpi_name(handle), AcpiFormatException(status)); } static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { struct acpi_pci_devinfo *dinfo; device_t child; int func, slot; UINT32 address; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); child = context; if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &address))) return_ACPI_STATUS (AE_OK); slot = ACPI_ADR_PCI_SLOT(address); func = ACPI_ADR_PCI_FUNC(address); dinfo = device_get_ivars(child); if (dinfo->ap_dinfo.cfg.func == func && dinfo->ap_dinfo.cfg.slot == slot) { dinfo->ap_handle = handle; acpi_pci_update_device(handle, child); return_ACPI_STATUS (AE_CTRL_TERMINATE); } return_ACPI_STATUS (AE_OK); } void acpi_pci_child_added(device_t dev, device_t child) { /* * PCI devices are added via the bus scan in the normal PCI * bus driver. As each device is added, the * acpi_pci_child_added() callback walks the ACPI namespace * under the bridge driver to save ACPI handles to all the * devices that appear in the ACPI namespace as immediate * descendants of the bridge. * * XXX: Sometimes PCI devices show up in the ACPI namespace that * pci_add_children() doesn't find. We currently just ignore * these devices. */ AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_save_handle, NULL, child, NULL); } static int acpi_pci_probe(device_t dev) { if (acpi_get_handle(dev) == NULL) return (ENXIO); device_set_desc(dev, "ACPI PCI bus"); return (BUS_PROBE_DEFAULT); } static void acpi_pci_bus_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t dev; dev = context; switch (notify) { case ACPI_NOTIFY_BUS_CHECK: - mtx_lock(&Giant); + bus_topo_lock(); BUS_RESCAN(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); break; default: device_printf(dev, "unknown notify %#x\n", notify); break; } } static void acpi_pci_device_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t child, dev; ACPI_STATUS status; int error; dev = context; switch (notify) { case ACPI_NOTIFY_DEVICE_CHECK: - mtx_lock(&Giant); + bus_topo_lock(); BUS_RESCAN(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); break; case ACPI_NOTIFY_EJECT_REQUEST: child = acpi_get_device(h); if (child == NULL) { device_printf(dev, "no device to eject for %s\n", acpi_name(h)); return; } - mtx_lock(&Giant); + bus_topo_lock(); error = device_detach(child); if (error) { - mtx_unlock(&Giant); + bus_topo_unlock(); device_printf(dev, "failed to detach %s: %d\n", device_get_nameunit(child), error); return; } status = acpi_SetInteger(h, "_EJ0", 1); if (ACPI_FAILURE(status)) { - mtx_unlock(&Giant); + bus_topo_unlock(); device_printf(dev, "failed to eject %s: %s\n", acpi_name(h), AcpiFormatException(status)); return; } BUS_RESCAN(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); break; default: device_printf(dev, "unknown notify %#x for %s\n", notify, acpi_name(h)); break; } } static ACPI_STATUS acpi_pci_install_device_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiInstallNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler, context); return_ACPI_STATUS (AE_OK); } static int acpi_pci_attach(device_t dev) { int error; error = pci_attach(dev); if (error) return (error); 
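/* * Register for bus-level notifications on the bridge handle, then walk * its immediate children to hook device-level notifications on any * ejectable devices. */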
AcpiInstallNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler, dev); AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_install_device_notify_handler, NULL, dev, NULL); return (0); } static ACPI_STATUS acpi_pci_remove_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiRemoveNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler); return_ACPI_STATUS (AE_OK); } static int acpi_pci_detach(device_t dev) { AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_remove_notify_handler, NULL, dev, NULL); AcpiRemoveNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler); return (pci_detach(dev)); } #ifdef IOMMU static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { bus_dma_tag_t tag; if (device_get_parent(child) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, child); } else tag = NULL; if (tag == NULL) tag = pci_get_dma_tag(bus, child); return (tag); } #else static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { return (pci_get_dma_tag(bus, child)); } #endif diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index a2f660361b58..75876fe17a4c 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1451 +1,1451 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom ChipCommon driver. 
* * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" devclass_t bhnd_chipc_devclass; /**< bhnd(4) chipcommon device class */ static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? */ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks*/ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(struct chipc_softc *sc, device_t child, int type, int rid, struct resource *r, bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(struct chipc_softc *sc, int type); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, name) \ KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(_name) " not set")) #define CHIPC_ASSERT_CAP(_sc, name) \ KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(_name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = 
bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core = sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * respond to ChipCommon API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_generic_attach). */ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc.
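 * * The dispatch below is therefore: * * caps.pmu && !caps.aob -> add a bhnd_pmu child here * caps.pmu && caps.aob -> nothing; PMU is enumerated by bhnd(4) * !caps.pmu && caps.pwr_ctrl -> add a bhnd_pwrctl child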
*/ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. 
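 * * The resulting precedence is: external SPROM, then a programmed OTP * hardware subregion, then flash; if none is present, the NVRAM source * is reported as unknown.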
*/ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. 
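 * The OTP layout register reports the general-use-region offset in bits; * for example, a reported offset of 4096 bits yields an sprom_offset of * 512 bytes after the conversion below.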
*/ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int chipc_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) { if (buflen == 0) return (EOVERFLOW); *buf = '\0'; return (0); } static int chipc_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { if (buflen == 0) return (EOVERFLOW); *buf = '\0'; return (ENXIO); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return (ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = 
rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The chipc device state. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * chipc_get_rman(struct chipc_softc *sc, int type) { switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); }; } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(sc, type); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. 
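 * (bus_generic_rl_alloc_resource() passes such requests up to our own * parent bus, which manages the ranges chipc does not.)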
*/ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); chipc_release_region(sc, cr, RF_ALLOCATED); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, type, rid, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if it exists */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, type, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param sc Driver instance state. 
* @param child Requesting child device. * @param type resource type of @p r. * @param rid resource id of @p r. * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(struct chipc_softc *sc, device_t child, int type, int rid, struct resource *r, bool req_direct) { struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. */ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct chipc_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(sc, child, type, rid, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct rman *rm; sc = device_get_softc(dev); /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(sc, child, type, rid, r, true)); } /** * Default chipc(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int chipc_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? 
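 *
 * Illustrative note: every resource method in this driver applies the
 * same ownership test; a hypothetical helper (not present in the
 * driver) capturing it would read:
 *
 *	static bool
 *	chipc_owns_resource(struct chipc_softc *sc, int type,
 *	    struct resource *r)
 *	{
 *		struct rman *rm = chipc_get_rman(sc, type);
 *		return (rm != NULL && rman_is_region_manager(r, rm));
 *	}
 *
 * Resources that fail the test are forwarded to the parent bus's
 * generic implementation.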
*/ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); - mtx_lock(&Giant); /* for newbus */ + bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { - mtx_unlock(&Giant); + bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); - mtx_unlock(&Giant); + bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? */ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count; skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `otpstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
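 *
 * Illustrative note: the enable/disable pair above forms a refcounted
 * bracket. Assuming the standard bhnd_chipc interface wrappers, a
 * consumer would balance the calls along these lines (hypothetical
 * usage sketch):
 *
 *	if ((error = BHND_CHIPC_ENABLE_SPROM(dev)) != 0)
 *		return (error);
 *	... access SPROM/OTP ...
 *	BHND_CHIPC_DISABLE_SPROM(dev);
 *
 * Only the first enable and the final disable touch the hardware; the
 * calls in between merely adjust sc->sprom_refcnt.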
*/ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use")); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl |= CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return; } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } } static uint32_t chipc_read_chipst(device_t dev) { struct chipc_softc *sc = device_get_softc(dev); return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST)); } static void chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask) { struct chipc_softc *sc; uint32_t cctrl; sc = device_get_softc(dev); CHIPC_LOCK(sc); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); cctrl = (cctrl & ~mask) | (value & mask); bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); CHIPC_UNLOCK(sc); } static struct chipc_caps * chipc_get_caps(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); return (&sc->caps); } static device_method_t chipc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, chipc_probe), DEVMETHOD(device_attach, chipc_attach), DEVMETHOD(device_detach, chipc_detach), DEVMETHOD(device_suspend, chipc_suspend), DEVMETHOD(device_resume, chipc_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, chipc_probe_nomatch), DEVMETHOD(bus_print_child, chipc_print_child), DEVMETHOD(bus_child_pnpinfo_str, chipc_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, chipc_child_location_str), DEVMETHOD(bus_add_child, chipc_add_child), DEVMETHOD(bus_child_deleted, chipc_child_deleted), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, chipc_alloc_resource), DEVMETHOD(bus_release_resource, chipc_release_resource), DEVMETHOD(bus_adjust_resource, chipc_adjust_resource), DEVMETHOD(bus_activate_resource, chipc_activate_resource), DEVMETHOD(bus_deactivate_resource, chipc_deactivate_resource), DEVMETHOD(bus_get_resource_list, chipc_get_resource_list), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), 
DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), /* BHND bus interface */ DEVMETHOD(bhnd_bus_activate_resource, chipc_activate_bhnd_resource), /* ChipCommon interface */ DEVMETHOD(bhnd_chipc_read_chipst, chipc_read_chipst), DEVMETHOD(bhnd_chipc_write_chipctrl, chipc_write_chipctrl), DEVMETHOD(bhnd_chipc_enable_sprom, chipc_enable_sprom), DEVMETHOD(bhnd_chipc_disable_sprom, chipc_disable_sprom), DEVMETHOD(bhnd_chipc_get_caps, chipc_get_caps), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods, sizeof(struct chipc_softc)); EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, bhnd_chipc_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1); MODULE_VERSION(bhnd_chipc, 1); diff --git a/sys/dev/cardbus/cardbus.c b/sys/dev/cardbus/cardbus.c index 50a21dfc0b82..6145ad8cebb1 100644 --- a/sys/dev/cardbus/cardbus.c +++ b/sys/dev/cardbus/cardbus.c @@ -1,373 +1,373 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000,2001 Jonathan Chen. All rights reserved. * Copyright (c) 2003-2008 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "pcib_if.h" /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CardBus parameters"); int cardbus_debug = 0; SYSCTL_INT(_hw_cardbus, OID_AUTO, debug, CTLFLAG_RWTUN, &cardbus_debug, 0, "CardBus debug"); int cardbus_cis_debug = 0; SYSCTL_INT(_hw_cardbus, OID_AUTO, cis_debug, CTLFLAG_RWTUN, &cardbus_cis_debug, 0, "CardBus CIS debug"); #define DPRINTF(a) if (cardbus_debug) printf a #define DEVPRINTF(x) if (cardbus_debug) device_printf x static int cardbus_attach(device_t cbdev); static int cardbus_attach_card(device_t cbdev); static int cardbus_detach(device_t cbdev); static int cardbus_detach_card(device_t cbdev); static void cardbus_device_setup_regs(pcicfgregs *cfg); static void cardbus_driver_added(device_t cbdev, driver_t *driver); static int cardbus_probe(device_t cbdev); static int cardbus_read_ivar(device_t cbdev, device_t child, int which, uintptr_t *result); /************************************************************************/ /* Probe/Attach */ /************************************************************************/ static int cardbus_probe(device_t cbdev) { device_set_desc(cbdev, "CardBus bus"); return (0); } static int cardbus_attach(device_t cbdev) { struct cardbus_softc *sc; #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(cbdev); sc->sc_dev = cbdev; #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(cbdev, PCI_RES_BUS, &rid, pcib_get_bus(cbdev), pcib_get_bus(cbdev), 1, 0); if (sc->sc_bus == NULL) { device_printf(cbdev, "failed to allocate bus number\n"); return (ENXIO); } #else device_printf(cbdev, "Your bus numbers may be AFU\n"); #endif return (0); } static int cardbus_detach(device_t cbdev) { #ifdef PCI_RES_BUS struct cardbus_softc *sc; #endif cardbus_detach_card(cbdev); #ifdef PCI_RES_BUS sc = device_get_softc(cbdev); device_printf(cbdev, "Freeing up the allocated bus\n"); (void)bus_release_resource(cbdev, PCI_RES_BUS, 0, sc->sc_bus); #endif return (0); } static int cardbus_suspend(device_t self) { cardbus_detach_card(self); return (0); } static int cardbus_resume(device_t self) { return (0); } /************************************************************************/ /* Attach/Detach card */ /************************************************************************/ static void cardbus_device_setup_regs(pcicfgregs *cfg) { device_t dev = cfg->dev; int i; /* * Some cards power up with garbage in their BARs. This * code clears all that junk out. 
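 *
 * Illustrative note: PCIR_BAR(i) expands to config offset 0x10 + 4 * i,
 * so the loop below zeroes the first PCIR_MAX_BAR_0 32-bit BAR
 * registers of the type-0 header before the resource code re-sizes
 * them.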
*/ for (i = 0; i < PCIR_MAX_BAR_0; i++) pci_write_config(dev, PCIR_BAR(i), 0, 4); cfg->intline = pci_get_irq(device_get_parent(device_get_parent(dev))); pci_write_config(dev, PCIR_INTLINE, cfg->intline, 1); pci_write_config(dev, PCIR_CACHELNSZ, 0x08, 1); pci_write_config(dev, PCIR_LATTIMER, 0xa8, 1); pci_write_config(dev, PCIR_MINGNT, 0x14, 1); pci_write_config(dev, PCIR_MAXLAT, 0x14, 1); } static struct pci_devinfo * cardbus_alloc_devinfo(device_t dev) { struct cardbus_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (&dinfo->pci); } static int cardbus_attach_card(device_t cbdev) { device_t brdev = device_get_parent(cbdev); device_t child; int bus, domain, slot, func; int cardattached = 0; int cardbusfunchigh = 0; struct cardbus_softc *sc; sc = device_get_softc(cbdev); cardbus_detach_card(cbdev); /* detach existing cards */ POWER_DISABLE_SOCKET(brdev, cbdev); /* Turn the socket off first */ POWER_ENABLE_SOCKET(brdev, cbdev); domain = pcib_get_domain(cbdev); bus = pcib_get_bus(cbdev); slot = 0; - mtx_lock(&Giant); + bus_topo_lock(); /* For each function, set it up and try to attach a driver to it */ for (func = 0; func <= cardbusfunchigh; func++) { struct cardbus_devinfo *dinfo; dinfo = (struct cardbus_devinfo *) pci_read_device(brdev, cbdev, domain, bus, slot, func); if (dinfo == NULL) continue; if (dinfo->pci.cfg.mfdev) cardbusfunchigh = PCI_FUNCMAX; child = device_add_child(cbdev, NULL, -1); if (child == NULL) { DEVPRINTF((cbdev, "Cannot add child!\n")); pci_freecfg((struct pci_devinfo *)dinfo); continue; } dinfo->pci.cfg.dev = child; resource_list_init(&dinfo->pci.resources); device_set_ivars(child, dinfo); cardbus_device_create(sc, dinfo, cbdev, child); if (cardbus_do_cis(cbdev, child) != 0) DEVPRINTF((cbdev, "Warning: Bogus CIS ignored\n")); pci_cfg_save(dinfo->pci.cfg.dev, &dinfo->pci, 0); pci_cfg_restore(dinfo->pci.cfg.dev, &dinfo->pci); cardbus_device_setup_regs(&dinfo->pci.cfg); pci_add_resources(cbdev, child, 1, dinfo->mprefetchable); pci_print_verbose(&dinfo->pci); if (device_probe_and_attach(child) == 0) cardattached++; else pci_cfg_save(dinfo->pci.cfg.dev, &dinfo->pci, 1); } - mtx_unlock(&Giant); + bus_topo_unlock(); if (cardattached > 0) return (0); /* POWER_DISABLE_SOCKET(brdev, cbdev); */ return (ENOENT); } static void cardbus_child_deleted(device_t cbdev, device_t child) { struct cardbus_devinfo *dinfo = device_get_ivars(child); if (dinfo->pci.cfg.dev != child) device_printf(cbdev, "devinfo dev mismatch\n"); cardbus_device_destroy(dinfo); pci_child_deleted(cbdev, child); } static int cardbus_detach_card(device_t cbdev) { int err = 0; - mtx_lock(&Giant); + bus_topo_lock(); err = bus_generic_detach(cbdev); if (err == 0) err = device_delete_children(cbdev); - mtx_unlock(&Giant); + bus_topo_unlock(); if (err) return (err); POWER_DISABLE_SOCKET(device_get_parent(cbdev), cbdev); return (err); } static void cardbus_driver_added(device_t cbdev, driver_t *driver) { int numdevs; device_t *devlist; device_t dev; int i; struct cardbus_devinfo *dinfo; DEVICE_IDENTIFY(driver, cbdev); if (device_get_children(cbdev, &devlist, &numdevs) != 0) return; /* * If there are no drivers attached, but there are children, * then power the card up. 
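 *
 * Illustrative note: the first loop below stops at the first child that
 * is not in DS_NOTPRESENT; only when it runs off the end (i == numdevs,
 * with at least one child) is every child unattached, and only then is
 * the socket powered.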
*/ for (i = 0; i < numdevs; i++) { dev = devlist[i]; if (device_get_state(dev) != DS_NOTPRESENT) break; } if (i > 0 && i == numdevs) POWER_ENABLE_SOCKET(device_get_parent(cbdev), cbdev); for (i = 0; i < numdevs; i++) { dev = devlist[i]; if (device_get_state(dev) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(dev); pci_print_verbose(&dinfo->pci); if (bootverbose) printf("pci%d:%d:%d:%d: reprobing on driver added\n", dinfo->pci.cfg.domain, dinfo->pci.cfg.bus, dinfo->pci.cfg.slot, dinfo->pci.cfg.func); pci_cfg_restore(dinfo->pci.cfg.dev, &dinfo->pci); if (device_probe_and_attach(dev) != 0) pci_cfg_save(dev, &dinfo->pci, 1); } free(devlist, M_TEMP); } /************************************************************************/ /* Other Bus Methods */ /************************************************************************/ static int cardbus_read_ivar(device_t cbdev, device_t child, int which, uintptr_t *result) { struct cardbus_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->pci.cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ if (dinfo->fepresent & (1 << PCCARD_TPLFE_TYPE_LAN_NID)) { *((uint8_t **) result) = dinfo->funce.lan.nid; break; } *((uint8_t **) result) = NULL; return (EINVAL); default: return (pci_read_ivar(cbdev, child, which, result)); } return 0; } static device_method_t cardbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cardbus_probe), DEVMETHOD(device_attach, cardbus_attach), DEVMETHOD(device_detach, cardbus_detach), DEVMETHOD(device_suspend, cardbus_suspend), DEVMETHOD(device_resume, cardbus_resume), /* Bus interface */ DEVMETHOD(bus_child_deleted, cardbus_child_deleted), DEVMETHOD(bus_get_dma_tag, bus_generic_get_dma_tag), DEVMETHOD(bus_read_ivar, cardbus_read_ivar), DEVMETHOD(bus_driver_added, cardbus_driver_added), DEVMETHOD(bus_rescan, bus_null_rescan), /* Card Interface */ DEVMETHOD(card_attach_card, cardbus_attach_card), DEVMETHOD(card_detach_card, cardbus_detach_card), /* PCI interface */ DEVMETHOD(pci_alloc_devinfo, cardbus_alloc_devinfo), {0,0} }; DEFINE_CLASS_1(cardbus, cardbus_driver, cardbus_methods, sizeof(struct cardbus_softc), pci_driver); static devclass_t cardbus_devclass; DRIVER_MODULE(cardbus, cbb, cardbus_driver, cardbus_devclass, 0, 0); MODULE_VERSION(cardbus, 1); diff --git a/sys/dev/drm2/drm_dp_iic_helper.c b/sys/dev/drm2/drm_dp_iic_helper.c index 35318c11c388..c3f980a3342f 100644 --- a/sys/dev/drm2/drm_dp_iic_helper.c +++ b/sys/dev/drm2/drm_dp_iic_helper.c @@ -1,278 +1,278 @@ /* * Copyright © 2009 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "iicbus_if.h" #include #include #include static int iic_dp_aux_transaction(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte) { struct iic_dp_aux_data *aux_data; int ret; aux_data = device_get_softc(idev); ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte); if (ret < 0) return (ret); return (0); } /* * I2C over AUX CH */ /* * Send the address. If the I2C link is running, this 'restarts' * the connection with the new address; this is used for doing * a write followed by a read (as needed for DDC) */ static int iic_dp_aux_address(device_t idev, u16 address, bool reading) { struct iic_dp_aux_data *aux_data; int mode, ret; aux_data = device_get_softc(idev); mode = MODE_I2C_START; if (reading) mode |= MODE_I2C_READ; else mode |= MODE_I2C_WRITE; aux_data->address = address; aux_data->running = true; ret = iic_dp_aux_transaction(idev, mode, 0, NULL); return (ret); } /* * Stop the I2C transaction. This closes out the link, sending * a bare address packet with the MOT bit turned off */ static void iic_dp_aux_stop(device_t idev, bool reading) { struct iic_dp_aux_data *aux_data; int mode; aux_data = device_get_softc(idev); mode = MODE_I2C_STOP; if (reading) mode |= MODE_I2C_READ; else mode |= MODE_I2C_WRITE; if (aux_data->running) { (void)iic_dp_aux_transaction(idev, mode, 0, NULL); aux_data->running = false; } } /* * Write a single byte to the current I2C address; the * I2C link must be running or this returns -EIO */ static int iic_dp_aux_put_byte(device_t idev, u8 byte) { struct iic_dp_aux_data *aux_data; int ret; aux_data = device_get_softc(idev); if (!aux_data->running) return (-EIO); ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL); return (ret); } /* * Read a single byte from the current I2C address; the * I2C link must be running or this returns -EIO */ static int iic_dp_aux_get_byte(device_t idev, u8 *byte_ret) { struct iic_dp_aux_data *aux_data; int ret; aux_data = device_get_softc(idev); if (!aux_data->running) return (-EIO); ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret); return (ret); } static int iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num) { u8 *buf; int b, m, ret; u16 len; bool reading; ret = 0; reading = false; for (m = 0; m < num; m++) { len = msgs[m].len; buf = msgs[m].buf; reading = (msgs[m].flags & IIC_M_RD) != 0; ret = iic_dp_aux_address(idev, msgs[m].slave >> 1, reading); if (ret < 0) break; if (reading) { for (b = 0; b < len; b++) { ret = iic_dp_aux_get_byte(idev, &buf[b]); if (ret != 0) break; } } else { for (b = 0; b < len; b++) { ret = iic_dp_aux_put_byte(idev, buf[b]); if (ret < 0) break; } } if (ret != 0) break; } iic_dp_aux_stop(idev, reading); DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret); return (-ret); } static void iic_dp_aux_reset_bus(device_t idev) { (void)iic_dp_aux_address(idev, 0, false); (void)iic_dp_aux_stop(idev, false); } static int iic_dp_aux_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr) { 
iic_dp_aux_reset_bus(idev); return (0); } static int iic_dp_aux_prepare_bus(device_t idev) { /* adapter->retries = 3; */ iic_dp_aux_reset_bus(idev); return (0); } static int iic_dp_aux_probe(device_t idev) { return (BUS_PROBE_DEFAULT); } static int iic_dp_aux_attach(device_t idev) { struct iic_dp_aux_data *aux_data; aux_data = device_get_softc(idev); aux_data->port = device_add_child(idev, "iicbus", -1); if (aux_data->port == NULL) return (ENXIO); device_quiet(aux_data->port); bus_generic_attach(idev); return (0); } int iic_dp_aux_add_bus(device_t dev, const char *name, int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte), void *priv, device_t *bus, device_t *adapter) { device_t ibus; struct iic_dp_aux_data *data; int idx, error; static int dp_bus_counter; - mtx_lock(&Giant); + bus_topo_lock(); idx = atomic_fetchadd_int(&dp_bus_counter, 1); ibus = device_add_child(dev, "drm_iic_dp_aux", idx); if (ibus == NULL) { - mtx_unlock(&Giant); + bus_topo_unlock(); DRM_ERROR("drm_iic_dp_aux bus %d creation error\n", idx); return (-ENXIO); } device_quiet(ibus); error = device_probe_and_attach(ibus); if (error != 0) { device_delete_child(dev, ibus); - mtx_unlock(&Giant); + bus_topo_unlock(); DRM_ERROR("drm_iic_dp_aux bus %d attach failed, %d\n", idx, error); return (-error); } data = device_get_softc(ibus); data->running = false; data->address = 0; data->aux_ch = ch; data->priv = priv; error = iic_dp_aux_prepare_bus(ibus); if (error == 0) { *bus = ibus; *adapter = data->port; } - mtx_unlock(&Giant); + bus_topo_unlock(); return (-error); } static device_method_t drm_iic_dp_aux_methods[] = { DEVMETHOD(device_probe, iic_dp_aux_probe), DEVMETHOD(device_attach, iic_dp_aux_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(iicbus_reset, iic_dp_aux_reset), DEVMETHOD(iicbus_transfer, iic_dp_aux_xfer), DEVMETHOD_END }; static driver_t drm_iic_dp_aux_driver = { "drm_iic_dp_aux", drm_iic_dp_aux_methods, sizeof(struct iic_dp_aux_data) }; static devclass_t drm_iic_dp_aux_devclass; DRIVER_MODULE_ORDERED(drm_iic_dp_aux, drmn, drm_iic_dp_aux_driver, drm_iic_dp_aux_devclass, 0, 0, SI_ORDER_SECOND); diff --git a/sys/dev/hyperv/pcib/vmbus_pcib.c b/sys/dev/hyperv/pcib/vmbus_pcib.c index c7df32044678..fd2b732267f0 100644 --- a/sys/dev/hyperv/pcib/vmbus_pcib.c +++ b/sys/dev/hyperv/pcib/vmbus_pcib.c @@ -1,1897 +1,1897 @@ /*- * Copyright (c) 2016-2017 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #ifdef NEW_PCIB #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #include #include #include #include #include "vmbus_if.h" #if __FreeBSD_version < 1100000 typedef u_long rman_res_t; #define RM_MAX_END (~(rman_res_t)0) #endif struct completion { unsigned int done; struct mtx lock; }; static void init_completion(struct completion *c) { memset(c, 0, sizeof(*c)); mtx_init(&c->lock, "hvcmpl", NULL, MTX_DEF); c->done = 0; } static void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static void complete(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static void wait_for_completion(struct completion *c) { mtx_lock(&c->lock); while (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", 0); c->done--; mtx_unlock(&c->lock); } /* * Return: 0 if completed, a non-zero value if timed out. */ static int wait_for_completion_timeout(struct completion *c, int timeout) { int ret; mtx_lock(&c->lock); if (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", timeout); if (c->done > 0) { c->done--; ret = 0; } else { ret = 1; } mtx_unlock(&c->lock); return (ret); } #define PCI_MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor))) enum { PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 }; #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) /* * Message Types */ enum pci_message_type { /* * Version 1.1 */ PCI_MESSAGE_BASE = 0x42490000, PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, PCI_EJECT = PCI_MESSAGE_BASE + 0xB, PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, PCI_MESSAGE_MAXIMUM }; /* * Structures defining the virtual PCI Express protocol. 
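 *
 * Illustrative note: PCI_MAKE_VERSION(1, 1) evaluates to 0x00010001,
 * major in the upper 16 bits and minor in the lower 16, which on a
 * little-endian guest is exactly the layout described by union
 * pci_version below.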
*/ union pci_version { struct { uint16_t minor_version; uint16_t major_version; } parts; uint32_t version; } __packed; /* * This representation is the one used in Windows, which is * what is expected when sending this back and forth with * the Hyper-V parent partition. */ union win_slot_encoding { struct { uint32_t slot:5; uint32_t func:3; uint32_t reserved:24; } bits; uint32_t val; } __packed; struct pci_func_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ } __packed; struct hv_msi_desc { uint8_t vector; uint8_t delivery_mode; uint16_t vector_count; uint32_t reserved; uint64_t cpu_mask; } __packed; struct tran_int_desc { uint16_t reserved; uint16_t vector_count; uint32_t data; uint64_t address; } __packed; struct pci_message { uint32_t type; } __packed; struct pci_child_message { struct pci_message message_type; union win_slot_encoding wslot; } __packed; struct pci_incoming_message { struct vmbus_chanpkt_hdr hdr; struct pci_message message_type; } __packed; struct pci_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ } __packed; struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; struct pci_message message[0]; }; /* * Specific message types supporting the PCI protocol. */ struct pci_version_request { struct pci_message message_type; uint32_t protocol_version; uint32_t is_last_attempt:1; uint32_t reservedz:31; } __packed; struct pci_bus_d0_entry { struct pci_message message_type; uint32_t reserved; uint64_t mmio_base; } __packed; struct pci_bus_relations { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc func[0]; } __packed; #define MAX_NUM_BARS (PCIR_MAX_BAR_0 + 1) struct pci_q_res_req_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ uint32_t probed_bar[MAX_NUM_BARS]; } __packed; struct pci_resources_assigned { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][MAX_NUM_BARS]; /* unused here */ uint32_t msi_descriptors; uint32_t reserved[4]; } __packed; struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc int_desc; } __packed; struct pci_create_int_response { struct pci_response response; uint32_t reserved; struct tran_int_desc int_desc; } __packed; struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct tran_int_desc int_desc; } __packed; struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; } __packed; struct pci_eject_response { struct pci_message message_type; union win_slot_encoding wslot; uint32_t status; } __packed; /* * Driver specific state. 
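 *
 * Illustrative note: the devfn_to_wslot()/wslot_to_devfn() helpers
 * below convert between the BSD-style devfn and the wire encoding; for
 * example, devfn 0x0b (slot 1, function 3) packs as bits.slot = 1,
 * bits.func = 3 and round-trips back to 0x0b, so host and guest agree
 * on which function a message refers to.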
*/ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_installed, }; struct hv_pcibus { device_t pcib; device_t pci_bus; struct vmbus_pcib_softc *sc; uint16_t pci_domain; enum hv_pcibus_state state; struct resource *cfg_res; struct completion query_completion, *query_comp; struct mtx config_lock; /* Avoid two threads writing index page */ struct mtx device_list_lock; /* Protect lists below */ TAILQ_HEAD(, hv_pci_dev) children; TAILQ_HEAD(, hv_dr_state) dr_list; volatile int detaching; }; struct hv_pci_dev { TAILQ_ENTRY(hv_pci_dev) link; struct pci_func_desc desc; bool reported_missing; struct hv_pcibus *hbus; struct task eject_task; TAILQ_HEAD(, hv_irq_desc) irq_desc_list; /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. */ uint32_t probed_bar[MAX_NUM_BARS]; }; /* * Tracks "Device Relations" messages from the host, which must be both * processed in order and deferred so that they don't run in the context * of the incoming packet callback. */ struct hv_dr_work { struct task task; struct hv_pcibus *bus; }; struct hv_dr_state { TAILQ_ENTRY(hv_dr_state) link; uint32_t device_count; struct pci_func_desc func[0]; }; struct hv_irq_desc { TAILQ_ENTRY(hv_irq_desc) link; struct tran_int_desc desc; int irq; }; #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) static uint32_t devfn_to_wslot(unsigned int devfn) { union win_slot_encoding wslot; wslot.val = 0; wslot.bits.slot = PCI_SLOT(devfn); wslot.bits.func = PCI_FUNC(devfn); return (wslot.val); } static unsigned int wslot_to_devfn(uint32_t wslot) { union win_slot_encoding encoding; unsigned int slot; unsigned int func; encoding.val = wslot; slot = encoding.bits.slot; func = encoding.bits.func; return (PCI_DEVFN(slot, func)); } struct vmbus_pcib_softc { struct vmbus_channel *chan; void *rx_buf; struct taskqueue *taskq; struct hv_pcibus *hbus; }; /* {44C4F61D-4444-4400-9D52-802E27EDE19F} */ static const struct hyperv_guid g_pass_through_dev_type = { .hv_guid = {0x1D, 0xF6, 0xC4, 0x44, 0x44, 0x44, 0x00, 0x44, 0x9D, 0x52, 0x80, 0x2E, 0x27, 0xED, 0xE1, 0x9F} }; struct hv_pci_compl { struct completion host_event; int32_t completion_status; }; struct q_res_req_compl { struct completion host_event; struct hv_pci_dev *hpdev; }; struct compose_comp_ctxt { struct hv_pci_compl comp_pkt; struct tran_int_desc int_desc; }; /* * It is possible the device is revoked during initialization. * Check if this happens during wait. * Return: 0 if response arrived, ENODEV if device revoked. 
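 *
 * Illustrative note: wait_for_response() pairs with the completion
 * primitives defined earlier; the request/response exchanges below all
 * follow the same shape (usage sketch):
 *
 *	init_completion(&comp_pkt.host_event);
 *	... vmbus_chan_send(..., (uint64_t)(uintptr_t)&ctxt.pkt) ...
 *	ret = wait_for_response(hbus, &comp_pkt.host_event);
 *	free_completion(&comp_pkt.host_event);
 *
 * with the channel callback invoking completion_func to fire the
 * completion.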
*/ static int wait_for_response(struct hv_pcibus *hbus, struct completion *c) { do { if (vmbus_chan_is_revoked(hbus->sc->chan)) { device_printf(hbus->pcib, "The device is revoked.\n"); return (ENODEV); } } while (wait_for_completion_timeout(c, hz /10) != 0); return 0; } static void hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; if (resp_packet_size >= sizeof(struct pci_response)) comp_pkt->completion_status = resp->status; else comp_pkt->completion_status = -1; complete(&comp_pkt->host_event); } static void q_resource_requirements(void *context, struct pci_response *resp, int resp_packet_size) { struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; int i; if (resp->status < 0) { printf("vmbus_pcib: failed to query resource requirements\n"); } else { for (i = 0; i < MAX_NUM_BARS; i++) completion->hpdev->probed_bar[i] = q_res_req->probed_bar[i]; } complete(&completion->host_event); } static void hv_pci_compose_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct compose_comp_ctxt *comp_pkt = context; struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; complete(&comp_pkt->comp_pkt.host_event); } static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct hv_irq_desc *hid) { struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.val = hpdev->desc.wslot.val; int_pkt->int_desc = hid->desc; vmbus_chan_send(hpdev->hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, int_pkt, sizeof(*int_pkt), 0); free(hid, M_DEVBUF); } static void hv_pci_delete_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct hv_irq_desc *hid, *tmp_hid; device_t pci_dev; int devfn; devfn = wslot_to_devfn(hpdev->desc.wslot.val); - mtx_lock(&Giant); + bus_topo_lock(); pci_dev = pci_find_dbsf(hbus->pci_domain, 0, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (pci_dev) device_delete_child(hbus->pci_bus, pci_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&hbus->device_list_lock); TAILQ_REMOVE(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) hv_int_desc_free(hpdev, hid); free(hpdev, M_DEVBUF); } static struct hv_pci_dev * new_pcichild_device(struct hv_pcibus *hbus, struct pci_func_desc *desc) { struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_child_message)]; } ctxt; int ret; hpdev = malloc(sizeof(*hpdev), M_DEVBUF, M_WAITOK | M_ZERO); hpdev->hbus = hbus; TAILQ_INIT(&hpdev->irq_desc_list); init_completion(&comp_pkt.host_event); comp_pkt.hpdev = hpdev; ctxt.pkt.compl_ctxt = &comp_pkt; ctxt.pkt.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&ctxt.pkt.message; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.val = desc->wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, res_req, sizeof(*res_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) goto err; if (wait_for_response(hbus, 
&comp_pkt.host_event)) goto err; free_completion(&comp_pkt.host_event); hpdev->desc = *desc; mtx_lock(&hbus->device_list_lock); if (TAILQ_EMPTY(&hbus->children)) hbus->pci_domain = desc->ser & 0xFFFF; TAILQ_INSERT_TAIL(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); return (hpdev); err: free_completion(&comp_pkt.host_event); free(hpdev, M_DEVBUF); return (NULL); } #if __FreeBSD_version < 1100000 /* Old versions don't have BUS_RESCAN(). Let's copy it from FreeBSD 11. */ static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func, size_t dinfo_size) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } static int pci_rescan(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_softc *sc; device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == 0xffff) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == 0xffff) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f, sizeof(struct pci_devinfo)); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. */ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #else static int pci_rescan(device_t dev) { return (BUS_RESCAN(dev)); } #endif static void pci_devices_present_work(void *arg, int pending __unused) { struct hv_dr_work *dr_wrk = arg; struct hv_dr_state *dr = NULL; struct hv_pcibus *hbus; uint32_t child_no; bool found; struct pci_func_desc *new_desc; struct hv_pci_dev *hpdev, *tmp_hpdev; struct completion *query_comp; bool need_rescan = false; hbus = dr_wrk->bus; free(dr_wrk, M_DEVBUF); /* Pull this off the queue and process it if it was the last one. 
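 *
 * Illustrative note: the drain loop below deliberately frees every
 * queued hv_dr_state except the newest; each "Device Relations"
 * message carries a full snapshot of the bus, so older, superseded
 * snapshots can be discarded unprocessed.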
*/ mtx_lock(&hbus->device_list_lock); while (!TAILQ_EMPTY(&hbus->dr_list)) { dr = TAILQ_FIRST(&hbus->dr_list); TAILQ_REMOVE(&hbus->dr_list, dr, link); /* Throw this away if the list still has stuff in it. */ if (!TAILQ_EMPTY(&hbus->dr_list)) { free(dr, M_DEVBUF); continue; } } mtx_unlock(&hbus->device_list_lock); if (!dr) return; /* First, mark all existing children as reported missing. */ mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) hpdev->reported_missing = true; mtx_unlock(&hbus->device_list_lock); /* Next, add back any reported devices. */ for (child_no = 0; child_no < dr->device_count; child_no++) { found = false; new_desc = &dr->func[child_no]; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if ((hpdev->desc.wslot.val == new_desc->wslot.val) && (hpdev->desc.v_id == new_desc->v_id) && (hpdev->desc.d_id == new_desc->d_id) && (hpdev->desc.ser == new_desc->ser)) { hpdev->reported_missing = false; found = true; break; } } mtx_unlock(&hbus->device_list_lock); if (!found) { if (!need_rescan) need_rescan = true; hpdev = new_pcichild_device(hbus, new_desc); if (!hpdev) printf("vmbus_pcib: failed to add a child\n"); } } /* Remove missing device(s), if any */ TAILQ_FOREACH_SAFE(hpdev, &hbus->children, link, tmp_hpdev) { if (hpdev->reported_missing) hv_pci_delete_device(hpdev); } /* Rescan the bus to find any new device, if necessary. */ if (hbus->state == hv_pcibus_installed && need_rescan) pci_rescan(hbus->pci_bus); /* Wake up hv_pci_query_relations(), if it's waiting. */ query_comp = hbus->query_comp; if (query_comp) { hbus->query_comp = NULL; complete(query_comp); } free(dr, M_DEVBUF); } static struct hv_pci_dev * get_pcichild_wslot(struct hv_pcibus *hbus, uint32_t wslot) { struct hv_pci_dev *hpdev, *ret = NULL; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if (hpdev->desc.wslot.val == wslot) { ret = hpdev; break; } } mtx_unlock(&hbus->device_list_lock); return (ret); } static void hv_pci_devices_present(struct hv_pcibus *hbus, struct pci_bus_relations *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct pci_func_desc) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_eject_device_work(void *arg, int pending __unused) { struct hv_pci_dev *hpdev = arg; union win_slot_encoding wslot = hpdev->desc.wslot; struct hv_pcibus *hbus = hpdev->hbus; struct pci_eject_response *eject_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_eject_response)]; } ctxt; hv_pci_delete_device(hpdev); memset(&ctxt, 0, sizeof(ctxt)); eject_pkt = (struct pci_eject_response *)&ctxt.pkt.message; eject_pkt->message_type.type = PCI_EJECTION_COMPLETE; eject_pkt->wslot.val = wslot.val; vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, eject_pkt, sizeof(*eject_pkt), 0); } static void hv_pci_eject_device(struct hv_pci_dev *hpdev) { struct 
hv_pcibus *hbus = hpdev->hbus; struct taskqueue *taskq; if (hbus->detaching) return; /* * Push this task into the same taskqueue on which * vmbus_pcib_attach() runs, so we're sure this task can't run * concurrently with vmbus_pcib_attach(). */ TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev); taskq = vmbus_chan_mgmt_tq(hbus->sc->chan); taskqueue_enqueue(taskq, &hpdev->eject_task); } #define PCIB_PACKET_SIZE 0x100 static void vmbus_pcib_on_channel_callback(struct vmbus_channel *chan, void *arg) { struct vmbus_pcib_softc *sc = arg; struct hv_pcibus *hbus = sc->hbus; void *buffer; int bufferlen = PCIB_PACKET_SIZE; struct pci_packet *comp_packet; struct pci_response *response; struct pci_incoming_message *new_msg; struct pci_bus_relations *bus_rel; struct pci_dev_incoming *dev_msg; struct hv_pci_dev *hpdev; buffer = sc->rx_buf; do { struct vmbus_chanpkt_hdr *pkt = buffer; uint32_t bytes_rxed; int ret; bytes_rxed = bufferlen; ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed); if (ret == ENOBUFS) { /* Handle large packet */ if (bufferlen > PCIB_PACKET_SIZE) { free(buffer, M_DEVBUF); buffer = NULL; } /* alloc new buffer */ buffer = malloc(bytes_rxed, M_DEVBUF, M_WAITOK | M_ZERO); bufferlen = bytes_rxed; continue; } if (ret != 0) { /* ignore EIO or EAGAIN */ break; } if (bytes_rxed <= sizeof(struct pci_response)) continue; switch (pkt->cph_type) { case VMBUS_CHANPKT_TYPE_COMP: comp_packet = (struct pci_packet *)(uintptr_t)pkt->cph_xactid; response = (struct pci_response *)pkt; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_rxed); break; case VMBUS_CHANPKT_TYPE_INBAND: new_msg = (struct pci_incoming_message *)buffer; switch (new_msg->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; if (bus_rel->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations, func) + (sizeof(struct pci_func_desc) * (bus_rel->device_count))) break; hv_pci_devices_present(hbus, bus_rel); break; case PCI_EJECT: dev_msg = (struct pci_dev_incoming *)buffer; hpdev = get_pcichild_wslot(hbus, dev_msg->wslot.val); if (hpdev) hv_pci_eject_device(hpdev); break; default: printf("vmbus_pcib: Unknown msg type 0x%x\n", new_msg->message_type.type); break; } break; default: printf("vmbus_pcib: Unknown VMBus msg type %hd\n", pkt->cph_type); break; } } while (1); if (bufferlen > PCIB_PACKET_SIZE) free(buffer, M_DEVBUF); } static int hv_pci_protocol_negotiation(struct hv_pcibus *hbus) { struct pci_version_request *version_req; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_version_request)]; } ctxt; int ret; init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&ctxt.pkt.message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT; version_req->is_last_attempt = 1; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, version_req, sizeof(*version_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) { device_printf(hbus->pcib, "vmbus_pcib failed to request version: %d\n", ret); goto out; } if (comp_pkt.completion_status < 0) { device_printf(hbus->pcib, "vmbus_pcib version negotiation failed: %x\n", comp_pkt.completion_status); ret = EPROTO; } else { ret = 0; } out: free_completion(&comp_pkt.host_event); return (ret); } /* Ask the 
host to send along the list of child devices */ static int hv_pci_query_relations(struct hv_pcibus *hbus) { struct pci_message message; int ret; message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &message, sizeof(message), 0); return (ret); } static int hv_pci_enter_d0(struct hv_pcibus *hbus) { struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_bus_d0_entry)]; } ctxt; int ret; /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region * of memory-mapped I/O space has been chosen for configuration space * access. */ init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&ctxt.pkt.message; memset(d0_entry, 0, sizeof(*d0_entry)); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = rman_get_start(hbus->cfg_res); ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, d0_entry, sizeof(*d0_entry), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) goto out; if (comp_pkt.completion_status < 0) { device_printf(hbus->pcib, "vmbus_pcib failed to enable D0\n"); ret = EPROTO; } else { ret = 0; } out: free_completion(&comp_pkt.host_event); return (ret); } /* * It looks like this is only needed by Windows VMs, but let's send the message too, * just to make the host happy. */ static int hv_send_resources_allocated(struct hv_pcibus *hbus) { struct pci_resources_assigned *res_assigned; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; uint32_t wslot; int ret = 0; pkt = malloc(sizeof(*pkt) + sizeof(*res_assigned), M_DEVBUF, M_WAITOK | M_ZERO); for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; init_completion(&comp_pkt.host_event); memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned)); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; res_assigned = (struct pci_resources_assigned *)&pkt->message; res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.val = hpdev->desc.wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &pkt->message, sizeof(*res_assigned), (uint64_t)(uintptr_t)pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); free_completion(&comp_pkt.host_event); if (ret) break; if (comp_pkt.completion_status < 0) { ret = EPROTO; device_printf(hbus->pcib, "failed to send PCI_RESOURCES_ASSIGNED\n"); break; } } free(pkt, M_DEVBUF); return (ret); } static int hv_send_resources_released(struct hv_pcibus *hbus) { struct pci_child_message pkt; struct hv_pci_dev *hpdev; uint32_t wslot; int ret; for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.val = hpdev->desc.wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &pkt, sizeof(pkt), 0); if (ret) return (ret); } return (0); } #define hv_cfg_read(x, s) \ static inline uint##x##_t hv_cfg_read_##s(struct hv_pcibus *bus, \ bus_size_t offset) \ { \ return (bus_read_##s(bus->cfg_res, offset)); \ } #define hv_cfg_write(x, s) \ static inline void hv_cfg_write_##s(struct hv_pcibus *bus, \ bus_size_t offset, uint##x##_t val) \ { \ return 
(bus_write_##s(bus->cfg_res, offset, val)); \ } hv_cfg_read(8, 1) hv_cfg_read(16, 2) hv_cfg_read(32, 4) hv_cfg_write(8, 1) hv_cfg_write(16, 2) hv_cfg_write(32, 4) static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t *val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* * If the attempt is to read the IDs or the ROM BAR, simulate that. */ if (where + size <= PCIR_COMMAND) { memcpy(val, ((uint8_t *)&hpdev->desc.v_id) + where, size); } else if (where >= PCIR_REVID && where + size <= PCIR_CACHELNSZ) { memcpy(val, ((uint8_t *)&hpdev->desc.rev) + where - PCIR_REVID, size); } else if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_BIOS) { memcpy(val, (uint8_t *)&hpdev->desc.subsystem_id + where - PCIR_SUBVEND_0, size); } else if (where >= PCIR_BIOS && where + size <= PCIR_CAP_PTR) { /* ROM BARs are unimplemented */ *val = 0; } else if ((where >= PCIR_INTLINE && where + size <= PCIR_INTPIN) ||(where == PCIR_INTPIN && size == 1)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts. */ *val = 0; } else if (where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be read. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start reading.*/ mb(); /* Read from that function's config space. */ switch (size) { case 1: *((uint8_t *)val) = hv_cfg_read_1(hbus, addr); break; case 2: *((uint16_t *)val) = hv_cfg_read_2(hbus, addr); break; default: *((uint32_t *)val) = hv_cfg_read_4(hbus, addr); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config read: it's unlikely to reach here. */ memset(val, 0, size); } } static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* SSIDs and ROM BARs are read-only */ if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_CAP_PTR) return; if (where >= PCIR_COMMAND && where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be written. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start writing.*/ wmb(); /* Write to that function's config space. */ switch (size) { case 1: hv_cfg_write_1(hbus, addr, (uint8_t)val); break; case 2: hv_cfg_write_2(hbus, addr, (uint16_t)val); break; default: hv_cfg_write_4(hbus, addr, (uint32_t)val); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config write: it's unlikely to reach here. */ return; } } /* * The vPCI in some Hyper-V releases do not initialize the last 4 * bit of BAR registers. This could result weird problems causing PCI * code fail to configure BAR correctly. * * Just write all 1's to those BARs whose probed values are not zero. * This seems to make the Hyper-V vPCI and pci_write_bar() to cooperate * correctly. 
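 *
 * Illustrative sketch (editor's example with made-up values): if the
 * host probed BAR 0 as 0xfe000004 but reading the BAR back returns
 * 0xfe000000 because the low bits were dropped, the loop below spots
 * the mismatch and rewrites the BAR:
 *
 *	uint32_t bar_val;
 *
 *	_hv_pcifront_read_config(hpdev, PCIR_BAR(0), 4, &bar_val);
 *	if (hpdev->probed_bar[0] != bar_val)
 *		_hv_pcifront_write_config(hpdev, PCIR_BAR(0), 4,
 *		    0xffffffff);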
 */
static void
vmbus_pcib_prepopulate_bars(struct hv_pcibus *hbus)
{
	struct hv_pci_dev *hpdev;
	int i;

	mtx_lock(&hbus->device_list_lock);
	TAILQ_FOREACH(hpdev, &hbus->children, link) {
		for (i = 0; i < 6; i++) {
			/* Ignore empty BARs. */
			if (hpdev->probed_bar[i] == 0)
				continue;

			uint32_t bar_val = 0;

			_hv_pcifront_read_config(hpdev, PCIR_BAR(i),
			    4, &bar_val);

			if (hpdev->probed_bar[i] != bar_val) {
				if (bootverbose)
					printf("vmbus_pcib: initialize bar %d "
					    "by writing all 1s\n", i);

				_hv_pcifront_write_config(hpdev, PCIR_BAR(i),
				    4, 0xffffffff);
			}
		}
	}
	mtx_unlock(&hbus->device_list_lock);
}

static void
vmbus_pcib_set_detaching(void *arg, int pending __unused)
{
	struct hv_pcibus *hbus = arg;

	atomic_set_int(&hbus->detaching, 1);
}

static void
vmbus_pcib_pre_detach(struct hv_pcibus *hbus)
{
	struct task task;

	TASK_INIT(&task, 0, vmbus_pcib_set_detaching, hbus);

	/*
	 * Make sure the channel callback won't push any possible new
	 * PCI_BUS_RELATIONS and PCI_EJECT tasks to sc->taskq.
	 */
	vmbus_chan_run_task(hbus->sc->chan, &task);

	taskqueue_drain_all(hbus->sc->taskq);
}

/*
 * Standard probe entry point.
 */
static int
vmbus_pcib_probe(device_t dev)
{
	if (VMBUS_PROBE_GUID(device_get_parent(dev), dev,
	    &g_pass_through_dev_type) == 0) {
		device_set_desc(dev, "Hyper-V PCI Express Pass Through");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Standard attach entry point.
 */
static int
vmbus_pcib_attach(device_t dev)
{
	const int pci_ring_size = (4 * PAGE_SIZE);
	const struct hyperv_guid *inst_guid;
	struct vmbus_channel *channel;
	struct vmbus_pcib_softc *sc;
	struct hv_pcibus *hbus;
	int rid = 0;
	int ret;

	hbus = malloc(sizeof(*hbus), M_DEVBUF, M_WAITOK | M_ZERO);
	hbus->pcib = dev;

	channel = vmbus_get_channel(dev);
	inst_guid = vmbus_chan_guid_inst(channel);
	hbus->pci_domain = inst_guid->hv_guid[9] |
	    (inst_guid->hv_guid[8] << 8);

	mtx_init(&hbus->config_lock, "hbcfg", NULL, MTX_DEF);
	mtx_init(&hbus->device_list_lock, "hbdl", NULL, MTX_DEF);
	TAILQ_INIT(&hbus->children);
	TAILQ_INIT(&hbus->dr_list);

	hbus->cfg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, RM_MAX_END, PCI_CONFIG_MMIO_LENGTH,
	    RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));
	if (!hbus->cfg_res) {
		device_printf(dev, "failed to get resource for cfg window\n");
		ret = ENXIO;
		goto free_bus;
	}

	sc = device_get_softc(dev);
	sc->chan = channel;
	sc->rx_buf = malloc(PCIB_PACKET_SIZE, M_DEVBUF, M_WAITOK | M_ZERO);
	sc->hbus = hbus;

	/*
	 * The taskq is used to handle PCI_BUS_RELATIONS and PCI_EJECT
	 * messages.  NB: we can't handle the messages in the channel callback
	 * directly, because the message handlers need to send new messages
	 * to the host and wait for the host's completion messages, which
	 * must also be handled by the channel callback.
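 *
 * For illustration, this is the same deferral pattern already used by
 * hv_pci_eject_device() above: wrap the work in a task and enqueue it
 * instead of running it inside the channel callback:
 *
 *	TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev);
 *	taskqueue_enqueue(taskq, &hpdev->eject_task);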
*/ sc->taskq = taskqueue_create("vmbus_pcib_tq", M_WAITOK, taskqueue_thread_enqueue, &sc->taskq); taskqueue_start_threads(&sc->taskq, 1, PI_NET, "vmbus_pcib_tq"); hbus->sc = sc; init_completion(&hbus->query_completion); hbus->query_comp = &hbus->query_completion; ret = vmbus_chan_open(sc->chan, pci_ring_size, pci_ring_size, NULL, 0, vmbus_pcib_on_channel_callback, sc); if (ret) goto free_res; ret = hv_pci_protocol_negotiation(hbus); if (ret) goto vmbus_close; ret = hv_pci_query_relations(hbus); if (!ret) ret = wait_for_response(hbus, hbus->query_comp); if (ret) goto vmbus_close; ret = hv_pci_enter_d0(hbus); if (ret) goto vmbus_close; ret = hv_send_resources_allocated(hbus); if (ret) goto vmbus_close; vmbus_pcib_prepopulate_bars(hbus); hbus->pci_bus = device_add_child(dev, "pci", -1); if (!hbus->pci_bus) { device_printf(dev, "failed to create pci bus\n"); ret = ENXIO; goto vmbus_close; } bus_generic_attach(dev); hbus->state = hv_pcibus_installed; return (0); vmbus_close: vmbus_pcib_pre_detach(hbus); vmbus_chan_close(sc->chan); free_res: taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); free_bus: mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (ret); } /* * Standard detach entry point */ static int vmbus_pcib_detach(device_t dev) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pcibus *hbus = sc->hbus; struct pci_message teardown_packet; struct pci_bus_relations relations; int ret; vmbus_pcib_pre_detach(hbus); if (hbus->state == hv_pcibus_installed) bus_generic_detach(dev); /* Delete any children which might still exist. */ memset(&relations, 0, sizeof(relations)); hv_pci_devices_present(hbus, &relations); ret = hv_send_resources_released(hbus); if (ret) device_printf(dev, "failed to send PCI_RESOURCES_RELEASED\n"); teardown_packet.type = PCI_BUS_D0EXIT; ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &teardown_packet, sizeof(struct pci_message), 0); if (ret) device_printf(dev, "failed to send PCI_BUS_D0EXIT\n"); taskqueue_drain_all(hbus->sc->taskq); vmbus_chan_close(sc->chan); taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (0); } static int vmbus_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *val) { struct vmbus_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *val = sc->hbus->pci_domain; return (0); case PCIB_IVAR_BUS: /* There is only bus 0. */ *val = 0; return (0); } return (ENOENT); } static int vmbus_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t val) { return (ENOENT); } static struct resource * vmbus_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { unsigned int bar_no; struct hv_pci_dev *hpdev; struct vmbus_pcib_softc *sc = device_get_softc(dev); struct resource *res; unsigned int devfn; if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->hbus->pci_domain, child, rid, start, end, count, flags)); /* Devices with port I/O BAR are not supported. 
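 *
 * For memory BARs the rid selects which BAR is being allocated; as an
 * editor's illustration of the decoding done below (offsets follow the
 * standard PCI header layout):
 *
 *	bar_no = PCI_RID2BAR(*rid);	// rid 0x10 -> BAR 0, 0x14 -> BAR 1
 *
 * 32-bit BARs (no PCIM_BAR_MEM_64 flag in the probed value) are then
 * clamped to a 32-bit address range.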
*/ if (type == SYS_RES_IOPORT) return (NULL); if (type == SYS_RES_MEMORY) { devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (NULL); bar_no = PCI_RID2BAR(*rid); if (bar_no >= MAX_NUM_BARS) return (NULL); /* Make sure a 32-bit BAR gets a 32-bit address */ if (!(hpdev->probed_bar[bar_no] & PCIM_BAR_MEM_64)) end = ulmin(end, 0xFFFFFFFF); } res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); /* * If this is a request for a specific range, assume it is * correct and pass it up to the parent. */ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); return (res); } static int vmbus_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->hbus->pci_domain, child, rid, r)); if (type == SYS_RES_IOPORT) return (EINVAL); return (bus_generic_release_resource(dev, child, type, rid, r)); } #if __FreeBSD_version >= 1100000 static int vmbus_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { return (bus_get_cpus(pcib, op, setsize, cpuset)); } #endif static uint32_t vmbus_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); uint32_t data = 0; KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (~0); _hv_pcifront_read_config(hpdev, reg, bytes, &data); return (data); } static void vmbus_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return; _hv_pcifront_write_config(hpdev, reg, bytes, data); } static int vmbus_pcib_route_intr(device_t pcib, device_t dev, int pin) { /* We only support MSI/MSI-X and don't support INTx interrupt. */ return (PCI_INVALID_IRQ); } static int vmbus_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (PCIB_ALLOC_MSI(device_get_parent(pcib), dev, count, maxcount, irqs)); } static int vmbus_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (PCIB_RELEASE_MSI(device_get_parent(pcib), dev, count, irqs)); } static int vmbus_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { return (PCIB_ALLOC_MSIX(device_get_parent(pcib), dev, irq)); } static int vmbus_pcib_release_msix(device_t pcib, device_t dev, int irq) { return (PCIB_RELEASE_MSIX(device_get_parent(pcib), dev, irq)); } #define MSI_INTEL_ADDR_DEST 0x000ff000 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. 
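 *
 * Editor's illustration (example values only) of how vmbus_pcib_map_msi()
 * below unpacks the parent-supplied MSI address/data pair with these
 * masks: given v_addr = 0xfee0f00c and v_data = 0x4041,
 *
 *	cpu    = (v_addr & MSI_INTEL_ADDR_DEST) >> 12;	// 0x0f
 *	vector = v_data & MSI_INTEL_DATA_INTVEC;	// 0x41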
 */
#define MSI_INTEL_DATA_DELFIXED	IOART_DELFIXED

static int
vmbus_pcib_map_msi(device_t pcib, device_t child, int irq,
    uint64_t *addr, uint32_t *data)
{
	unsigned int devfn;
	struct hv_pci_dev *hpdev;

	uint64_t v_addr;
	uint32_t v_data;
	struct hv_irq_desc *hid, *tmp_hid;
	unsigned int cpu, vcpu_id;
	unsigned int vector;

	struct vmbus_pcib_softc *sc = device_get_softc(pcib);
	struct pci_create_interrupt *int_pkt;
	struct compose_comp_ctxt comp;
	struct {
		struct pci_packet pkt;
		uint8_t buffer[sizeof(struct pci_create_interrupt)];
	} ctxt;
	int ret;

	devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child));
	hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return (ENOENT);

	ret = PCIB_MAP_MSI(device_get_parent(pcib), child, irq,
	    &v_addr, &v_data);
	if (ret)
		return (ret);

	TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) {
		if (hid->irq == irq) {
			TAILQ_REMOVE(&hpdev->irq_desc_list, hid, link);
			hv_int_desc_free(hpdev, hid);
			break;
		}
	}

	cpu = (v_addr & MSI_INTEL_ADDR_DEST) >> 12;
	vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu);
	vector = v_data & MSI_INTEL_DATA_INTVEC;

	init_completion(&comp.comp_pkt.host_event);

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pkt.completion_func = hv_pci_compose_compl;
	ctxt.pkt.compl_ctxt = &comp;

	int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.val = hpdev->desc.wslot.val;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED;
	int_pkt->int_desc.cpu_mask = 1ULL << vcpu_id;

	ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND,
	    VMBUS_CHANPKT_FLAG_RC, int_pkt, sizeof(*int_pkt),
	    (uint64_t)(uintptr_t)&ctxt.pkt);
	if (ret) {
		free_completion(&comp.comp_pkt.host_event);
		return (ret);
	}

	wait_for_completion(&comp.comp_pkt.host_event);
	free_completion(&comp.comp_pkt.host_event);

	if (comp.comp_pkt.completion_status < 0)
		return (EPROTO);

	*addr = comp.int_desc.address;
	*data = comp.int_desc.data;

	hid = malloc(sizeof(struct hv_irq_desc), M_DEVBUF, M_WAITOK | M_ZERO);
	hid->irq = irq;
	hid->desc = comp.int_desc;
	TAILQ_INSERT_TAIL(&hpdev->irq_desc_list, hid, link);

	return (0);
}

static device_method_t vmbus_pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vmbus_pcib_probe),
	DEVMETHOD(device_attach,	vmbus_pcib_attach),
	DEVMETHOD(device_detach,	vmbus_pcib_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	vmbus_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,	vmbus_pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,	vmbus_pcib_alloc_resource),
	DEVMETHOD(bus_release_resource,	vmbus_pcib_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
#if __FreeBSD_version >= 1100000
	DEVMETHOD(bus_get_cpus,		vmbus_pcib_get_cpus),
#endif

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	pcib_maxslots),
	DEVMETHOD(pcib_read_config,	vmbus_pcib_read_config),
	DEVMETHOD(pcib_write_config,	vmbus_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,	vmbus_pcib_route_intr),
	DEVMETHOD(pcib_alloc_msi,	vmbus_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	vmbus_pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix,	vmbus_pcib_alloc_msix),
	DEVMETHOD(pcib_release_msix,
	    vmbus_pcib_release_msix),
	DEVMETHOD(pcib_map_msi,		vmbus_pcib_map_msi),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	DEVMETHOD_END
};

static devclass_t pcib_devclass;

DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods,
    sizeof(struct vmbus_pcib_softc));
DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, pcib_devclass, 0, 0);
MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1);
MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1);

#endif /* NEW_PCIB */
diff --git a/sys/dev/ida/ida.c b/sys/dev/ida/ida.c
index 10bba8146706..c09e856a75af 100644
--- a/sys/dev/ida/ida.c
+++ b/sys/dev/ida/ida.c
@@ -1,841 +1,841 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 * Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
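 *
 * Rough command lifecycle, as an editor's summary of the code below:
 *
 *	qcb = ida_get_qcb(ida);		// take a QCB off the free list
 *	ida_map_qcb(ida, qcb, ...);	// busdma load; ida_data_cb() queues
 *					// the QCB and calls ida_start()
 *	ida_wait()/ida_intr();		// completion arrives, ida_done()
 *	ida_free_qcb(ida, qcb);		// back onto the free list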
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static int ida_alloc_qcbs(struct ida_softc *ida); static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb); static void ida_start(struct ida_softc *ida); static void ida_startio(struct ida_softc *ida); static void ida_startup(void *arg); static void ida_timeout(void *arg); static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb); static d_ioctl_t ida_ioctl; static struct cdevsw ida_cdevsw = { .d_version = D_VERSION, .d_ioctl = ida_ioctl, .d_name = "ida", }; void ida_free(struct ida_softc *ida) { int i; if (ida->ih != NULL) bus_teardown_intr(ida->dev, ida->irq, ida->ih); mtx_lock(&ida->lock); callout_stop(&ida->ch); mtx_unlock(&ida->lock); callout_drain(&ida->ch); if (ida->buffer_dmat) { for (i = 0; i < IDA_QCB_MAX; i++) bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap); bus_dma_tag_destroy(ida->buffer_dmat); } if (ida->hwqcb_dmat) { if (ida->hwqcb_busaddr) bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap); if (ida->hwqcbs) bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs, ida->hwqcb_dmamap); bus_dma_tag_destroy(ida->hwqcb_dmat); } if (ida->qcbs != NULL) free(ida->qcbs, M_DEVBUF); if (ida->irq != NULL) bus_release_resource(ida->dev, ida->irq_res_type, 0, ida->irq); if (ida->parent_dmat != NULL) bus_dma_tag_destroy(ida->parent_dmat); if (ida->regs != NULL) bus_release_resource(ida->dev, ida->regs_res_type, ida->regs_res_id, ida->regs); mtx_destroy(&ida->lock); } /* * record bus address from bus_dmamap_load */ static void ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static __inline struct ida_qcb * ida_get_qcb(struct ida_softc *ida) { struct ida_qcb *qcb; if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) { SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle); bzero(qcb->hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req)); } return (qcb); } static __inline void ida_free_qcb(struct ida_softc *ida, struct ida_qcb *qcb) { qcb->state = QCB_FREE; qcb->buf = NULL; qcb->error = 0; SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle); } static __inline bus_addr_t idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb) { return (ida->hwqcb_busaddr + ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs)); } static __inline struct ida_qcb * idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr) { struct ida_hardware_qcb *hwqcb; hwqcb = (struct ida_hardware_qcb *) ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr)); return (hwqcb->qcb); } static int ida_alloc_qcbs(struct ida_softc *ida) { struct ida_qcb *qcb; int error, i; for (i = 0; i < IDA_QCB_MAX; i++) { qcb = &ida->qcbs[i]; error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap); if (error != 0) return (error); qcb->ida = ida; qcb->flags = QCB_FREE; qcb->hwqcb = &ida->hwqcbs[i]; qcb->hwqcb->qcb = qcb; qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb); SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle); } return (0); } int ida_setup(struct ida_softc *ida) { struct ida_controller_info cinfo; device_t child; int error, i, unit; SLIST_INIT(&ida->free_qcbs); STAILQ_INIT(&ida->qcb_queue); bioq_init(&ida->bio_queue); ida->qcbs = (struct ida_qcb *) malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (ida->qcbs == NULL) return (ENOMEM); /* * Create our DMA tags */ /* DMA tag for our hardware QCB structures */ error = 
bus_dma_tag_create(
		/* parent	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
		/* nsegments	*/ 1,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ NULL,
		/* lockarg	*/ NULL,
		&ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(
		/* parent	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ DFLTPHYS,
		/* nsegments	*/ IDA_NSEG,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ busdma_lock_mutex,
		/* lockarg	*/ &Giant,
		&ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat, (void **)&ida->hwqcbs,
	    BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	error = ida_alloc_qcbs(ida);
	if (error)
		return (error);

	mtx_lock(&ida->lock);
	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		mtx_unlock(&ida->lock);
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return (error);
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			mtx_unlock(&ida->lock);
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return (error);
		}
	}

	ida->cmd.int_enable(ida, 1);
	ida->flags |= IDA_ATTACHED;
	mtx_unlock(&ida->lock);

	for (i = 0; i < cinfo.num_drvs; i++) {
		child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
		if (child != NULL)
			device_set_ivars(child, (void *)(intptr_t)i);
	}

	ida->ich.ich_func = ida_startup;
	ida->ich.ich_arg = ida;
	if (config_intrhook_establish(&ida->ich) != 0) {
		device_delete_children(ida->dev);
		device_printf(ida->dev,
		    "Cannot establish configuration hook\n");
		return (ENXIO);
	}

	unit = device_get_unit(ida->dev);
	ida->ida_dev_t = make_dev(&ida_cdevsw, unit,
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
	    "ida%d", unit);
	ida->ida_dev_t->si_drv1 = ida;

	return (0);
}

static void
ida_startup(void *arg)
{
	struct ida_softc *ida;

	ida = arg;

	config_intrhook_disestablish(&ida->ich);

-	mtx_lock(&Giant);
+	bus_topo_lock();
	bus_generic_attach(ida->dev);
-	mtx_unlock(&Giant);
+	bus_topo_unlock();
}

int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error;

	ida = (struct ida_softc *)device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
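 *
 * One plausible answer (editor's sketch, not something this driver
 * does today): walk the children that ida_setup() attached with
 * device_add_child():
 *
 *	device_t *children;
 *	int count, i;
 *
 *	if (device_get_children(ida->dev, &children, &count) == 0) {
 *		for (i = 0; i < count; i++)
 *			;	// inspect each idad(4) instance
 *		free(children, M_TEMP);
 *	}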
*/ destroy_dev(ida->ida_dev_t); ida_free(ida); return (error); } static void ida_data_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct ida_hardware_qcb *hwqcb; struct ida_softc *ida; struct ida_qcb *qcb; bus_dmasync_op_t op; int i; qcb = arg; ida = qcb->ida; if (!dumping) mtx_assert(&ida->lock, MA_OWNED); if (error) { qcb->error = error; ida_done(ida, qcb); return; } hwqcb = qcb->hwqcb; hwqcb->hdr.size = htole16((sizeof(struct ida_req) + sizeof(struct ida_sgb) * IDA_NSEG) >> 2); for (i = 0; i < nsegments; i++) { hwqcb->seg[i].addr = htole32(segs[i].ds_addr); hwqcb->seg[i].length = htole32(segs[i].ds_len); } hwqcb->req.sgcount = nsegments; if (qcb->flags & DMA_DATA_TRANSFER) { switch (qcb->flags & DMA_DATA_TRANSFER) { case DMA_DATA_TRANSFER: op = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE; break; case DMA_DATA_IN: op = BUS_DMASYNC_PREREAD; break; default: KASSERT((qcb->flags & DMA_DATA_TRANSFER) == DMA_DATA_OUT, ("bad DMA data flags")); op = BUS_DMASYNC_PREWRITE; break; } bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); } bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe); ida_start(ida); ida->flags &= ~IDA_QFROZEN; } static int ida_map_qcb(struct ida_softc *ida, struct ida_qcb *qcb, void *data, bus_size_t datasize) { int error, flags; if (ida->flags & IDA_INTERRUPTS) flags = BUS_DMA_WAITOK; else flags = BUS_DMA_NOWAIT; error = bus_dmamap_load(ida->buffer_dmat, qcb->dmamap, data, datasize, ida_data_cb, qcb, flags); if (error == EINPROGRESS) { ida->flags |= IDA_QFROZEN; error = 0; } return (error); } int ida_command(struct ida_softc *ida, int command, void *data, int datasize, int drive, u_int32_t pblkno, int flags) { struct ida_hardware_qcb *hwqcb; struct ida_qcb *qcb; int error; if (!dumping) mtx_assert(&ida->lock, MA_OWNED); qcb = ida_get_qcb(ida); if (qcb == NULL) { device_printf(ida->dev, "out of QCBs\n"); return (EAGAIN); } qcb->flags = flags | IDA_COMMAND; hwqcb = qcb->hwqcb; hwqcb->hdr.drive = drive; hwqcb->req.blkno = htole32(pblkno); hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE)); hwqcb->req.command = command; error = ida_map_qcb(ida, qcb, data, datasize); if (error == 0) { error = ida_wait(ida, qcb); /* Don't free QCB on a timeout in case it later completes. */ if (error) return (error); error = qcb->error; } /* XXX should have status returned here? */ /* XXX have "status pointer" area in QCB? */ ida_free_qcb(ida, qcb); return (error); } void ida_submit_buf(struct ida_softc *ida, struct bio *bp) { mtx_lock(&ida->lock); bioq_insert_tail(&ida->bio_queue, bp); ida_startio(ida); mtx_unlock(&ida->lock); } static void ida_startio(struct ida_softc *ida) { struct ida_hardware_qcb *hwqcb; struct ida_qcb *qcb; struct idad_softc *drv; struct bio *bp; int error; mtx_assert(&ida->lock, MA_OWNED); for (;;) { if (ida->flags & IDA_QFROZEN) return; bp = bioq_first(&ida->bio_queue); if (bp == NULL) return; /* no more buffers */ qcb = ida_get_qcb(ida); if (qcb == NULL) return; /* out of resources */ bioq_remove(&ida->bio_queue, bp); qcb->buf = bp; qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT; hwqcb = qcb->hwqcb; drv = bp->bio_driver1; hwqcb->hdr.drive = drv->drive; hwqcb->req.blkno = bp->bio_pblkno; hwqcb->req.bcount = howmany(bp->bio_bcount, DEV_BSIZE); hwqcb->req.command = bp->bio_cmd == BIO_READ ? 
CMD_READ : CMD_WRITE; error = ida_map_qcb(ida, qcb, bp->bio_data, bp->bio_bcount); if (error) { qcb->error = error; ida_done(ida, qcb); } } } static void ida_start(struct ida_softc *ida) { struct ida_qcb *qcb; if (!dumping) mtx_assert(&ida->lock, MA_OWNED); while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) { if (ida->cmd.fifo_full(ida)) break; STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe); /* * XXX * place the qcb on an active list? */ /* Set a timeout. */ if (!ida->qactive && !dumping) callout_reset(&ida->ch, hz * 5, ida_timeout, ida); ida->qactive++; qcb->state = QCB_ACTIVE; ida->cmd.submit(ida, qcb); } } static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb) { struct ida_qcb *qcb_done = NULL; bus_addr_t completed; int delay; if (!dumping) mtx_assert(&ida->lock, MA_OWNED); if (ida->flags & IDA_INTERRUPTS) { if (mtx_sleep(qcb, &ida->lock, PRIBIO, "idacmd", 5 * hz)) { qcb->state = QCB_TIMEDOUT; return (ETIMEDOUT); } return (0); } again: delay = 5 * 1000 * 100; /* 5 sec delay */ while ((completed = ida->cmd.done(ida)) == 0) { if (delay-- == 0) { qcb->state = QCB_TIMEDOUT; return (ETIMEDOUT); } DELAY(10); } qcb_done = idahwqcbptov(ida, completed & ~3); if (qcb_done != qcb) goto again; ida_done(ida, qcb); return (0); } void ida_intr(void *data) { struct ida_softc *ida; struct ida_qcb *qcb; bus_addr_t completed; ida = (struct ida_softc *)data; mtx_lock(&ida->lock); if (ida->cmd.int_pending(ida) == 0) { mtx_unlock(&ida->lock); return; /* not our interrupt */ } while ((completed = ida->cmd.done(ida)) != 0) { qcb = idahwqcbptov(ida, completed & ~3); if (qcb == NULL || qcb->state != QCB_ACTIVE) { device_printf(ida->dev, "ignoring completion %jx\n", (intmax_t)completed); continue; } /* Handle "Bad Command List" errors. */ if ((completed & 3) && (qcb->hwqcb->req.error == 0)) qcb->hwqcb->req.error = CMD_REJECTED; ida_done(ida, qcb); } ida_startio(ida); mtx_unlock(&ida->lock); } /* * should switch out command type; may be status, not just I/O. */ static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb) { bus_dmasync_op_t op; int active, error = 0; /* * finish up command */ if (!dumping) mtx_assert(&ida->lock, MA_OWNED); active = (qcb->state != QCB_FREE); if (qcb->flags & DMA_DATA_TRANSFER && active) { switch (qcb->flags & DMA_DATA_TRANSFER) { case DMA_DATA_TRANSFER: op = BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE; break; case DMA_DATA_IN: op = BUS_DMASYNC_POSTREAD; break; default: KASSERT((qcb->flags & DMA_DATA_TRANSFER) == DMA_DATA_OUT, ("bad DMA data flags")); op = BUS_DMASYNC_POSTWRITE; break; } bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap); } if (active) bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (qcb->hwqcb->req.error & SOFT_ERROR) { if (qcb->buf) device_printf(ida->dev, "soft %s error\n", qcb->buf->bio_cmd == BIO_READ ? "read" : "write"); else device_printf(ida->dev, "soft error\n"); } if (qcb->hwqcb->req.error & HARD_ERROR) { error = 1; if (qcb->buf) device_printf(ida->dev, "hard %s error\n", qcb->buf->bio_cmd == BIO_READ ? 
"read" : "write"); else device_printf(ida->dev, "hard error\n"); } if (qcb->hwqcb->req.error & CMD_REJECTED) { error = 1; device_printf(ida->dev, "invalid request\n"); } if (qcb->error) { error = 1; device_printf(ida->dev, "request failed to map: %d\n", qcb->error); } if (qcb->flags & IDA_COMMAND) { if (ida->flags & IDA_INTERRUPTS) wakeup(qcb); if (qcb->state == QCB_TIMEDOUT) ida_free_qcb(ida, qcb); } else { KASSERT(qcb->buf != NULL, ("ida_done(): qcb->buf is NULL!")); if (error) qcb->buf->bio_flags |= BIO_ERROR; idad_intr(qcb->buf); ida_free_qcb(ida, qcb); } if (!active) return; ida->qactive--; /* Reschedule or cancel timeout */ if (ida->qactive) callout_reset(&ida->ch, hz * 5, ida_timeout, ida); else callout_stop(&ida->ch); } static void ida_timeout(void *arg) { struct ida_softc *ida; ida = (struct ida_softc *)arg; device_printf(ida->dev, "%s() qactive %d\n", __func__, ida->qactive); if (ida->flags & IDA_INTERRUPTS) device_printf(ida->dev, "IDA_INTERRUPTS\n"); device_printf(ida->dev, "\t R_CMD_FIFO: %08x\n" "\t R_DONE_FIFO: %08x\n" "\t R_INT_MASK: %08x\n" "\t R_STATUS: %08x\n" "\tR_INT_PENDING: %08x\n", ida_inl(ida, R_CMD_FIFO), ida_inl(ida, R_DONE_FIFO), ida_inl(ida, R_INT_MASK), ida_inl(ida, R_STATUS), ida_inl(ida, R_INT_PENDING)); return; } /* * IOCTL stuff follows. */ struct cmd_info { int cmd; int len; int flags; }; static struct cmd_info *ida_cmd_lookup(int); static int ida_ioctl (struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct ida_softc *sc; struct ida_user_command *uc; struct cmd_info *ci; int len; int flags; int error; int data; void *daddr; sc = (struct ida_softc *)dev->si_drv1; uc = (struct ida_user_command *)addr; error = 0; switch (cmd) { case IDAIO_COMMAND: ci = ida_cmd_lookup(uc->command); if (ci == NULL) { error = EINVAL; break; } len = ci->len; flags = ci->flags; if (len) daddr = &uc->d.buf; else { daddr = &data; len = sizeof(data); } mtx_lock(&sc->lock); error = ida_command(sc, uc->command, daddr, len, uc->drive, uc->blkno, flags); mtx_unlock(&sc->lock); break; default: error = ENOIOCTL; break; } return (error); } static struct cmd_info ci_list[] = { { CMD_GET_LOG_DRV_INFO, sizeof(struct ida_drive_info), DMA_DATA_IN }, { CMD_GET_CTRL_INFO, sizeof(struct ida_controller_info), DMA_DATA_IN }, { CMD_SENSE_DRV_STATUS, sizeof(struct ida_drive_status), DMA_DATA_IN }, { CMD_START_RECOVERY, 0, 0 }, { CMD_GET_PHYS_DRV_INFO, sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER }, { CMD_BLINK_DRV_LEDS, sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT }, { CMD_SENSE_DRV_LEDS, sizeof(struct ida_blink_drv_leds), DMA_DATA_IN }, { CMD_GET_LOG_DRV_EXT, sizeof(struct ida_drive_info_ext), DMA_DATA_IN }, { CMD_RESET_CTRL, 0, 0 }, { CMD_GET_CONFIG, 0, 0 }, { CMD_SET_CONFIG, 0, 0 }, { CMD_LABEL_LOG_DRV, sizeof(struct ida_label_logical), DMA_DATA_OUT }, { CMD_SET_SURFACE_DELAY, 0, 0 }, { CMD_SENSE_BUS_PARAMS, 0, 0 }, { CMD_SENSE_SUBSYS_INFO, 0, 0 }, { CMD_SENSE_SURFACE_ATS, 0, 0 }, { CMD_PASSTHROUGH, 0, 0 }, { CMD_RESET_SCSI_DEV, 0, 0 }, { CMD_PAUSE_BG_ACT, 0, 0 }, { CMD_RESUME_BG_ACT, 0, 0 }, { CMD_START_FIRMWARE, 0, 0 }, { CMD_SENSE_DRV_ERR_LOG, 0, 0 }, { CMD_START_CPM, 0, 0 }, { CMD_SENSE_CP, 0, 0 }, { CMD_STOP_CPM, 0, 0 }, { CMD_FLUSH_CACHE, 0, 0 }, { CMD_ACCEPT_MEDIA_EXCH, 0, 0 }, { 0, 0, 0 } }; static struct cmd_info * ida_cmd_lookup (int command) { struct cmd_info *ci; ci = ci_list; while (ci->cmd) { if (ci->cmd == command) return (ci); ci++; } return (NULL); } diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c index 981f5a2673e5..4b7d42877d0e 100644 
--- a/sys/dev/mfi/mfi.c +++ b/sys/dev/mfi/mfi.c @@ -1,3796 +1,3796 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause * * Copyright (c) 2006 IronPort Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2007 LSI Corp. * Copyright (c) 2007 Rajesh Prabhakaran. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_mfi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mfi_alloc_commands(struct mfi_softc *); static int mfi_comms_init(struct mfi_softc *); static int mfi_get_controller_info(struct mfi_softc *); static int mfi_get_log_state(struct mfi_softc *, struct mfi_evt_log_state **); static int mfi_parse_entries(struct mfi_softc *, int, int); static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); static void mfi_startup(void *arg); static void mfi_intr(void *arg); static void mfi_ldprobe(struct mfi_softc *sc); static void mfi_syspdprobe(struct mfi_softc *sc); static void mfi_handle_evt(void *context, int pending); static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); static void mfi_aen_complete(struct mfi_command *); static int mfi_add_ld(struct mfi_softc *sc, int); static void mfi_add_ld_complete(struct mfi_command *); static int mfi_add_sys_pd(struct mfi_softc *sc, int); static void mfi_add_sys_pd_complete(struct mfi_command *); static struct mfi_command * mfi_bio_command(struct mfi_softc *); static void mfi_bio_complete(struct mfi_command *); static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*); static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*); static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *); static int mfi_abort(struct mfi_softc *, struct mfi_command **); static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *); static void mfi_timeout(void *); static int mfi_user_command(struct mfi_softc *, struct mfi_ioc_passthru *); static void mfi_enable_intr_xscale(struct mfi_softc *sc); static void mfi_enable_intr_ppc(struct mfi_softc *sc); static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt); static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt); static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode); static void mfi_config_unlock(struct mfi_softc *sc, int locked); static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm); static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm); static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm); SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MFI driver parameters"); static int mfi_event_locale = MFI_EVT_LOCALE_ALL; SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale, 0, "event message locale"); static int mfi_event_class = MFI_EVT_CLASS_INFO; SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class, 0, "event message class"); static int mfi_max_cmds = 128; SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds, 0, "Max commands limit (-1 = controller limit)"); static int mfi_detect_jbod_change = 1; SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN, &mfi_detect_jbod_change, 0, "Detect a change to a JBOD"); int mfi_polled_cmd_timeout = 
MFI_POLL_TIMEOUT_SECS; SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN, &mfi_polled_cmd_timeout, 0, "Polled command timeout - used for firmware flash etc (in seconds)"); static int mfi_cmd_timeout = MFI_CMD_TIMEOUT; SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout, 0, "Command timeout (in seconds)"); /* Management interface */ static d_open_t mfi_open; static d_close_t mfi_close; static d_ioctl_t mfi_ioctl; static d_poll_t mfi_poll; static struct cdevsw mfi_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = mfi_open, .d_close = mfi_close, .d_ioctl = mfi_ioctl, .d_poll = mfi_poll, .d_name = "mfi", }; MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH struct mfi_skinny_dma_info mfi_skinny; static void mfi_enable_intr_xscale(struct mfi_softc *sc) { MFI_WRITE4(sc, MFI_OMSK, 0x01); } static void mfi_enable_intr_ppc(struct mfi_softc *sc) { if (sc->mfi_flags & MFI_FLAGS_1078) { MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM); } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { MFI_WRITE4(sc, MFI_OMSK, ~0x00000001); } } static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc) { return MFI_READ4(sc, MFI_OMSG0); } static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc) { return MFI_READ4(sc, MFI_OSP0); } static int mfi_check_clear_intr_xscale(struct mfi_softc *sc) { int32_t status; status = MFI_READ4(sc, MFI_OSTS); if ((status & MFI_OSTS_INTR_VALID) == 0) return 1; MFI_WRITE4(sc, MFI_OSTS, status); return 0; } static int mfi_check_clear_intr_ppc(struct mfi_softc *sc) { int32_t status; status = MFI_READ4(sc, MFI_OSTS); if (sc->mfi_flags & MFI_FLAGS_1078) { if (!(status & MFI_1078_RM)) { return 1; } } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { if (!(status & MFI_GEN2_RM)) { return 1; } } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { if (!(status & MFI_SKINNY_RM)) { return 1; } } if (sc->mfi_flags & MFI_FLAGS_SKINNY) MFI_WRITE4(sc, MFI_OSTS, status); else MFI_WRITE4(sc, MFI_ODCR0, status); return 0; } static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) { MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); } static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) { if (sc->mfi_flags & MFI_FLAGS_SKINNY) { MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 ); MFI_WRITE4(sc, MFI_IQPH, 0x00000000); } else { MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 ); } } int mfi_transition_firmware(struct mfi_softc *sc) { uint32_t fw_state, cur_state; int max_wait, i; uint32_t cur_abs_reg_val = 0; uint32_t prev_abs_reg_val = 0; cur_abs_reg_val = sc->mfi_read_fw_status(sc); fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; while (fw_state != MFI_FWSTATE_READY) { if (bootverbose) device_printf(sc->mfi_dev, "Waiting for firmware to " "become ready\n"); cur_state = fw_state; switch (fw_state) { case MFI_FWSTATE_FAULT: device_printf(sc->mfi_dev, "Firmware fault\n"); return (ENXIO); case MFI_FWSTATE_WAIT_HANDSHAKE: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); else MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_OPERATIONAL: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, 7); else 
MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_UNDEFINED: case MFI_FWSTATE_BB_INIT: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_FW_INIT_2: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_FW_INIT: case MFI_FWSTATE_FLUSH_CACHE: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_DEVICE_SCAN: max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */ prev_abs_reg_val = cur_abs_reg_val; break; case MFI_FWSTATE_BOOT_MESSAGE_PENDING: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG); else MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG); max_wait = MFI_RESET_WAIT_TIME; break; default: device_printf(sc->mfi_dev, "Unknown firmware state %#x\n", fw_state); return (ENXIO); } for (i = 0; i < (max_wait * 10); i++) { cur_abs_reg_val = sc->mfi_read_fw_status(sc); fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; if (fw_state == cur_state) DELAY(100000); else break; } if (fw_state == MFI_FWSTATE_DEVICE_SCAN) { /* Check the device scanning progress */ if (prev_abs_reg_val != cur_abs_reg_val) { continue; } } if (fw_state == cur_state) { device_printf(sc->mfi_dev, "Firmware stuck in state " "%#x\n", fw_state); return (ENXIO); } } return (0); } static void mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } int mfi_attach(struct mfi_softc *sc) { uint32_t status; int error, commsz, framessz, sensesz; int frames, unit, max_fw_sge, max_fw_cmds; uint32_t tb_mem_size = 0; struct cdev *dev_t; if (sc == NULL) return EINVAL; device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n", MEGASAS_VERSION); mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); sx_init(&sc->mfi_config_lock, "MFI config"); TAILQ_INIT(&sc->mfi_ld_tqh); TAILQ_INIT(&sc->mfi_syspd_tqh); TAILQ_INIT(&sc->mfi_ld_pend_tqh); TAILQ_INIT(&sc->mfi_syspd_pend_tqh); TAILQ_INIT(&sc->mfi_evt_queue); TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc); TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc); TAILQ_INIT(&sc->mfi_aen_pids); TAILQ_INIT(&sc->mfi_cam_ccbq); mfi_initq_free(sc); mfi_initq_ready(sc); mfi_initq_busy(sc); mfi_initq_bio(sc); sc->adpreset = 0; sc->last_seq_num = 0; sc->disableOnlineCtrlReset = 1; sc->issuepend_done = 1; sc->hw_crit_error = 0; if (sc->mfi_flags & MFI_FLAGS_1064R) { sc->mfi_enable_intr = mfi_enable_intr_xscale; sc->mfi_read_fw_status = mfi_read_fw_status_xscale; sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; sc->mfi_issue_cmd = mfi_issue_cmd_xscale; } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) { sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc; sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc; sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc; sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc; sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc; sc->mfi_adp_reset = mfi_tbolt_adp_reset; sc->mfi_tbolt = 1; TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh); } else { sc->mfi_enable_intr = mfi_enable_intr_ppc; sc->mfi_read_fw_status = mfi_read_fw_status_ppc; sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; sc->mfi_issue_cmd = mfi_issue_cmd_ppc; } /* Before we get too far, see if the firmware is working */ if ((error = mfi_transition_firmware(sc)) != 0) { device_printf(sc->mfi_dev, "Firmware not in READY state, " "error %d\n", error); return (ENXIO); } /* Start: LSIP200113393 */ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr 
*/ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */ 1, /* msegments */ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->verbuf_h_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf, BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n"); return (ENOMEM); } bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t)); bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap, sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t), mfi_addr_cb, &sc->verbuf_h_busaddr, 0); /* End: LSIP200113393 */ /* * Get information needed for sizing the contiguous memory for the * frame pool. Size down the sgl parameter since we know that * we will never need more than what's required for MFI_MAXPHYS. * It would be nice if these constants were available at runtime * instead of compile time. */ status = sc->mfi_read_fw_status(sc); max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) { device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n", max_fw_cmds, mfi_max_cmds); sc->mfi_max_fw_cmds = mfi_max_cmds; } else { sc->mfi_max_fw_cmds = max_fw_cmds; } max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1)); /* ThunderBolt Support get the contiguous memory */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { mfi_tbolt_init_globals(sc); device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, " "MaxSgl = %d, state = %#x\n", max_fw_cmds, sc->mfi_max_fw_cmds, sc->mfi_max_sge, status); tb_mem_size = mfi_tbolt_get_memory_requirement(sc); if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tb_mem_size, /* maxsize */ 1, /* msegments */ tb_mem_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_tb_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool, BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); return (ENOMEM); } bzero(sc->request_message_pool, tb_mem_size); bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap, sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0); /* For ThunderBolt memory init */ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 0x100, 0, /* alignmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MFI_FRAME_SIZE, /* maxsize */ 1, /* msegments */ MFI_FRAME_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_tb_init_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init, BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate init memory\n"); return (ENOMEM); } bzero(sc->mfi_tb_init, MFI_FRAME_SIZE); bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap, sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb, &sc->mfi_tb_init_busaddr, 0); if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool, tb_mem_size)) { 
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate DMA memory for the MPI2 IOC Init descriptor; we
		 * keep it separate from what was allocated for the request
		 * and reply descriptors, to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create(
		    sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    tb_mem_size,		/* maxsize */
		    1,				/* nsegments */
		    tb_mem_size,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat,
		    sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size,
		    mfi_addr_cb, &sc->mfi_tb_ioc_init_busaddr, 0);
	}

	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create(
	    sc->mfi_parent_dmat,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    sc->mfi_max_sge,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &sc->mfi_io_lock,		/* lockfuncarg */
	    &sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply
	 * queue entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create(
	    sc->mfi_parent_dmat,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    commsz,			/* maxsize */
	    1,				/* nsegments */
	    commsz,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
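 *
 * Worked example (editor's illustration; the numbers are assumptions,
 * e.g. mfi_max_sge = 33 from min(max_fw_sge, MFI_MAXPHYS/PAGE_SIZE + 1)
 * with 4 KB pages): with 8-byte struct mfi_sg64 entries and the 64-byte
 * MFI_FRAME_SIZE, the calculation below gives
 *
 *	frames   = (8 * 33 - 1) / 64 + 2 = 4 + 2 = 6
 *	cmd_size = 6 * 64 = 384 bytes per command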
*/ if (sizeof(bus_addr_t) == 8) { sc->mfi_sge_size = sizeof(struct mfi_sg64); sc->mfi_flags |= MFI_FLAGS_SG64; } else { sc->mfi_sge_size = sizeof(struct mfi_sg32); } if (sc->mfi_flags & MFI_FLAGS_SKINNY) sc->mfi_sge_size = sizeof(struct mfi_sg_skinny); frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 64, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ framessz, /* maxsize */ 1, /* nsegments */ framessz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_frames_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); return (ENOMEM); } bzero(sc->mfi_frames, framessz); bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0); /* * Allocate DMA memory for the frame sense data. Keep them in the * lower 4GB for efficiency */ sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sensesz, /* maxsize */ 1, /* nsegments */ sensesz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_sense_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); return (ENOMEM); } bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0); if ((error = mfi_alloc_commands(sc)) != 0) return (error); /* Before moving the FW to operational state, check whether * hostmemory is required by the FW or not */ /* ThunderBolt MFI_IOC2 INIT */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { sc->mfi_disable_intr(sc); mtx_lock(&sc->mfi_io_lock); if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) { device_printf(sc->mfi_dev, "TB Init has failed with error %d\n",error); mtx_unlock(&sc->mfi_io_lock); return error; } mtx_unlock(&sc->mfi_io_lock); if ((error = mfi_tbolt_alloc_cmd(sc)) != 0) return error; if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc, &sc->mfi_intr)) { device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); return (EINVAL); } sc->mfi_intr_ptr = mfi_intr_tbolt; sc->mfi_enable_intr(sc); } else { if ((error = mfi_comms_init(sc)) != 0) return (error); if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) { device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); return (EINVAL); } sc->mfi_intr_ptr = mfi_intr; sc->mfi_enable_intr(sc); } if ((error = mfi_get_controller_info(sc)) != 0) return (error); sc->disableOnlineCtrlReset = 0; /* Register a config hook to probe the bus for arrays */ sc->mfi_ich.ich_func = mfi_startup; sc->mfi_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mfi_ich) != 0) { device_printf(sc->mfi_dev, "Cannot establish configuration " "hook\n"); return (EINVAL); } 
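	/*
	 * Editor's illustration of the intrhook pattern used here and in
	 * ida_setup() above: the hook defers bus probing until interrupts
	 * can be delivered, and the callback unregisters itself before
	 * doing the deferred work, roughly:
	 *
	 *	static void
	 *	mfi_startup(void *arg)
	 *	{
	 *		struct mfi_softc *sc = arg;
	 *
	 *		config_intrhook_disestablish(&sc->mfi_ich);
	 *		...
	 *	}
	 */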
mtx_lock(&sc->mfi_io_lock); if ((error = mfi_aen_setup(sc, 0)) != 0) { mtx_unlock(&sc->mfi_io_lock); return (error); } mtx_unlock(&sc->mfi_io_lock); /* * Register a shutdown handler. */ if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { device_printf(sc->mfi_dev, "Warning: shutdown event " "registration failed\n"); } /* * Create the control device for management. */ unit = device_get_unit(sc->mfi_dev); sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "mfi%d", unit); if (unit == 0) make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t, sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node"); if (sc->mfi_cdev != NULL) sc->mfi_cdev->si_drv1 = sc; SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, &sc->mfi_keep_deleted_volumes, 0, "Don't detach the mfid device for a busy volume that is deleted"); device_add_child(sc->mfi_dev, "mfip", -1); bus_generic_attach(sc->mfi_dev); /* Start the timeout watchdog */ callout_init(&sc->mfi_watchdog_callout, 1); callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz, mfi_timeout, sc); if (sc->mfi_flags & MFI_FLAGS_TBOLT) { mtx_lock(&sc->mfi_io_lock); mfi_tbolt_sync_map_info(sc); mtx_unlock(&sc->mfi_io_lock); } return (0); } static int mfi_alloc_commands(struct mfi_softc *sc) { struct mfi_command *cm; int i, j; /* * XXX Should we allocate all the commands up front, or allocate on * demand later like 'aac' does? */ sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) * sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->mfi_max_fw_cmds; i++) { cm = &sc->mfi_commands[i]; cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + sc->mfi_cmd_size * i); cm->cm_frame_busaddr = sc->mfi_frames_busaddr + sc->mfi_cmd_size * i; cm->cm_frame->header.context = i; cm->cm_sense = &sc->mfi_sense[i]; cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; cm->cm_sc = sc; cm->cm_index = i; if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, &cm->cm_dmamap) == 0) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } else { device_printf(sc->mfi_dev, "Failed to allocate %d " "command blocks, only allocated %d\n", sc->mfi_max_fw_cmds, i); for (j = 0; j < i; j++) { cm = &sc->mfi_commands[j]; bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); } free(sc->mfi_commands, M_MFIBUF); sc->mfi_commands = NULL; return (ENOMEM); } } return (0); } void mfi_release_command(struct mfi_command *cm) { struct mfi_frame_header *hdr; uint32_t *hdr_data; mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED); /* * Zero out the important fields of the frame, but make sure the * context field is preserved. For efficiency, handle the fields * as 32 bit words. Clear out the first S/G entry too for safety. */ hdr = &cm->cm_frame->header; if (cm->cm_data != NULL && hdr->sg_count) { cm->cm_sg->sg32[0].len = 0; cm->cm_sg->sg32[0].addr = 0; } /* * The command may be on other queues, e.g. the
busy queue, depending on the flow of a previous call to mfi_mapcmd, so ensure it's dequeued properly. */ if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) mfi_remove_busy(cm); if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0) mfi_remove_ready(cm); /* We're not expecting it to be on any other queue, but check */ if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) { panic("Command %p is still on another queue, flags = %#x", cm, cm->cm_flags); } /* tbolt cleanup */ if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) { mfi_tbolt_return_cmd(cm->cm_sc, cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1], cm); } hdr_data = (uint32_t *)cm->cm_frame; hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ hdr_data[4] = 0; /* flags, timeout */ hdr_data[5] = 0; /* data_len */ cm->cm_extra_frames = 0; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_data = NULL; cm->cm_sg = NULL; cm->cm_total_frame_size = 0; cm->retry_for_fw_reset = 0; mfi_enqueue_free(cm); } int mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode, void **bufp, size_t bufsize) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; void *buf = NULL; uint32_t context = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); cm = mfi_dequeue_free(sc); if (cm == NULL) return (EBUSY); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); if (buf == NULL) { mfi_release_command(cm); return (ENOMEM); } *bufp = buf; } else { buf = *bufp; } } dcmd = &cm->cm_frame->dcmd; bzero(dcmd->mbox, MFI_MBOX_SIZE); dcmd->header.cmd = MFI_CMD_DCMD; dcmd->header.timeout = 0; dcmd->header.flags = 0; dcmd->header.data_len = bufsize; dcmd->header.scsi_status = 0; dcmd->opcode = opcode; cm->cm_sg = &dcmd->sgl; cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_flags = 0; cm->cm_data = buf; cm->cm_private = buf; cm->cm_len = bufsize; *cmp = cm; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; return (0); } static int mfi_comms_init(struct mfi_softc *sc) { struct mfi_command *cm; struct mfi_init_frame *init; struct mfi_init_qinfo *qinfo; int error; uint32_t context = 0; mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; /* * Abuse the SG list area of the frame to hold the init_qinfo * object. */ init = &cm->cm_frame->init; qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); bzero(qinfo, sizeof(struct mfi_init_qinfo)); qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; qinfo->rq_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_reply_q); qinfo->pi_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_pi); qinfo->ci_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_ci); init->header.cmd = MFI_CMD_INIT; init->header.data_len = sizeof(struct mfi_init_qinfo); init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; cm->cm_data = NULL; cm->cm_flags = MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed to send init command\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_get_controller_info(struct
mfi_softc *sc) { struct mfi_command *cm = NULL; struct mfi_ctrl_info *ci = NULL; uint32_t max_sectors_1, max_sectors_2; int error; mtx_lock(&sc->mfi_io_lock); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, (void **)&ci, sizeof(*ci)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get controller info\n"); sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / MFI_SECTOR_LEN; error = 0; goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io; max_sectors_2 = ci->max_request_size; sc->mfi_max_io = min(max_sectors_1, max_sectors_2); sc->disableOnlineCtrlReset = ci->properties.OnOffProperties.disableOnlineCtrlReset; out: if (ci) free(ci, M_MFIBUF); if (cm) mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) { struct mfi_command *cm = NULL; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, (void **)log_state, sizeof(**log_state)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get log state\n"); goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); out: if (cm) mfi_release_command(cm); return (error); } int mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) { struct mfi_evt_log_state *log_state = NULL; union mfi_evt class_locale; int error = 0; uint32_t seq; mtx_assert(&sc->mfi_io_lock, MA_OWNED); class_locale.members.reserved = 0; class_locale.members.locale = mfi_event_locale; class_locale.members.evt_class = mfi_event_class; if (seq_start == 0) { if ((error = mfi_get_log_state(sc, &log_state)) != 0) goto out; sc->mfi_boot_seq_num = log_state->boot_seq_num; /* * Walk through any events that fired since the last * shutdown. */ if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num, log_state->newest_seq_num)) != 0) goto out; seq = log_state->newest_seq_num; } else seq = seq_start; error = mfi_aen_register(sc, seq, class_locale.word); out: free(log_state, M_MFIBUF); return (error); } int mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) { mtx_assert(&sc->mfi_io_lock, MA_OWNED); cm->cm_complete = NULL; /* * MegaCli can issue a DCMD of 0. 
In this case do nothing * and return 0 to it as status */ if (cm->cm_frame->dcmd.opcode == 0) { cm->cm_frame->header.cmd_status = MFI_STAT_OK; cm->cm_error = 0; return (cm->cm_error); } mfi_enqueue_ready(cm); mfi_startio(sc); if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); return (cm->cm_error); } void mfi_free(struct mfi_softc *sc) { struct mfi_command *cm; int i; callout_drain(&sc->mfi_watchdog_callout); if (sc->mfi_cdev != NULL) destroy_dev(sc->mfi_cdev); if (sc->mfi_commands != NULL) { for (i = 0; i < sc->mfi_max_fw_cmds; i++) { cm = &sc->mfi_commands[i]; bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); } free(sc->mfi_commands, M_MFIBUF); sc->mfi_commands = NULL; } if (sc->mfi_intr) bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); if (sc->mfi_irq != NULL) bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, sc->mfi_irq); if (sc->mfi_sense_busaddr != 0) bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); if (sc->mfi_sense != NULL) bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, sc->mfi_sense_dmamap); if (sc->mfi_sense_dmat != NULL) bus_dma_tag_destroy(sc->mfi_sense_dmat); if (sc->mfi_frames_busaddr != 0) bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); if (sc->mfi_frames != NULL) bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, sc->mfi_frames_dmamap); if (sc->mfi_frames_dmat != NULL) bus_dma_tag_destroy(sc->mfi_frames_dmat); if (sc->mfi_comms_busaddr != 0) bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); if (sc->mfi_comms != NULL) bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, sc->mfi_comms_dmamap); if (sc->mfi_comms_dmat != NULL) bus_dma_tag_destroy(sc->mfi_comms_dmat); /* ThunderBolt contiguous memory free here */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { if (sc->mfi_tb_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap); if (sc->request_message_pool != NULL) bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool, sc->mfi_tb_dmamap); if (sc->mfi_tb_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_dmat); /* Version buffer memory free */ /* Start LSIP200113393 */ if (sc->verbuf_h_busaddr != 0) bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap); if (sc->verbuf != NULL) bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf, sc->verbuf_h_dmamap); if (sc->verbuf_h_dmat != NULL) bus_dma_tag_destroy(sc->verbuf_h_dmat); /* End LSIP200113393 */ /* ThunderBolt INIT packet memory Free */ if (sc->mfi_tb_init_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap); if (sc->mfi_tb_init != NULL) bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init, sc->mfi_tb_init_dmamap); if (sc->mfi_tb_init_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_init_dmat); /* ThunderBolt IOC Init Desc memory free here */ if (sc->mfi_tb_ioc_init_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap); if (sc->mfi_tb_ioc_init_desc != NULL) bus_dmamem_free(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_desc, sc->mfi_tb_ioc_init_dmamap); if (sc->mfi_tb_ioc_init_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat); if (sc->mfi_cmd_pool_tbolt != NULL) { for (int i = 0; i < sc->mfi_max_fw_cmds; i++) { if (sc->mfi_cmd_pool_tbolt[i] != NULL) { free(sc->mfi_cmd_pool_tbolt[i], M_MFIBUF); sc->mfi_cmd_pool_tbolt[i] = NULL; } } free(sc->mfi_cmd_pool_tbolt, M_MFIBUF); sc->mfi_cmd_pool_tbolt = NULL; } if (sc->request_desc_pool != NULL) { free(sc->request_desc_pool, M_MFIBUF); sc->request_desc_pool = NULL; } } if (sc->mfi_buffer_dmat != NULL) 
bus_dma_tag_destroy(sc->mfi_buffer_dmat); if (sc->mfi_parent_dmat != NULL) bus_dma_tag_destroy(sc->mfi_parent_dmat); if (mtx_initialized(&sc->mfi_io_lock)) { mtx_destroy(&sc->mfi_io_lock); sx_destroy(&sc->mfi_config_lock); } return; } static void mfi_startup(void *arg) { struct mfi_softc *sc; sc = (struct mfi_softc *)arg; sc->mfi_enable_intr(sc); sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_ldprobe(sc); if (sc->mfi_flags & MFI_FLAGS_SKINNY) mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); config_intrhook_disestablish(&sc->mfi_ich); } static void mfi_intr(void *arg) { struct mfi_softc *sc; struct mfi_command *cm; uint32_t pi, ci, context; sc = (struct mfi_softc *)arg; if (sc->mfi_check_clear_intr(sc)) return; restart: pi = sc->mfi_comms->hw_pi; ci = sc->mfi_comms->hw_ci; mtx_lock(&sc->mfi_io_lock); while (ci != pi) { context = sc->mfi_comms->hw_reply_q[ci]; if (context < sc->mfi_max_fw_cmds) { cm = &sc->mfi_commands[context]; mfi_remove_busy(cm); cm->cm_error = 0; mfi_complete(sc, cm); } if (++ci == (sc->mfi_max_fw_cmds + 1)) ci = 0; } sc->mfi_comms->hw_ci = ci; /* Give deferred I/O a chance to run */ sc->mfi_flags &= ~MFI_FLAGS_QFRZN; mfi_startio(sc); mtx_unlock(&sc->mfi_io_lock); /* * Dummy read to flush the bus; this ensures that the indexes are up * to date. Restart processing if more commands have come in. */ (void)sc->mfi_read_fw_status(sc); if (pi != sc->mfi_comms->hw_pi) goto restart; return; } int mfi_shutdown(struct mfi_softc *sc) { struct mfi_dcmd_frame *dcmd; struct mfi_command *cm; int error; if (sc->mfi_aen_cm != NULL) { sc->cm_aen_abort = 1; mfi_abort(sc, &sc->mfi_aen_cm); } if (sc->mfi_map_sync_cm != NULL) { sc->cm_map_abort = 1; mfi_abort(sc, &sc->mfi_map_sync_cm); } mtx_lock(&sc->mfi_io_lock); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); if (error) { mtx_unlock(&sc->mfi_io_lock); return (error); } dcmd = &cm->cm_frame->dcmd; dcmd->header.flags = MFI_FRAME_DIR_NONE; cm->cm_flags = MFI_CMD_POLLED; cm->cm_data = NULL; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "Failed to shut down controller\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static void mfi_syspdprobe(struct mfi_softc *sc) { struct mfi_frame_header *hdr; struct mfi_command *cm = NULL; struct mfi_pd_list *pdlist = NULL; struct mfi_system_pd *syspd, *tmp; struct mfi_system_pending *syspd_pend; int error, i, found; sx_assert(&sc->mfi_config_lock, SA_XLOCKED); mtx_assert(&sc->mfi_io_lock, MA_OWNED); /* Add SYSTEM PD's */ error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY, (void **)&pdlist, sizeof(*pdlist)); if (error) { device_printf(sc->mfi_dev, "Error while forming SYSTEM PD list\n"); goto out; } cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; cm->cm_frame->dcmd.mbox[1] = 0; if (mfi_mapcmd(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get syspd device listing\n"); goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); hdr = &cm->cm_frame->header; if (hdr->cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status); goto out; } /* Get each PD and add it to the system */ for (i = 0; i < pdlist->count; i++) { if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id) continue; found = 0; TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id ==
pdlist->addr[i].device_id) found = 1; } TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) { if (syspd_pend->pd_id == pdlist->addr[i].device_id) found = 1; } if (found == 0) mfi_add_sys_pd(sc, pdlist->addr[i].device_id); } /* Delete SYSPD's whose state has been changed */ TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) { found = 0; for (i = 0; i < pdlist->count; i++) { if (syspd->pd_id == pdlist->addr[i].device_id) { found = 1; break; } } if (found == 0) { printf("DELETE\n"); mtx_unlock(&sc->mfi_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(sc->mfi_dev, syspd->pd_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); } } out: if (pdlist) free(pdlist, M_MFIBUF); if (cm) mfi_release_command(cm); return; } static void mfi_ldprobe(struct mfi_softc *sc) { struct mfi_frame_header *hdr; struct mfi_command *cm = NULL; struct mfi_ld_list *list = NULL; struct mfi_disk *ld; struct mfi_disk_pending *ld_pend; int error, i; sx_assert(&sc->mfi_config_lock, SA_XLOCKED); mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, (void **)&list, sizeof(*list)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN; if (mfi_wait_command(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get device listing\n"); goto out; } hdr = &cm->cm_frame->header; if (hdr->cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", hdr->cmd_status); goto out; } for (i = 0; i < list->ld_count; i++) { TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == list->ld_list[i].ld.v.target_id) goto skip_add; } TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) { if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id) goto skip_add; } mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); skip_add:; } out: if (list) free(list, M_MFIBUF); if (cm) mfi_release_command(cm); return; } /* * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If * the bits in 24-31 are all set, then it is the number of seconds since * boot. 
*/ static const char * format_timestamp(uint32_t timestamp) { static char buffer[32]; if ((timestamp & 0xff000000) == 0xff000000) snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 0x00ffffff); else snprintf(buffer, sizeof(buffer), "%us", timestamp); return (buffer); } static const char * format_class(int8_t class) { static char buffer[6]; switch (class) { case MFI_EVT_CLASS_DEBUG: return ("debug"); case MFI_EVT_CLASS_PROGRESS: return ("progress"); case MFI_EVT_CLASS_INFO: return ("info"); case MFI_EVT_CLASS_WARNING: return ("WARN"); case MFI_EVT_CLASS_CRITICAL: return ("CRIT"); case MFI_EVT_CLASS_FATAL: return ("FATAL"); case MFI_EVT_CLASS_DEAD: return ("DEAD"); default: snprintf(buffer, sizeof(buffer), "%d", class); return (buffer); } } static void mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) { struct mfi_system_pd *syspd = NULL; device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, format_timestamp(detail->time), detail->evt_class.members.locale, format_class(detail->evt_class.members.evt_class), detail->description); /* Don't act on old AENs or while shutting down */ if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching) return; switch (detail->arg_type) { case MR_EVT_ARGS_NONE: if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) { device_printf(sc->mfi_dev, "HostBus scan raised\n"); if (mfi_detect_jbod_change) { /* * Probe for new SYSPD's and delete * invalid SYSPD's */ sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); } } break; case MR_EVT_ARGS_LD_STATE: /* At load time the driver reads all the events starting * from the one logged after shutdown. Avoid acting on * these old events. */ if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) { /* Remove the LD */ struct mfi_disk *ld; TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == detail->args.ld_state.ld.target_id) break; } /* Fix: for kernel panics when SSCD is removed KASSERT(ld != NULL, ("volume disappeared")); */ if (ld != NULL) { - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(sc->mfi_dev, ld->ld_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); } } break; case MR_EVT_ARGS_PD: if (detail->code == MR_EVT_PD_REMOVED) { if (mfi_detect_jbod_change) { /* * If the removed device is a SYSPD then * delete it */ TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id == detail->args.pd.device_id) { - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child( sc->mfi_dev, syspd->pd_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); break; } } } } if (detail->code == MR_EVT_PD_INSERTED) { if (mfi_detect_jbod_change) { /* Probe for new SYSPD's */ sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); } } if (sc->mfi_cam_rescan_cb != NULL && (detail->code == MR_EVT_PD_INSERTED || detail->code == MR_EVT_PD_REMOVED)) { sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id); } break; } } static void mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) { struct mfi_evt_queue_elm *elm; mtx_assert(&sc->mfi_io_lock, MA_OWNED); elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO); if (elm == NULL) return; memcpy(&elm->detail, detail, sizeof(*detail)); TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link); taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task); } static void mfi_handle_evt(void *context, int pending) { TAILQ_HEAD(,mfi_evt_queue_elm) queue; struct
mfi_softc *sc; struct mfi_evt_queue_elm *elm; sc = context; TAILQ_INIT(&queue); mtx_lock(&sc->mfi_io_lock); TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link); mtx_unlock(&sc->mfi_io_lock); while ((elm = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, elm, link); mfi_decode_evt(sc, &elm->detail); free(elm, M_MFIBUF); } } static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; union mfi_evt current_aen, prior_aen; struct mfi_evt_detail *ed = NULL; int error = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); current_aen.word = locale; if (sc->mfi_aen_cm != NULL) { prior_aen.word = ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; if (prior_aen.members.evt_class <= current_aen.members.evt_class && !((prior_aen.members.locale & current_aen.members.locale) ^current_aen.members.locale)) { return (0); } else { prior_aen.members.locale |= current_aen.members.locale; if (prior_aen.members.evt_class < current_aen.members.evt_class) current_aen.members.evt_class = prior_aen.members.evt_class; mfi_abort(sc, &sc->mfi_aen_cm); } } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, (void **)&ed, sizeof(*ed)); if (error) goto out; dcmd = &cm->cm_frame->dcmd; ((uint32_t *)&dcmd->mbox)[0] = seq; ((uint32_t *)&dcmd->mbox)[1] = locale; cm->cm_flags = MFI_CMD_DATAIN; cm->cm_complete = mfi_aen_complete; sc->last_seq_num = seq; sc->mfi_aen_cm = cm; mfi_enqueue_ready(cm); mfi_startio(sc); out: return (error); } static void mfi_aen_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_softc *sc; struct mfi_evt_detail *detail; struct mfi_aen *mfi_aen_entry, *tmp; int seq = 0, aborted = 0; sc = cm->cm_sc; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if (sc->mfi_aen_cm == NULL) return; hdr = &cm->cm_frame->header; if (sc->cm_aen_abort || hdr->cmd_status == MFI_STAT_INVALID_STATUS) { sc->cm_aen_abort = 0; aborted = 1; } else { sc->mfi_aen_triggered = 1; if (sc->mfi_poll_waiting) { sc->mfi_poll_waiting = 0; selwakeup(&sc->mfi_select); } detail = cm->cm_data; mfi_queue_evt(sc, detail); seq = detail->seq + 1; TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); PROC_LOCK(mfi_aen_entry->p); kern_psignal(mfi_aen_entry->p, SIGIO); PROC_UNLOCK(mfi_aen_entry->p); free(mfi_aen_entry, M_MFIBUF); } } free(cm->cm_data, M_MFIBUF); wakeup(&sc->mfi_aen_cm); sc->mfi_aen_cm = NULL; mfi_release_command(cm); /* set it up again so the driver can catch more events */ if (!aborted) mfi_aen_setup(sc, seq); } #define MAX_EVENTS 15 static int mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; struct mfi_evt_list *el; union mfi_evt class_locale; int error, i, seq, size; mtx_assert(&sc->mfi_io_lock, MA_OWNED); class_locale.members.reserved = 0; class_locale.members.locale = mfi_event_locale; class_locale.members.evt_class = mfi_event_class; size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) * (MAX_EVENTS - 1); el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); if (el == NULL) return (ENOMEM); for (seq = start_seq;;) { if ((cm = mfi_dequeue_free(sc)) == NULL) { free(el, M_MFIBUF); return (EBUSY); } dcmd = &cm->cm_frame->dcmd; bzero(dcmd->mbox, MFI_MBOX_SIZE); dcmd->header.cmd = MFI_CMD_DCMD; dcmd->header.timeout = 0; dcmd->header.data_len = size; dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; ((uint32_t *)&dcmd->mbox)[0] = seq; ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; cm->cm_sg = &dcmd->sgl; 
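/* Each polled MFI_DCMD_CTRL_EVENT_GET fetches at most MAX_EVENTS entries; the loop below keeps advancing seq past the last entry returned until the firmware answers MFI_STAT_NOT_FOUND. */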
cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; cm->cm_data = el; cm->cm_len = size; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get controller entries\n"); mfi_release_command(cm); break; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { mfi_release_command(cm); break; } if (dcmd->header.cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "Error %d fetching controller entries\n", dcmd->header.cmd_status); mfi_release_command(cm); error = EIO; break; } mfi_release_command(cm); for (i = 0; i < el->count; i++) { /* * If this event is newer than 'stop_seq' then * break out of the loop. Note that the log * is a circular buffer so we have to handle * the case that our stop point is earlier in * the buffer than our start point. */ if (el->event[i].seq >= stop_seq) { if (start_seq <= stop_seq) break; else if (el->event[i].seq < start_seq) break; } mfi_queue_evt(sc, &el->event[i]); } seq = el->event[el->count - 1].seq + 1; } free(el, M_MFIBUF); return (error); } static int mfi_add_ld(struct mfi_softc *sc, int id) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd = NULL; struct mfi_ld_info *ld_info = NULL; struct mfi_disk_pending *ld_pend; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO); if (ld_pend != NULL) { ld_pend->ld_id = id; TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link); } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, (void **)&ld_info, sizeof(*ld_info)); if (error) { device_printf(sc->mfi_dev, "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); if (ld_info) free(ld_info, M_MFIBUF); return (error); } cm->cm_flags = MFI_CMD_DATAIN; dcmd = &cm->cm_frame->dcmd; dcmd->mbox[0] = id; if (mfi_wait_command(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get logical drive: %d\n", id); free(ld_info, M_MFIBUF); return (0); } if (ld_info->ld_config.params.isSSCD != 1) mfi_add_ld_complete(cm); else { mfi_release_command(cm); if (ld_info) /* SSCD drives ld_info free here */ free(ld_info, M_MFIBUF); } return (0); } static void mfi_add_ld_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_ld_info *ld_info; struct mfi_softc *sc; device_t child; sc = cm->cm_sc; hdr = &cm->cm_frame->header; ld_info = cm->cm_private; if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) { free(ld_info, M_MFIBUF); wakeup(&sc->mfi_map_sync_cm); mfi_release_command(cm); return; } wakeup(&sc->mfi_map_sync_cm); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { device_printf(sc->mfi_dev, "Failed to add logical disk\n"); free(ld_info, M_MFIBUF); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); return; } device_set_ivars(child, ld_info); device_set_desc(child, "MFI Logical Disk"); bus_generic_attach(sc->mfi_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); } static int mfi_add_sys_pd(struct mfi_softc *sc, int id) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd = NULL; struct mfi_pd_info *pd_info = NULL; struct mfi_system_pending *syspd_pend; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO); if (syspd_pend != NULL) { syspd_pend->pd_id = id; 
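/* Recording the PD on the pending queue first keeps a concurrent mfi_syspdprobe() from adding the same device twice; the probe checks mfi_syspd_pend_tqh as well as the attached list. */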
TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link); } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO, (void **)&pd_info, sizeof(*pd_info)); if (error) { device_printf(sc->mfi_dev, "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n", error); if (pd_info) free(pd_info, M_MFIBUF); return (error); } cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; dcmd = &cm->cm_frame->dcmd; dcmd->mbox[0] = id; dcmd->header.scsi_status = 0; dcmd->header.pad0 = 0; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get physical drive info %d\n", id); free(pd_info, M_MFIBUF); mfi_release_command(cm); return (error); } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_add_sys_pd_complete(cm); return (0); } static void mfi_add_sys_pd_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_pd_info *pd_info; struct mfi_softc *sc; device_t child; sc = cm->cm_sc; hdr = &cm->cm_frame->header; pd_info = cm->cm_private; if (hdr->cmd_status != MFI_STAT_OK) { free(pd_info, M_MFIBUF); mfi_release_command(cm); return; } if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) { device_printf(sc->mfi_dev, "PD=%x is not a SYSTEM PD\n", pd_info->ref.v.device_id); free(pd_info, M_MFIBUF); mfi_release_command(cm); return; } mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) { device_printf(sc->mfi_dev, "Failed to add system pd\n"); free(pd_info, M_MFIBUF); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); return; } device_set_ivars(child, pd_info); device_set_desc(child, "MFI System PD"); bus_generic_attach(sc->mfi_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); } static struct mfi_command * mfi_bio_command(struct mfi_softc *sc) { struct bio *bio; struct mfi_command *cm = NULL; /* Reserve two commands to avoid starvation of ioctls */ if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) { return (NULL); } if ((bio = mfi_dequeue_bio(sc)) == NULL) { return (NULL); } if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) { cm = mfi_build_ldio(sc, bio); } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) { cm = mfi_build_syspdio(sc, bio); } if (!cm) mfi_enqueue_bio(sc, bio); return cm; } /* * mostly copied from cam/scsi/scsi_all.c:scsi_read_write */ int mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb) { int cdb_len; if (((lba & 0x1fffff) == lba) && ((block_count & 0xff) == block_count) && (byte2 == 0)) { /* We can fit in a 6 byte cdb */ struct scsi_rw_6 *scsi_cmd; scsi_cmd = (struct scsi_rw_6 *)cdb; scsi_cmd->opcode = readop ? READ_6 : WRITE_6; scsi_ulto3b(lba, scsi_cmd->addr); scsi_cmd->length = block_count & 0xff; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* Need a 10 byte CDB */ struct scsi_rw_10 *scsi_cmd; scsi_cmd = (struct scsi_rw_10 *)cdb; scsi_cmd->opcode = readop ? READ_10 : WRITE_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else if (((block_count & 0xffffffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* Block count is too big for a 10 byte CDB; use a 12 byte CDB */ struct scsi_rw_12 *scsi_cmd; scsi_cmd = (struct scsi_rw_12 *)cdb; scsi_cmd->opcode = readop ?
READ_12 : WRITE_12; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32 */ struct scsi_rw_16 *scsi_cmd; scsi_cmd = (struct scsi_rw_16 *)cdb; scsi_cmd->opcode = readop ? READ_16 : WRITE_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } return cdb_len; } extern char *unmapped_buf; static struct mfi_command * mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio) { struct mfi_command *cm; struct mfi_pass_frame *pass; uint32_t context = 0; int flags = 0, blkcount = 0, readop; uint8_t cdb_len; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm = mfi_dequeue_free(sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; pass = &cm->cm_frame->pass; bzero(pass->cdb, 16); pass->header.cmd = MFI_CMD_PD_SCSI_IO; switch (bio->bio_cmd) { case BIO_READ: flags = MFI_CMD_DATAIN | MFI_CMD_BIO; readop = 1; break; case BIO_WRITE: flags = MFI_CMD_DATAOUT | MFI_CMD_BIO; readop = 0; break; default: /* TODO: what about BIO_DELETE??? */ biofinish(bio, NULL, EOPNOTSUPP); mfi_enqueue_free(cm); return (NULL); } /* Cheat with the sector length to avoid a non-constant division */ blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN); /* Fill the LBA and Transfer length in CDB */ cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount, pass->cdb); pass->header.target_id = (uintptr_t)bio->bio_driver1; pass->header.lun_id = 0; pass->header.timeout = 0; pass->header.flags = 0; pass->header.scsi_status = 0; pass->header.sense_len = MFI_SENSE_LEN; pass->header.data_len = bio->bio_bcount; pass->header.cdb_len = cdb_len; pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); cm->cm_complete = mfi_bio_complete; cm->cm_private = bio; cm->cm_data = unmapped_buf; cm->cm_len = bio->bio_bcount; cm->cm_sg = &pass->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_flags = flags; return (cm); } static struct mfi_command * mfi_build_ldio(struct mfi_softc *sc, struct bio *bio) { struct mfi_io_frame *io; struct mfi_command *cm; int flags; uint32_t blkcount; uint32_t context = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm = mfi_dequeue_free(sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; io = &cm->cm_frame->io; switch (bio->bio_cmd) { case BIO_READ: io->header.cmd = MFI_CMD_LD_READ; flags = MFI_CMD_DATAIN | MFI_CMD_BIO; break; case BIO_WRITE: io->header.cmd = MFI_CMD_LD_WRITE; flags = MFI_CMD_DATAOUT | MFI_CMD_BIO; break; default: /* TODO: what about BIO_DELETE??? 
*/ biofinish(bio, NULL, EOPNOTSUPP); mfi_enqueue_free(cm); return (NULL); } /* Cheat with the sector length to avoid a non-constant division */ blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN); io->header.target_id = (uintptr_t)bio->bio_driver1; io->header.timeout = 0; io->header.flags = 0; io->header.scsi_status = 0; io->header.sense_len = MFI_SENSE_LEN; io->header.data_len = blkcount; io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; io->lba_lo = bio->bio_pblkno & 0xffffffff; cm->cm_complete = mfi_bio_complete; cm->cm_private = bio; cm->cm_data = unmapped_buf; cm->cm_len = bio->bio_bcount; cm->cm_sg = &io->sgl; cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; cm->cm_flags = flags; return (cm); } static void mfi_bio_complete(struct mfi_command *cm) { struct bio *bio; struct mfi_frame_header *hdr; struct mfi_softc *sc; bio = cm->cm_private; hdr = &cm->cm_frame->header; sc = cm->cm_sc; if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { bio->bio_flags |= BIO_ERROR; bio->bio_error = EIO; device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, " "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status); mfi_print_sense(cm->cm_sc, cm->cm_sense); } else if (cm->cm_error != 0) { bio->bio_flags |= BIO_ERROR; bio->bio_error = cm->cm_error; device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n", cm, cm->cm_error); } mfi_release_command(cm); mfi_disk_complete(bio); } void mfi_startio(struct mfi_softc *sc) { struct mfi_command *cm; struct ccb_hdr *ccbh; for (;;) { /* Don't bother if we're short on resources */ if (sc->mfi_flags & MFI_FLAGS_QFRZN) break; /* Try a command that has already been prepared */ cm = mfi_dequeue_ready(sc); if (cm == NULL) { if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) cm = sc->mfi_cam_start(ccbh); } /* Nope, so look for work on the bioq */ if (cm == NULL) cm = mfi_bio_command(sc); /* No work available, so exit */ if (cm == NULL) break; /* Send the command to the controller */ if (mfi_mapcmd(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to startio\n"); mfi_requeue_ready(cm); break; } } } int mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) { int error, polled; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0; if (cm->cm_flags & MFI_CMD_CCB) error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm, polled); else if (cm->cm_flags & MFI_CMD_BIO) error = bus_dmamap_load_bio(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm, polled); else error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); if (error == EINPROGRESS) { sc->mfi_flags |= MFI_FLAGS_QFRZN; return (0); } } else { error = mfi_send_frame(sc, cm); } return (error); } static void mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mfi_frame_header *hdr; struct mfi_command *cm; union mfi_sgl *sgl; struct mfi_softc *sc; int i, j, first, dir; int sge_size, locked; cm = (struct mfi_command *)arg; sc = cm->cm_sc; hdr = &cm->cm_frame->header; sgl = cm->cm_sg; /* * We need to check if we have the lock as this is async * callback so even though our caller mfi_mapcmd asserts * it has the lock, there is no guarantee that hasn't been * dropped if bus_dmamap_load returned prior to our * completion. 
*/ if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0) mtx_lock(&sc->mfi_io_lock); if (error) { printf("error %d in callback\n", error); cm->cm_error = error; mfi_complete(sc, cm); goto out; } /* Use IEEE sgl only for IO's on a SKINNY controller * For other commands on a SKINNY controller use either * sg32 or sg64 based on the sizeof(bus_addr_t). * Also calculate the total frame size based on the type * of SGL used. */ if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) || (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) || (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) && (sc->mfi_flags & MFI_FLAGS_SKINNY)) { for (i = 0; i < nsegs; i++) { sgl->sg_skinny[i].addr = segs[i].ds_addr; sgl->sg_skinny[i].len = segs[i].ds_len; sgl->sg_skinny[i].flag = 0; } hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64; sge_size = sizeof(struct mfi_sg_skinny); hdr->sg_count = nsegs; } else { j = 0; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { first = cm->cm_stp_len; if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { sgl->sg32[j].addr = segs[0].ds_addr; sgl->sg32[j++].len = first; } else { sgl->sg64[j].addr = segs[0].ds_addr; sgl->sg64[j++].len = first; } } else first = 0; if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { for (i = 0; i < nsegs; i++) { sgl->sg32[j].addr = segs[i].ds_addr + first; sgl->sg32[j++].len = segs[i].ds_len - first; first = 0; } } else { for (i = 0; i < nsegs; i++) { sgl->sg64[j].addr = segs[i].ds_addr + first; sgl->sg64[j++].len = segs[i].ds_len - first; first = 0; } hdr->flags |= MFI_FRAME_SGL64; } hdr->sg_count = j; sge_size = sc->mfi_sge_size; } dir = 0; if (cm->cm_flags & MFI_CMD_DATAIN) { dir |= BUS_DMASYNC_PREREAD; hdr->flags |= MFI_FRAME_DIR_READ; } if (cm->cm_flags & MFI_CMD_DATAOUT) { dir |= BUS_DMASYNC_PREWRITE; hdr->flags |= MFI_FRAME_DIR_WRITE; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); cm->cm_flags |= MFI_CMD_MAPPED; /* * Instead of calculating the total number of frames in the * compound frame, it's already assumed that there will be at * least 1 frame, so don't compensate for the modulo of the * following division. */ cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; if ((error = mfi_send_frame(sc, cm)) != 0) { printf("error %d in callback from mfi_send_frame\n", error); cm->cm_error = error; mfi_complete(sc, cm); goto out; } out: /* leave the lock in the state we found it */ if (locked == 0) mtx_unlock(&sc->mfi_io_lock); return; } static int mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) { int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if (sc->MFA_enabled) error = mfi_tbolt_send_frame(sc, cm); else error = mfi_std_send_frame(sc, cm); if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) mfi_remove_busy(cm); return (error); } static int mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_frame_header *hdr; int tm = mfi_polled_cmd_timeout * 1000; hdr = &cm->cm_frame->header; if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { cm->cm_timestamp = time_uptime; mfi_enqueue_busy(cm); } else { hdr->cmd_status = MFI_STAT_INVALID_STATUS; hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; } /* * The bus address of the command is aligned on a 64 byte boundary, * leaving the least 6 bits as zero. For whatever reason, the * hardware wants the address shifted right by three, leaving just * 3 zero bits. These three bits are then used as a prefetching * hint for the hardware to predict how many frames need to be * fetched across the bus. 
If a command has more than 8 frames * then the 3 bits are set to 0x7 and the firmware uses other * information in the command to determine the total amount to fetch. * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames * is enough for both 32bit and 64bit systems. */ if (cm->cm_extra_frames > 7) cm->cm_extra_frames = 7; sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames); if ((cm->cm_flags & MFI_CMD_POLLED) == 0) return (0); /* This is a polled command, so busy-wait for it to complete. */ while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { DELAY(1000); tm -= 1; if (tm <= 0) break; } if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { device_printf(sc->mfi_dev, "Frame %p timed out " "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); return (ETIMEDOUT); } return (0); } void mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) { int dir; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { dir = 0; if ((cm->cm_flags & MFI_CMD_DATAIN) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) dir |= BUS_DMASYNC_POSTREAD; if (cm->cm_flags & MFI_CMD_DATAOUT) dir |= BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); cm->cm_flags &= ~MFI_CMD_MAPPED; } cm->cm_flags |= MFI_CMD_COMPLETED; if (cm->cm_complete != NULL) cm->cm_complete(cm); else wakeup(cm); } static int mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort) { struct mfi_command *cm; struct mfi_abort_frame *abort; int i = 0, error; uint32_t context = 0; mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; abort = &cm->cm_frame->abort; abort->header.cmd = MFI_CMD_ABORT; abort->header.flags = 0; abort->header.scsi_status = 0; abort->abort_context = (*cm_abort)->cm_frame->header.context; abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr; abort->abort_mfi_addr_hi = (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32); cm->cm_data = NULL; cm->cm_flags = MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed to abort command\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); while (i < 5 && *cm_abort != NULL) { tsleep(cm_abort, 0, "mfiabort", 5 * hz); i++; } if (*cm_abort != NULL) { /* Force a complete if command didn't abort */ mtx_lock(&sc->mfi_io_lock); (*cm_abort)->cm_complete(*cm_abort); mtx_unlock(&sc->mfi_io_lock); } return (error); } int mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) { struct mfi_command *cm; struct mfi_io_frame *io; int error; uint32_t context = 0; if ((cm = mfi_dequeue_free(sc)) == NULL) return (EBUSY); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; io = &cm->cm_frame->io; io->header.cmd = MFI_CMD_LD_WRITE; io->header.target_id = id; io->header.timeout = 0; io->header.flags = 0; io->header.scsi_status = 0; io->header.sense_len = MFI_SENSE_LEN; io->header.data_len = howmany(len, MFI_SECTOR_LEN); io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); io->lba_hi = (lba & 0xffffffff00000000) >> 32; io->lba_lo = lba & 0xffffffff; cm->cm_data = virt; cm->cm_len = len; cm->cm_sg = &io->sgl; 
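/* Dump-path note (editorial, inferred from the surrounding code): the write below is marked MFI_CMD_POLLED, presumably because crash dumps cannot rely on interrupts, so mfi_std_send_frame() busy-waits on cmd_status instead of sleeping. */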
cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed dump blocks\n"); bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_release_command(cm); return (error); } int mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) { struct mfi_command *cm; struct mfi_pass_frame *pass; int error, readop, cdb_len; uint32_t blkcount; if ((cm = mfi_dequeue_free(sc)) == NULL) return (EBUSY); pass = &cm->cm_frame->pass; bzero(pass->cdb, 16); pass->header.cmd = MFI_CMD_PD_SCSI_IO; readop = 0; blkcount = howmany(len, MFI_SECTOR_LEN); cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb); pass->header.target_id = id; pass->header.timeout = 0; pass->header.flags = 0; pass->header.scsi_status = 0; pass->header.sense_len = MFI_SENSE_LEN; pass->header.data_len = len; pass->header.cdb_len = cdb_len; pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); cm->cm_data = virt; cm->cm_len = len; cm->cm_sg = &pass->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed dump blocks\n"); bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_release_command(cm); return (error); } static int mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mfi_softc *sc; int error; sc = dev->si_drv1; mtx_lock(&sc->mfi_io_lock); if (sc->mfi_detaching) error = ENXIO; else { sc->mfi_flags |= MFI_FLAGS_OPEN; error = 0; } mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mfi_softc *sc; struct mfi_aen *mfi_aen_entry, *tmp; sc = dev->si_drv1; mtx_lock(&sc->mfi_io_lock); sc->mfi_flags &= ~MFI_FLAGS_OPEN; TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { if (mfi_aen_entry->p == curproc) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); free(mfi_aen_entry, M_MFIBUF); } } mtx_unlock(&sc->mfi_io_lock); return (0); } static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) { switch (opcode) { case MFI_DCMD_LD_DELETE: case MFI_DCMD_CFG_ADD: case MFI_DCMD_CFG_CLEAR: case MFI_DCMD_CFG_FOREIGN_IMPORT: sx_xlock(&sc->mfi_config_lock); return (1); default: return (0); } } static void mfi_config_unlock(struct mfi_softc *sc, int locked) { if (locked) sx_xunlock(&sc->mfi_config_lock); } /* * Perform pre-issue checks on commands from userland and possibly veto * them. 
*/ static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_disk *ld, *ld2; int error; struct mfi_system_pd *syspd = NULL; uint16_t syspd_id; uint16_t *mbox; mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = 0; switch (cm->cm_frame->dcmd.opcode) { case MFI_DCMD_LD_DELETE: TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) break; } if (ld == NULL) error = ENOENT; else error = mfi_disk_disable(ld); break; case MFI_DCMD_CFG_CLEAR: TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { error = mfi_disk_disable(ld); if (error) break; } if (error) { TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { if (ld2 == ld) break; mfi_disk_enable(ld2); } } break; case MFI_DCMD_PD_STATE_SET: mbox = (uint16_t *)cm->cm_frame->dcmd.mbox; syspd_id = mbox[0]; if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id == syspd_id) break; } } else break; if (syspd) error = mfi_syspd_disable(syspd); break; default: break; } return (error); } /* Perform post-issue checks on commands from userland. */ static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_disk *ld, *ldn; struct mfi_system_pd *syspd = NULL; uint16_t syspd_id; uint16_t *mbox; switch (cm->cm_frame->dcmd.opcode) { case MFI_DCMD_LD_DELETE: TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) break; } KASSERT(ld != NULL, ("volume disappeared")); if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { mtx_unlock(&sc->mfi_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(sc->mfi_dev, ld->ld_dev); - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); } else mfi_disk_enable(ld); break; case MFI_DCMD_CFG_CLEAR: if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { mtx_unlock(&sc->mfi_io_lock); - mtx_lock(&Giant); + bus_topo_lock(); TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) { device_delete_child(sc->mfi_dev, ld->ld_dev); } - mtx_unlock(&Giant); + bus_topo_unlock(); mtx_lock(&sc->mfi_io_lock); } else { TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) mfi_disk_enable(ld); } break; case MFI_DCMD_CFG_ADD: mfi_ldprobe(sc); break; case MFI_DCMD_CFG_FOREIGN_IMPORT: mfi_ldprobe(sc); break; case MFI_DCMD_PD_STATE_SET: mbox = (uint16_t *)cm->cm_frame->dcmd.mbox; syspd_id = mbox[0]; if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id == syspd_id) break; } } else break; /* If the transition fails then enable the syspd again */ if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK) mfi_syspd_enable(syspd); break; } } static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_config_data *conf_data; struct mfi_command *ld_cm = NULL; struct mfi_ld_info *ld_info = NULL; struct mfi_ld_config *ld; char *p; int error = 0; conf_data = (struct mfi_config_data *)cm->cm_data; if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) { p = (char *)conf_data->array; p += conf_data->array_size * conf_data->array_count; ld = (struct mfi_ld_config *)p; if (ld->params.isSSCD == 1) error = 1; } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) { error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO, (void **)&ld_info, sizeof(*ld_info)); if (error) { device_printf(sc->mfi_dev, "Failed to allocate " "MFI_DCMD_LD_GET_INFO %d\n", error); if (ld_info) free(ld_info, M_MFIBUF); return 0; } ld_cm->cm_flags = MFI_CMD_DATAIN; ld_cm->cm_frame->dcmd.mbox[0] =
cm->cm_frame->dcmd.mbox[0]; ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0]; if (mfi_wait_command(sc, ld_cm) != 0) { device_printf(sc->mfi_dev, "failed to get log drv\n"); mfi_release_command(ld_cm); free(ld_info, M_MFIBUF); return 0; } if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) { free(ld_info, M_MFIBUF); mfi_release_command(ld_cm); return 0; } else ld_info = (struct mfi_ld_info *)ld_cm->cm_private; if (ld_info->ld_config.params.isSSCD == 1) error = 1; mfi_release_command(ld_cm); free(ld_info, M_MFIBUF); } return error; } static int mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg) { uint8_t i; struct mfi_ioc_packet *ioc; ioc = (struct mfi_ioc_packet *)arg; int sge_size, error; struct megasas_sge *kern_sge; memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr)); kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off); cm->cm_frame->header.sg_count = ioc->mfi_sge_count; if (sizeof(bus_addr_t) == 8) { cm->cm_frame->header.flags |= MFI_FRAME_SGL64; cm->cm_extra_frames = 2; sge_size = sizeof(struct mfi_sg64); } else { cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; sge_size = sizeof(struct mfi_sg32); } cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count); for (i = 0; i < ioc->mfi_sge_count; i++) { if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ioc->mfi_sgl[i].iov_len,/* maxsize */ 2, /* nsegments */ ioc->mfi_sgl[i].iov_len,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_kbuff_arr_dmat[i])) { device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmat tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i], (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT, &sc->mfi_kbuff_arr_dmamap[i])) { device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmamap memory\n"); return (ENOMEM); } bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i], sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i], ioc->mfi_sgl[i].iov_len, mfi_addr_cb, &sc->mfi_kbuff_arr_busaddr[i], 0); if (!sc->kbuff_arr[i]) { device_printf(sc->mfi_dev, "Could not allocate memory for kbuff_arr info\n"); return -1; } kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i]; kern_sge[i].length = ioc->mfi_sgl[i].iov_len; if (sizeof(bus_addr_t) == 8) { cm->cm_frame->stp.sgl.sg64[i].addr = kern_sge[i].phys_addr; cm->cm_frame->stp.sgl.sg64[i].len = ioc->mfi_sgl[i].iov_len; } else { cm->cm_frame->stp.sgl.sg32[i].addr = kern_sge[i].phys_addr; cm->cm_frame->stp.sgl.sg32[i].len = ioc->mfi_sgl[i].iov_len; } error = copyin(ioc->mfi_sgl[i].iov_base, sc->kbuff_arr[i], ioc->mfi_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); return error; } } cm->cm_flags |=MFI_CMD_MAPPED; return 0; } static int mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; void *ioc_buf = NULL; uint32_t context; int error = 0, locked; if (ioc->buf_size > 0) { if (ioc->buf_size > 1024 * 1024) return (ENOMEM); ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); error = copyin(ioc->buf, ioc_buf, ioc->buf_size); if (error) { device_printf(sc->mfi_dev, "failed to copyin\n"); free(ioc_buf, M_MFIBUF); return (error); } } locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); mtx_lock(&sc->mfi_io_lock); while ((cm = mfi_dequeue_free(sc)) == NULL) msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); /* 
Save context for later */ context = cm->cm_frame->header.context; dcmd = &cm->cm_frame->dcmd; bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); cm->cm_sg = &dcmd->sgl; cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_data = ioc_buf; cm->cm_len = ioc->buf_size; /* restore context */ cm->cm_frame->header.context = context; /* Cheat since we don't know if we're writing or reading */ cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; error = mfi_check_command_pre(sc, cm); if (error) goto out; error = mfi_wait_command(sc, cm); if (error) { device_printf(sc->mfi_dev, "ioctl failed %d\n", error); goto out; } bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); mfi_check_command_post(sc, cm); out: mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); mfi_config_unlock(sc, locked); if (ioc->buf_size > 0) error = copyout(ioc_buf, ioc->buf, ioc->buf_size); if (ioc_buf) free(ioc_buf, M_MFIBUF); return (error); } #define PTRIN(p) ((void *)(uintptr_t)(p)) static int mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mfi_softc *sc; union mfi_statrequest *ms; struct mfi_ioc_packet *ioc; #ifdef COMPAT_FREEBSD32 struct mfi_ioc_packet32 *ioc32; #endif struct mfi_ioc_aen *aen; struct mfi_command *cm = NULL; uint32_t context = 0; union mfi_sense_ptr sense_ptr; uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0; size_t len; int i, res; struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; #ifdef COMPAT_FREEBSD32 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; struct mfi_ioc_passthru iop_swab; #endif int error, locked; union mfi_sgl *sgl; sc = dev->si_drv1; error = 0; if (sc->adpreset) return EBUSY; if (sc->hw_crit_error) return EBUSY; if (sc->issuepend_done == 0) return EBUSY; switch (cmd) { case MFIIO_STATS: ms = (union mfi_statrequest *)arg; switch (ms->ms_item) { case MFIQ_FREE: case MFIQ_BIO: case MFIQ_READY: case MFIQ_BUSY: bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, sizeof(struct mfi_qstat)); break; default: error = ENOIOCTL; break; } break; case MFIIO_QUERY_DISK: { struct mfi_query_disk *qd; struct mfi_disk *ld; qd = (struct mfi_query_disk *)arg; mtx_lock(&sc->mfi_io_lock); TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == qd->array_id) break; } if (ld == NULL) { qd->present = 0; mtx_unlock(&sc->mfi_io_lock); return (0); } qd->present = 1; if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) qd->open = 1; bzero(qd->devname, SPECNAMELEN + 1); snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); mtx_unlock(&sc->mfi_io_lock); break; } case MFI_CMD: #ifdef COMPAT_FREEBSD32 case MFI_CMD32: #endif { devclass_t devclass; ioc = (struct mfi_ioc_packet *)arg; int adapter; adapter = ioc->mfi_adapter_no; if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { devclass = devclass_find("mfi"); sc = devclass_get_softc(devclass, adapter); } mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } mtx_unlock(&sc->mfi_io_lock); locked = 0; /* * save off original context since copying from user * will clobber some data */ context = cm->cm_frame->header.context; cm->cm_frame->header.context = cm->cm_index; bcopy(ioc->mfi_frame.raw, cm->cm_frame, 2 * MEGAMFI_FRAME_SIZE); cm->cm_total_frame_size = (sizeof(union mfi_sgl) * ioc->mfi_sge_count) + ioc->mfi_sgl_off; cm->cm_frame->header.scsi_status = 0; cm->cm_frame->header.pad0 = 0; if (ioc->mfi_sge_count) { cm->cm_sg = (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; } sgl = cm->cm_sg; cm->cm_flags = 0; 
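/* Translate the application-supplied frame flags into driver data-direction flags; the legacy shim below assumes both directions when an old tool leaves them clear. */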
if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) cm->cm_flags |= MFI_CMD_DATAIN; if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) cm->cm_flags |= MFI_CMD_DATAOUT; /* Legacy app shim */ if (cm->cm_flags == 0) cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; cm->cm_len = cm->cm_frame->header.data_len; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; } #endif cm->cm_len += cm->cm_stp_len; } if (cm->cm_len && (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, M_WAITOK | M_ZERO); } else { cm->cm_data = 0; } /* restore header context */ cm->cm_frame->header.context = context; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { res = mfi_stp_cmd(sc, cm, arg); if (res != 0) goto out; } else { temp = data; if ((cm->cm_flags & MFI_CMD_DATAOUT) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) { for (i = 0; i < ioc->mfi_sge_count; i++) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ addr = ioc->mfi_sgl[i].iov_base; len = ioc->mfi_sgl[i].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; addr = PTRIN(ioc32->mfi_sgl[i].iov_base); len = ioc32->mfi_sgl[i].iov_len; } #endif error = copyin(addr, temp, len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); goto out; } temp = &temp[len]; } } } if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { cm->cm_frame->pass.sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; cm->cm_frame->pass.sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); } mtx_lock(&sc->mfi_io_lock); skip_pre_post = mfi_check_for_sscd (sc, cm); if (!skip_pre_post) { error = mfi_check_command_pre(sc, cm); if (error) { mtx_unlock(&sc->mfi_io_lock); goto out; } } if ((error = mfi_wait_command(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Controller polled failed\n"); mtx_unlock(&sc->mfi_io_lock); goto out; } if (!skip_pre_post) { mfi_check_command_post(sc, cm); } mtx_unlock(&sc->mfi_io_lock); if (cm->cm_frame->header.cmd != MFI_CMD_STP) { temp = data; if ((cm->cm_flags & MFI_CMD_DATAIN) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) { for (i = 0; i < ioc->mfi_sge_count; i++) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ addr = ioc->mfi_sgl[i].iov_base; len = ioc->mfi_sgl[i].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; addr = PTRIN(ioc32->mfi_sgl[i].iov_base); len = ioc32->mfi_sgl[i].iov_len; } #endif error = copyout(temp, addr, len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } temp = &temp[len]; } } } if (ioc->mfi_sense_len) { /* get user-space sense ptr then copy out sense */ bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], &sense_ptr.sense_ptr_data[0], sizeof(sense_ptr.sense_ptr_data)); #ifdef COMPAT_FREEBSD32 if (cmd != MFI_CMD) { /* * not 64bit native so zero out any address * over 32bit */ sense_ptr.addr.high = 0; } #endif error = copyout(cm->cm_sense, sense_ptr.user_space, ioc->mfi_sense_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } } ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; out: mfi_config_unlock(sc, locked); if (data) free(data, 
M_MFIBUF); if (cm->cm_frame->header.cmd == MFI_CMD_STP) { for (i = 0; i < 2; i++) { if (sc->kbuff_arr[i]) { if (sc->mfi_kbuff_arr_busaddr[i] != 0) bus_dmamap_unload( sc->mfi_kbuff_arr_dmat[i], sc->mfi_kbuff_arr_dmamap[i] ); if (sc->kbuff_arr[i] != NULL) bus_dmamem_free( sc->mfi_kbuff_arr_dmat[i], sc->kbuff_arr[i], sc->mfi_kbuff_arr_dmamap[i] ); if (sc->mfi_kbuff_arr_dmat[i] != NULL) bus_dma_tag_destroy( sc->mfi_kbuff_arr_dmat[i]); } } } if (cm) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } break; } case MFI_SET_AEN: aen = (struct mfi_ioc_aen *)arg; mtx_lock(&sc->mfi_io_lock); error = mfi_aen_register(sc, aen->aen_seq_num, aen->aen_class_locale); mtx_unlock(&sc->mfi_io_lock); break; case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ { devclass_t devclass; struct mfi_linux_ioc_packet l_ioc; int adapter; devclass = devclass_find("mfi"); if (devclass == NULL) return (ENOENT); error = copyin(arg, &l_ioc, sizeof(l_ioc)); if (error) return (error); adapter = l_ioc.lioc_adapter_no; sc = devclass_get_softc(devclass, adapter); if (sc == NULL) return (ENOENT); return (mfi_linux_ioctl_int(sc->mfi_cdev, cmd, arg, flag, td)); break; } case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ { devclass_t devclass; struct mfi_linux_ioc_aen l_aen; int adapter; devclass = devclass_find("mfi"); if (devclass == NULL) return (ENOENT); error = copyin(arg, &l_aen, sizeof(l_aen)); if (error) return (error); adapter = l_aen.laen_adapter_no; sc = devclass_get_softc(devclass, adapter); if (sc == NULL) return (ENOENT); return (mfi_linux_ioctl_int(sc->mfi_cdev, cmd, arg, flag, td)); break; } #ifdef COMPAT_FREEBSD32 case MFIIO_PASSTHRU32: if (!SV_CURPROC_FLAG(SV_ILP32)) { error = ENOTTY; break; } iop_swab.ioc_frame = iop32->ioc_frame; iop_swab.buf_size = iop32->buf_size; iop_swab.buf = PTRIN(iop32->buf); iop = &iop_swab; /* FALLTHROUGH */ #endif case MFIIO_PASSTHRU: error = mfi_user_command(sc, iop); #ifdef COMPAT_FREEBSD32 if (cmd == MFIIO_PASSTHRU32) iop32->ioc_frame = iop_swab.ioc_frame; #endif break; default: device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); error = ENOTTY; break; } return (error); } static int mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mfi_softc *sc; struct mfi_linux_ioc_packet l_ioc; struct mfi_linux_ioc_aen l_aen; struct mfi_command *cm = NULL; struct mfi_aen *mfi_aen_entry; union mfi_sense_ptr sense_ptr; uint32_t context = 0; uint8_t *data = NULL, *temp; int i; int error, locked; sc = dev->si_drv1; error = 0; switch (cmd) { case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ error = copyin(arg, &l_ioc, sizeof(l_ioc)); if (error != 0) return (error); if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { return (EINVAL); } mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } mtx_unlock(&sc->mfi_io_lock); locked = 0; /* * save off original context since copying from user * will clobber some data */ context = cm->cm_frame->header.context; bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ cm->cm_total_frame_size = (sizeof(union mfi_sgl) * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; cm->cm_frame->header.scsi_status = 0; cm->cm_frame->header.pad0 = 0; if (l_ioc.lioc_sge_count) cm->cm_sg = (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; cm->cm_flags = 0; if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) cm->cm_flags |= MFI_CMD_DATAIN; if (cm->cm_frame->header.flags & 
MFI_FRAME_DATAOUT) cm->cm_flags |= MFI_CMD_DATAOUT; cm->cm_len = cm->cm_frame->header.data_len; if (cm->cm_len && (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, M_WAITOK | M_ZERO); } else { cm->cm_data = 0; } /* restore header context */ cm->cm_frame->header.context = context; temp = data; if (cm->cm_flags & MFI_CMD_DATAOUT) { for (i = 0; i < l_ioc.lioc_sge_count; i++) { error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), temp, l_ioc.lioc_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); goto out; } temp = &temp[l_ioc.lioc_sgl[i].iov_len]; } } if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { cm->cm_frame->pass.sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; cm->cm_frame->pass.sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); } mtx_lock(&sc->mfi_io_lock); error = mfi_check_command_pre(sc, cm); if (error) { mtx_unlock(&sc->mfi_io_lock); goto out; } if ((error = mfi_wait_command(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Controller polled failed\n"); mtx_unlock(&sc->mfi_io_lock); goto out; } mfi_check_command_post(sc, cm); mtx_unlock(&sc->mfi_io_lock); temp = data; if (cm->cm_flags & MFI_CMD_DATAIN) { for (i = 0; i < l_ioc.lioc_sge_count; i++) { error = copyout(temp, PTRIN(l_ioc.lioc_sgl[i].iov_base), l_ioc.lioc_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } temp = &temp[l_ioc.lioc_sgl[i].iov_len]; } } if (l_ioc.lioc_sense_len) { /* get user-space sense ptr then copy out sense */ bcopy(&((struct mfi_linux_ioc_packet*)arg) ->lioc_frame.raw[l_ioc.lioc_sense_off], &sense_ptr.sense_ptr_data[0], sizeof(sense_ptr.sense_ptr_data)); #ifdef __amd64__ /* * only 32bit Linux support so zero out any * address over 32bit */ sense_ptr.addr.high = 0; #endif error = copyout(cm->cm_sense, sense_ptr.user_space, l_ioc.lioc_sense_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } } error = copyout(&cm->cm_frame->header.cmd_status, &((struct mfi_linux_ioc_packet*)arg) ->lioc_frame.hdr.cmd_status, 1); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } out: mfi_config_unlock(sc, locked); if (data) free(data, M_MFIBUF); if (cm) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } return (error); case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ error = copyin(arg, &l_aen, sizeof(l_aen)); if (error != 0) return (error); printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, M_WAITOK); mtx_lock(&sc->mfi_io_lock); if (mfi_aen_entry != NULL) { mfi_aen_entry->p = curproc; TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); } error = mfi_aen_register(sc, l_aen.laen_seq_num, l_aen.laen_class_locale); if (error != 0) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); free(mfi_aen_entry, M_MFIBUF); } mtx_unlock(&sc->mfi_io_lock); return (error); default: device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); error = ENOENT; break; } return (error); } static int mfi_poll(struct cdev *dev, int poll_events, struct thread *td) { struct mfi_softc *sc; int revents = 0; sc = dev->si_drv1; if (poll_events & (POLLIN | POLLRDNORM)) { if (sc->mfi_aen_triggered != 0) { revents |= poll_events & (POLLIN | POLLRDNORM); sc->mfi_aen_triggered = 0; } if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == 
NULL) { revents |= POLLERR; } } if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) { sc->mfi_poll_waiting = 1; selrecord(td, &sc->mfi_select); } } return revents; } static void mfi_dump_all(void) { struct mfi_softc *sc; struct mfi_command *cm; devclass_t dc; time_t deadline; int timedout; int i; dc = devclass_find("mfi"); if (dc == NULL) { printf("No mfi dev class\n"); return; } for (i = 0; ; i++) { sc = devclass_get_softc(dc, i); if (sc == NULL) break; device_printf(sc->mfi_dev, "Dumping\n\n"); timedout = 0; deadline = time_uptime - mfi_cmd_timeout; mtx_lock(&sc->mfi_io_lock); TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { if (cm->cm_timestamp <= deadline) { device_printf(sc->mfi_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_uptime - cm->cm_timestamp)); MFI_PRINT_CMD(cm); timedout++; } } #if 0 if (timedout) MFI_DUMP_CMDS(sc); #endif mtx_unlock(&sc->mfi_io_lock); } return; } static void mfi_timeout(void *data) { struct mfi_softc *sc = (struct mfi_softc *)data; struct mfi_command *cm, *tmp; time_t deadline; int timedout = 0; deadline = time_uptime - mfi_cmd_timeout; if (sc->adpreset == 0) { if (!mfi_tbolt_reset(sc)) { callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz, mfi_timeout, sc); return; } } mtx_lock(&sc->mfi_io_lock); TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) { if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm) continue; if (cm->cm_timestamp <= deadline) { if (sc->adpreset != 0 && sc->issuepend_done == 0) { cm->cm_timestamp = time_uptime; } else { device_printf(sc->mfi_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_uptime - cm->cm_timestamp) ); MFI_PRINT_CMD(cm); MFI_VALIDATE_CMD(sc, cm); /* * While commands can get stuck forever we do * not fail them as there is no way to tell if * the controller has actually processed them * or not. * * In addition its very likely that force * failing a command here would cause a panic * e.g. in UFS. */ timedout++; } } } #if 0 if (timedout) MFI_DUMP_CMDS(sc); #endif mtx_unlock(&sc->mfi_io_lock); callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz, mfi_timeout, sc); if (0) mfi_dump_all(); return; } diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c index 2ea27527e158..32fe8b2fdc25 100644 --- a/sys/dev/mfi/mfi_cam.c +++ b/sys/dev/mfi/mfi_cam.c @@ -1,476 +1,476 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 2007 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_mfi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum mfip_state { MFIP_STATE_NONE, MFIP_STATE_DETACH, MFIP_STATE_RESCAN }; struct mfip_softc { device_t dev; struct mfi_softc *mfi_sc; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; enum mfip_state state; }; static int mfip_probe(device_t); static int mfip_attach(device_t); static int mfip_detach(device_t); static void mfip_cam_action(struct cam_sim *, union ccb *); static void mfip_cam_poll(struct cam_sim *); static void mfip_cam_rescan(struct mfi_softc *, uint32_t tid); static struct mfi_command * mfip_start(void *); static void mfip_done(struct mfi_command *cm); static int mfi_allow_disks = 0; SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RDTUN, &mfi_allow_disks, 0, "event message locale"); static devclass_t mfip_devclass; static device_method_t mfip_methods[] = { DEVMETHOD(device_probe, mfip_probe), DEVMETHOD(device_attach, mfip_attach), DEVMETHOD(device_detach, mfip_detach), DEVMETHOD_END }; static driver_t mfip_driver = { "mfip", mfip_methods, sizeof(struct mfip_softc) }; DRIVER_MODULE(mfip, mfi, mfip_driver, mfip_devclass, 0, 0); MODULE_DEPEND(mfip, cam, 1, 1, 1); MODULE_DEPEND(mfip, mfi, 1, 1, 1); #define ccb_mfip_ptr sim_priv.entries[0].ptr static int mfip_probe(device_t dev) { device_set_desc(dev, "SCSI Passthrough Bus"); return (0); } static int mfip_attach(device_t dev) { struct mfip_softc *sc; struct mfi_softc *mfisc; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); mfisc = device_get_softc(device_get_parent(dev)); sc->dev = dev; sc->state = MFIP_STATE_NONE; sc->mfi_sc = mfisc; mfisc->mfi_cam_start = mfip_start; if ((sc->devq = cam_simq_alloc(MFI_SCSI_MAX_CMDS)) == NULL) return (ENOMEM); sc->sim = cam_sim_alloc(mfip_cam_action, mfip_cam_poll, "mfi", sc, device_get_unit(dev), &mfisc->mfi_io_lock, 1, MFI_SCSI_MAX_CMDS, sc->devq); if (sc->sim == NULL) { cam_simq_free(sc->devq); sc->devq = NULL; device_printf(dev, "CAM SIM attach failed\n"); return (EINVAL); } mfisc->mfi_cam_rescan_cb = mfip_cam_rescan; mtx_lock(&mfisc->mfi_io_lock); if (xpt_bus_register(sc->sim, dev, 0) != 0) { device_printf(dev, "XPT bus registration failed\n"); cam_sim_free(sc->sim, FALSE); sc->sim = NULL; cam_simq_free(sc->devq); sc->devq = NULL; mtx_unlock(&mfisc->mfi_io_lock); return (EINVAL); } mtx_unlock(&mfisc->mfi_io_lock); return (0); } static int mfip_detach(device_t dev) { struct mfip_softc *sc; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); mtx_lock(&sc->mfi_sc->mfi_io_lock); if (sc->state == MFIP_STATE_RESCAN) { mtx_unlock(&sc->mfi_sc->mfi_io_lock); return (EBUSY); } sc->state = MFIP_STATE_DETACH; mtx_unlock(&sc->mfi_sc->mfi_io_lock); sc->mfi_sc->mfi_cam_rescan_cb = NULL; if (sc->sim != NULL) { 
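		/*
		 * Deregister the XPT bus and free the SIM while holding the
		 * I/O lock; the simq itself is freed afterwards, once
		 * nothing can reference it.
		 */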
mtx_lock(&sc->mfi_sc->mfi_io_lock); xpt_bus_deregister(cam_sim_path(sc->sim)); cam_sim_free(sc->sim, FALSE); sc->sim = NULL; mtx_unlock(&sc->mfi_sc->mfi_io_lock); } if (sc->devq != NULL) { cam_simq_free(sc->devq); sc->devq = NULL; } return (0); } static void mfip_cam_action(struct cam_sim *sim, union ccb *ccb) { struct mfip_softc *sc = cam_sim_softc(sim); struct mfi_softc *mfisc = sc->mfi_sc; mtx_assert(&mfisc->mfi_io_lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_target = MFI_SCSI_MAX_TARGETS; cpi->max_lun = MFI_SCSI_MAX_LUNS; cpi->initiator_id = MFI_SCSI_INITIATOR_ID; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_DEV: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SAS; ccb->cts.transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; sas->valid &= ~CTS_SAS_VALID_SPEED; sas->bitrate = 150000; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_SCSI_IO: { struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; ccbh->status = CAM_REQ_INPROG; if (csio->cdb_len > MFI_SCSI_MAX_CDB_LEN) { ccbh->status = CAM_REQ_INVALID; break; } ccbh->ccb_mfip_ptr = sc; TAILQ_INSERT_TAIL(&mfisc->mfi_cam_ccbq, ccbh, sim_links.tqe); mfi_startio(mfisc); return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid) { union ccb *ccb; struct mfip_softc *camsc; struct cam_sim *sim; device_t mfip_dev; - mtx_lock(&Giant); + bus_topo_lock(); mfip_dev = device_find_child(sc->mfi_dev, "mfip", -1); - mtx_unlock(&Giant); + bus_topo_unlock(); if (mfip_dev == NULL) { device_printf(sc->mfi_dev, "Couldn't find mfip child device!\n"); return; } mtx_lock(&sc->mfi_io_lock); camsc = device_get_softc(mfip_dev); if (camsc->state == MFIP_STATE_DETACH) { mtx_unlock(&sc->mfi_io_lock); return; } camsc->state = MFIP_STATE_RESCAN; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mtx_unlock(&sc->mfi_io_lock); device_printf(sc->mfi_dev, "Cannot allocate ccb for bus rescan.\n"); return; } sim = camsc->sim; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim), tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); mtx_unlock(&sc->mfi_io_lock); device_printf(sc->mfi_dev, "Cannot create path for bus rescan.\n"); return; } xpt_rescan(ccb); camsc->state = MFIP_STATE_NONE; mtx_unlock(&sc->mfi_io_lock); } static struct mfi_command * mfip_start(void *data) { union ccb *ccb = data; struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; struct mfip_softc *sc; struct mfi_pass_frame *pt; struct 
mfi_command *cm; uint32_t context = 0; sc = ccbh->ccb_mfip_ptr; if ((cm = mfi_dequeue_free(sc->mfi_sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; pt = &cm->cm_frame->pass; pt->header.cmd = MFI_CMD_PD_SCSI_IO; pt->header.cmd_status = 0; pt->header.scsi_status = 0; pt->header.target_id = ccbh->target_id; pt->header.lun_id = ccbh->target_lun; pt->header.flags = 0; pt->header.timeout = 0; pt->header.data_len = csio->dxfer_len; pt->header.sense_len = MFI_SENSE_LEN; pt->header.cdb_len = csio->cdb_len; pt->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pt->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); if (ccbh->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &pt->cdb[0], csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &pt->cdb[0], csio->cdb_len); cm->cm_complete = mfip_done; cm->cm_private = ccb; cm->cm_sg = &pt->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_data = ccb; cm->cm_len = csio->dxfer_len; switch (ccbh->flags & CAM_DIR_MASK) { case CAM_DIR_IN: cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_CCB; break; case CAM_DIR_OUT: cm->cm_flags = MFI_CMD_DATAOUT | MFI_CMD_CCB; break; case CAM_DIR_NONE: default: cm->cm_data = NULL; cm->cm_len = 0; cm->cm_flags = 0; break; } TAILQ_REMOVE(&sc->mfi_sc->mfi_cam_ccbq, ccbh, sim_links.tqe); return (cm); } static void mfip_done(struct mfi_command *cm) { union ccb *ccb = cm->cm_private; struct ccb_hdr *ccbh = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; struct mfip_softc *sc; struct mfi_pass_frame *pt; sc = ccbh->ccb_mfip_ptr; pt = &cm->cm_frame->pass; switch (pt->header.cmd_status) { case MFI_STAT_OK: { uint8_t command, device; ccbh->status = CAM_REQ_CMP; csio->scsi_status = pt->header.scsi_status; if (ccbh->flags & CAM_CDB_POINTER) command = csio->cdb_io.cdb_ptr[0]; else command = csio->cdb_io.cdb_bytes[0]; if (command == INQUIRY) { device = csio->data_ptr[0] & 0x1f; if ((!mfi_allow_disks && device == T_DIRECT) || (device == T_PROCESSOR)) csio->data_ptr[0] = (csio->data_ptr[0] & 0xe0) | T_NODEVICE; } break; } case MFI_STAT_SCSI_DONE_WITH_ERROR: { int sense_len; ccbh->status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; csio->scsi_status = pt->header.scsi_status; if (pt->header.sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - pt->header.sense_len; else csio->sense_resid = 0; sense_len = min(pt->header.sense_len, sizeof(struct scsi_sense_data)); bzero(&csio->sense_data, sizeof(struct scsi_sense_data)); bcopy(&cm->cm_sense->data[0], &csio->sense_data, sense_len); break; } case MFI_STAT_DEVICE_NOT_FOUND: ccbh->status = CAM_SEL_TIMEOUT; break; case MFI_STAT_SCSI_IO_FAILED: ccbh->status = CAM_REQ_CMP_ERR; csio->scsi_status = pt->header.scsi_status; break; default: ccbh->status = CAM_REQ_CMP_ERR; csio->scsi_status = pt->header.scsi_status; break; } mfi_release_command(cm); xpt_done(ccb); } static void mfip_cam_poll(struct cam_sim *sim) { struct mfip_softc *sc = cam_sim_softc(sim); struct mfi_softc *mfisc = sc->mfi_sc; mfisc->mfi_intr_ptr(mfisc); } diff --git a/sys/dev/mlx/mlx.c b/sys/dev/mlx/mlx.c index f5b023eafc9c..2f961d23e304 100644 --- a/sys/dev/mlx/mlx.c +++ b/sys/dev/mlx/mlx.c @@ -1,3076 +1,3076 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for the Mylex DAC960 family of RAID controllers. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct cdevsw mlx_cdevsw = { .d_version = D_VERSION, .d_open = mlx_open, .d_close = mlx_close, .d_ioctl = mlx_ioctl, .d_name = "mlx", }; devclass_t mlx_devclass; /* * Per-interface accessor methods */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v3_intaction(struct mlx_softc *sc, int action); static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v4_intaction(struct mlx_softc *sc, int action); static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v5_intaction(struct mlx_softc *sc, int action); static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); /* * Status monitoring */ static void mlx_periodic(void *data); static void mlx_periodic_enquiry(struct mlx_command *mc); static void mlx_periodic_eventlog_poll(struct mlx_softc *sc); static void mlx_periodic_eventlog_respond(struct mlx_command *mc); static void mlx_periodic_rebuild(struct mlx_command *mc); /* * Channel Pause */ static void mlx_pause_action(struct mlx_softc *sc); static void mlx_pause_done(struct mlx_command *mc); /* * Command submission. 
 */
static void *mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize,
			 void (*complete)(struct mlx_command *mc));
static int mlx_flush(struct mlx_softc *sc);
static int mlx_check(struct mlx_softc *sc, int drive);
static int mlx_rebuild(struct mlx_softc *sc, int channel, int target);
static int mlx_wait_command(struct mlx_command *mc);
static int mlx_poll_command(struct mlx_command *mc);
void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments,
		    int error);
static void mlx_startio(struct mlx_softc *sc);
static void mlx_completeio(struct mlx_command *mc);
static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu);
void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);

/*
 * Command buffer allocation.
 */
static struct mlx_command *mlx_alloccmd(struct mlx_softc *sc);
static void mlx_releasecmd(struct mlx_command *mc);
static void mlx_freecmd(struct mlx_command *mc);

/*
 * Command management.
 */
static int mlx_getslot(struct mlx_command *mc);
static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs,
			     int nsegments, int error);
static void mlx_unmapcmd(struct mlx_command *mc);
static int mlx_shutdown_locked(struct mlx_softc *sc);
static int mlx_start(struct mlx_command *mc);
static int mlx_done(struct mlx_softc *sc, int startio);
static void mlx_complete(struct mlx_softc *sc);

/*
 * Debugging.
 */
static char *mlx_diagnose_command(struct mlx_command *mc);
static void mlx_describe_controller(struct mlx_softc *sc);
static int mlx_fw_message(struct mlx_softc *sc, int status, int param1, int param2);

/*
 * Utility functions.
 */
static struct mlx_sysdrive *mlx_findunit(struct mlx_softc *sc, int unit);

/********************************************************************************
 ********************************************************************************
                                                              Public Interfaces
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
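 *
 * Resources are torn down here in roughly the reverse order of their
 * allocation in mlx_attach().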
*/ void mlx_free(struct mlx_softc *sc) { struct mlx_command *mc; debug_called(1); /* destroy control device */ if (sc->mlx_dev_t != NULL) destroy_dev(sc->mlx_dev_t); if (sc->mlx_intr) bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr); /* cancel status timeout */ MLX_IO_LOCK(sc); callout_stop(&sc->mlx_timeout); /* throw away any command buffers */ while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) { TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); mlx_freecmd(mc); } MLX_IO_UNLOCK(sc); callout_drain(&sc->mlx_timeout); /* destroy data-transfer DMA tag */ if (sc->mlx_buffer_dmat) bus_dma_tag_destroy(sc->mlx_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); /* disconnect the interrupt handler */ if (sc->mlx_irq != NULL) bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq); /* destroy the parent DMA tag */ if (sc->mlx_parent_dmat) bus_dma_tag_destroy(sc->mlx_parent_dmat); /* release the register window mapping */ if (sc->mlx_mem != NULL) bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem); /* free controller enquiry data */ if (sc->mlx_enq2 != NULL) free(sc->mlx_enq2, M_DEVBUF); sx_destroy(&sc->mlx_config_lock); mtx_destroy(&sc->mlx_io_lock); } /******************************************************************************** * Map the scatter/gather table into bus space */ static void mlx_dma_map_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* save base of s/g table's address in bus space */ sc->mlx_sgbusaddr = segs->ds_addr; } static int mlx_sglist_map(struct mlx_softc *sc) { size_t segsize; int error, ncmd; debug_called(1); /* destroy any existing mappings */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); sc->mlx_sgbusaddr = 0; sc->mlx_sgtable = NULL; sc->mlx_sg_dmat = NULL; /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. If we're called early on, we don't know how * many commands we're going to be asked to support, so only allocate enough * for a couple. */ if (sc->mlx_enq2 == NULL) { ncmd = 2; } else { ncmd = sc->mlx_enq2->me_max_commands; } segsize = sizeof(struct mlx_sgentry) * MLX_NSEG * ncmd; error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mlx_sg_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. We may need to switch to a more complex arrangement * where we allocate in smaller chunks and keep a lookup table from slot * to bus address. 
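 *
 * As an illustration (with made-up numbers, not values taken from this
 * driver): an 8-byte s/g entry, 17 segments per command and 256 commands
 * would give segsize = 8 * 17 * 256 = 34816 bytes for the whole slab.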
*/ error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void **)&sc->mlx_sgtable, BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap); if (error) { device_printf(sc->mlx_dev, "can't allocate s/g table\n"); return(ENOMEM); } (void)bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable, segsize, mlx_dma_map_sg, sc, 0); return(0); } /******************************************************************************** * Initialise the controller and softc */ int mlx_attach(struct mlx_softc *sc) { struct mlx_enquiry_old *meo; int rid, error, fwminor, hscode, hserror, hsparam1, hsparam2, hsmsg; debug_called(1); /* * Initialise per-controller queues. */ TAILQ_INIT(&sc->mlx_work); TAILQ_INIT(&sc->mlx_freecmds); bioq_init(&sc->mlx_bioq); /* * Select accessor methods based on controller interface type. */ switch(sc->mlx_iftype) { case MLX_IFTYPE_2: case MLX_IFTYPE_3: sc->mlx_tryqueue = mlx_v3_tryqueue; sc->mlx_findcomplete = mlx_v3_findcomplete; sc->mlx_intaction = mlx_v3_intaction; sc->mlx_fw_handshake = mlx_v3_fw_handshake; break; case MLX_IFTYPE_4: sc->mlx_tryqueue = mlx_v4_tryqueue; sc->mlx_findcomplete = mlx_v4_findcomplete; sc->mlx_intaction = mlx_v4_intaction; sc->mlx_fw_handshake = mlx_v4_fw_handshake; break; case MLX_IFTYPE_5: sc->mlx_tryqueue = mlx_v5_tryqueue; sc->mlx_findcomplete = mlx_v5_findcomplete; sc->mlx_intaction = mlx_v5_intaction; sc->mlx_fw_handshake = mlx_v5_fw_handshake; break; default: return(ENXIO); /* should never happen */ } /* disable interrupts before we start talking to the controller */ MLX_IO_LOCK(sc); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); /* * Wait for the controller to come ready, handshake with the firmware if required. * This is typically only necessary on platforms where the controller BIOS does not * run. */ hsmsg = 0; DELAY(1000); while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2, hsmsg == 0)) != 0) { /* report first time around... */ if (hsmsg == 0) { device_printf(sc->mlx_dev, "controller initialisation in progress...\n"); hsmsg = 1; } /* did we get a real message? */ if (hscode == 2) { hscode = mlx_fw_message(sc, hserror, hsparam1, hsparam2); /* fatal initialisation error? */ if (hscode != 0) { return(ENXIO); } } } if (hsmsg == 1) device_printf(sc->mlx_dev, "initialisation complete.\n"); /* * Allocate and connect our interrupt. */ rid = 0; sc->mlx_irq = bus_alloc_resource_any(sc->mlx_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->mlx_irq == NULL) { device_printf(sc->mlx_dev, "can't allocate interrupt\n"); return(ENXIO); } error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, mlx_intr, sc, &sc->mlx_intr); if (error) { device_printf(sc->mlx_dev, "can't set up interrupt\n"); return(ENXIO); } /* * Create DMA tag for mapping buffers into controller-addressable space. */ error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* align, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX_MAXPHYS, /* maxsize */ MLX_NSEG, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mlx_io_lock, /* lockarg */ &sc->mlx_buffer_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n"); return(ENOMEM); } /* * Create some initial scatter/gather mappings so we can run the probe * commands. 
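 *
 * (mlx_sglist_map() is called a second time further down, once ENQUIRY2
 * has reported the real command count to size for.)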
*/ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n"); return(error); } /* * We don't (yet) know where the event log is up to. */ sc->mlx_currevent = -1; /* * Obtain controller feature information */ MLX_IO_LOCK(sc); if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY2 failed\n"); return(ENXIO); } /* * Do quirk/feature related things. */ fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff; switch(sc->mlx_iftype) { case MLX_IFTYPE_2: /* These controllers don't report the firmware version in the ENQUIRY2 response */ if ((meo = mlx_enquire(sc, MLX_CMD_ENQUIRY_OLD, sizeof(struct mlx_enquiry_old), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n"); return(ENXIO); } sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor; /* XXX require 2.42 or better (PCI) */ if (meo->me_fwminor < 42) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n"); } free(meo, M_DEVBUF); break; case MLX_IFTYPE_3: /* XXX certify 3.52? */ if (fwminor < 51) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n"); } break; case MLX_IFTYPE_4: /* XXX certify firmware versions? */ if (fwminor < 6) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n"); } break; case MLX_IFTYPE_5: if (fwminor < 7) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n"); } break; default: MLX_IO_UNLOCK(sc); return(ENXIO); /* should never happen */ } MLX_IO_UNLOCK(sc); /* * Create the final scatter/gather mappings now that we have characterised the controller. */ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make final s/g list mapping\n"); return(error); } /* * No user-requested background operation is in progress. */ sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; /* * Create the control device. */ sc->mlx_dev_t = make_dev(&mlx_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "mlx%d", device_get_unit(sc->mlx_dev)); sc->mlx_dev_t->si_drv1 = sc; /* * Start the timeout routine. */ callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); /* print a little information about the controller */ mlx_describe_controller(sc); return(0); } /******************************************************************************** * Locate disk resources and attach children to them. */ void mlx_startup(struct mlx_softc *sc) { struct mlx_enq_sys_drive *mes; struct mlx_sysdrive *dr; int i, error; debug_called(1); /* * Scan all the system drives and attach children for those that * don't currently have them. */ MLX_IO_LOCK(sc); mes = mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(*mes) * MLX_MAXDRIVES, NULL); MLX_IO_UNLOCK(sc); if (mes == NULL) { device_printf(sc->mlx_dev, "error fetching drive status\n"); return; } /* iterate over drives returned */ MLX_CONFIG_LOCK(sc); for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++, dr++) { /* are we already attached to this drive? 
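 * (ms_disk == 0 means no child device has been attached yet.)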
*/ if (dr->ms_disk == 0) { /* pick up drive information */ dr->ms_size = mes[i].sd_size; dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf; dr->ms_state = mes[i].sd_state; /* generate geometry information */ if (sc->mlx_geom == MLX_GEOM_128_32) { dr->ms_heads = 128; dr->ms_sectors = 32; dr->ms_cylinders = dr->ms_size / (128 * 32); } else { /* MLX_GEOM_255/63 */ dr->ms_heads = 255; dr->ms_sectors = 63; dr->ms_cylinders = dr->ms_size / (255 * 63); } dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, -1); if (dr->ms_disk == 0) device_printf(sc->mlx_dev, "device_add_child failed\n"); device_set_ivars(dr->ms_disk, dr); } } free(mes, M_DEVBUF); if ((error = bus_generic_attach(sc->mlx_dev)) != 0) device_printf(sc->mlx_dev, "bus_generic_attach returned %d", error); /* mark controller back up */ MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SHUTDOWN; /* enable interrupts */ sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); } /******************************************************************************** * Disconnect from the controller completely, in preparation for unload. */ int mlx_detach(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); struct mlxd_softc *mlxd; int i, error; debug_called(1); error = EBUSY; MLX_CONFIG_LOCK(sc); if (sc->mlx_state & MLX_STATE_OPEN) goto out; for (i = 0; i < MLX_MAXDRIVES; i++) { if (sc->mlx_sysdrive[i].ms_disk != 0) { mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk); if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */ device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n"); goto out; } } } if ((error = mlx_shutdown(dev))) goto out; MLX_CONFIG_UNLOCK(sc); mlx_free(sc); return (0); out: MLX_CONFIG_UNLOCK(sc); return(error); } /******************************************************************************** * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach, system shutdown, or before performing * an operation which may add or delete system disks. (Call mlx_startup to * resume normal operation.) * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int mlx_shutdown(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); int error; MLX_CONFIG_LOCK(sc); error = mlx_shutdown_locked(sc); MLX_CONFIG_UNLOCK(sc); return (error); } static int mlx_shutdown_locked(struct mlx_softc *sc) { int i, error; debug_called(1); MLX_CONFIG_ASSERT_LOCKED(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SHUTDOWN; sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); if (mlx_flush(sc)) { printf("failed\n"); } else { printf("done\n"); } MLX_IO_UNLOCK(sc); /* delete all our child devices */ for (i = 0; i < MLX_MAXDRIVES; i++) { if (sc->mlx_sysdrive[i].ms_disk != 0) { if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0) return (error); sc->mlx_sysdrive[i].ms_disk = 0; } } return (0); } /******************************************************************************** * Bring the controller to a quiescent state, ready for system suspend. */ int mlx_suspend(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SUSPEND; /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); printf("%s\n", mlx_flush(sc) ? 
"failed" : "done"); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Bring the controller back to a state ready for operation. */ int mlx_resume(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SUSPEND; sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ void mlx_intr(void *arg) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* collect finished commands, queue anything waiting */ MLX_IO_LOCK(sc); mlx_done(sc, 1); MLX_IO_UNLOCK(sc); }; /******************************************************************************* * Receive a buf structure from a child device and queue it on a particular * disk resource, then poke the disk resource to start as much work as it can. */ int mlx_submit_buf(struct mlx_softc *sc, struct bio *bp) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); bioq_insert_tail(&sc->mlx_bioq, bp); sc->mlx_waitbufs++; mlx_startio(sc); return(0); } /******************************************************************************** * Accept an open operation on the control device. */ int mlx_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return(0); } /******************************************************************************** * Accept the last close on the control device. */ int mlx_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return (0); } /******************************************************************************** * Handle controller-specific control operations. */ int mlx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; struct mlx_rebuild_request *rb = (struct mlx_rebuild_request *)addr; struct mlx_rebuild_status *rs = (struct mlx_rebuild_status *)addr; int *arg = (int *)addr; struct mlx_pause *mp; struct mlx_sysdrive *dr; struct mlxd_softc *mlxd; int i, error; switch(cmd) { /* * Enumerate connected system drives; returns the first system drive's * unit number if *arg is -1, or the next unit after *arg if it's * a valid unit on this controller. */ case MLX_NEXT_CHILD: /* search system drives */ MLX_CONFIG_LOCK(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* looking for the next one we come across? */ if (*arg == -1) { *arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk); MLX_CONFIG_UNLOCK(sc); return(0); } /* we want the one after this one */ if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) *arg = -1; } } MLX_CONFIG_UNLOCK(sc); return(ENOENT); /* * Scan the controller to see whether new drives have appeared. */ case MLX_RESCAN_DRIVES: - mtx_lock(&Giant); + bus_topo_lock(); mlx_startup(sc); - mtx_unlock(&Giant); + bus_topo_unlock(); return(0); /* * Disconnect from the specified drive; it may be about to go * away. 
*/ case MLX_DETACH_DRIVE: /* detach one drive */ MLX_CONFIG_LOCK(sc); if (((dr = mlx_findunit(sc, *arg)) == NULL) || ((mlxd = device_get_softc(dr->ms_disk)) == NULL)) { MLX_CONFIG_UNLOCK(sc); return(ENOENT); } device_printf(dr->ms_disk, "detaching..."); error = 0; if (mlxd->mlxd_flags & MLXD_OPEN) { error = EBUSY; goto detach_out; } /* flush controller */ MLX_IO_LOCK(sc); if (mlx_flush(sc)) { MLX_IO_UNLOCK(sc); error = EBUSY; goto detach_out; } MLX_IO_UNLOCK(sc); /* nuke drive */ if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0) goto detach_out; dr->ms_disk = 0; detach_out: MLX_CONFIG_UNLOCK(sc); if (error) { printf("failed\n"); } else { printf("done\n"); } return(error); /* * Pause one or more SCSI channels for a period of time, to assist * in the process of hot-swapping devices. * * Note that at least the 3.51 firmware on the DAC960PL doesn't seem * to do this right. */ case MLX_PAUSE_CHANNEL: /* schedule a channel pause */ /* Does this command work on this firmware? */ if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS)) return(EOPNOTSUPP); /* check time values */ mp = (struct mlx_pause *)addr; if ((mp->mp_when < 0) || (mp->mp_when > 3600)) return(EINVAL); if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30))) return(EINVAL); MLX_IO_LOCK(sc); if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) { /* cancel a pending pause operation */ sc->mlx_pause.mp_which = 0; } else { /* fix for legal channels */ mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1); /* check for a pause currently running */ if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { MLX_IO_UNLOCK(sc); return(EBUSY); } /* looks ok, go with it */ sc->mlx_pause.mp_which = mp->mp_which; sc->mlx_pause.mp_when = time_second + mp->mp_when; sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong; } MLX_IO_UNLOCK(sc); return(0); /* * Accept a command passthrough-style. */ case MLX_COMMAND: return(mlx_user_command(sc, (struct mlx_usercommand *)addr)); /* * Start a rebuild on a given SCSI disk */ case MLX_REBUILDASYNC: MLX_IO_LOCK(sc); if (sc->mlx_background != 0) { MLX_IO_UNLOCK(sc); rb->rr_status = 0x0106; return(EBUSY); } rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target); switch (rb->rr_status) { case 0: error = 0; break; case 0x10000: error = ENOMEM; /* couldn't set up the command */ break; case 0x0002: error = EBUSY; break; case 0x0104: error = EIO; break; case 0x0105: error = ERANGE; break; case 0x0106: error = EBUSY; break; default: error = EINVAL; break; } if (error == 0) sc->mlx_background = MLX_BACKGROUND_REBUILD; MLX_IO_UNLOCK(sc); return(error); /* * Get the status of the current rebuild or consistency check. */ case MLX_REBUILDSTAT: MLX_IO_LOCK(sc); *rs = sc->mlx_rebuildstat; MLX_IO_UNLOCK(sc); return(0); /* * Return the per-controller system drive number matching the * disk device number in (arg), if it happens to belong to us. */ case MLX_GET_SYSDRIVE: error = ENOENT; MLX_CONFIG_LOCK(sc); - mtx_lock(&Giant); + bus_topo_lock(); mlxd = (struct mlxd_softc *)devclass_get_softc(mlxd_devclass, *arg); - mtx_unlock(&Giant); + bus_topo_unlock(); if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) && (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) { error = 0; *arg = mlxd->mlxd_drive - sc->mlx_sysdrive; } MLX_CONFIG_UNLOCK(sc); return(error); default: return(ENOTTY); } } /******************************************************************************** * Handle operations requested by a System Drive connected to this controller. 
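 *
 * As a sketch only (a hypothetical userland consumer, not part of this
 * driver), MLXD_STATUS would typically be driven as:
 *
 *	int state;
 *	if (ioctl(fd, MLXD_STATUS, &state) == 0)
 *		printf("drive state 0x%x\n", state);
 *
 * where fd is an open mlxd(4) disk device.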
 */
int
mlx_submit_ioctl(struct mlx_softc *sc, struct mlx_sysdrive *drive, u_long cmd,
    caddr_t addr, int32_t flag, struct thread *td)
{
    int *arg = (int *)addr;
    int error, result;

    switch(cmd) {
    /*
     * Return the current status of this drive.
     */
    case MLXD_STATUS:
	MLX_IO_LOCK(sc);
	*arg = drive->ms_state;
	MLX_IO_UNLOCK(sc);
	return(0);

    /*
     * Start a background consistency check on this drive.
     */
    case MLXD_CHECKASYNC:		/* start a background consistency check */
	MLX_IO_LOCK(sc);
	if (sc->mlx_background != 0) {
	    MLX_IO_UNLOCK(sc);
	    *arg = 0x0106;
	    return(EBUSY);
	}
	result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]);
	switch (result) {
	case 0:
	    error = 0;
	    break;
	case 0x10000:
	    error = ENOMEM;		/* couldn't set up the command */
	    break;
	case 0x0002:
	    error = EIO;
	    break;
	case 0x0105:
	    error = ERANGE;
	    break;
	case 0x0106:
	    error = EBUSY;
	    break;
	default:
	    error = EINVAL;
	    break;
	}
	if (error == 0)
	    sc->mlx_background = MLX_BACKGROUND_CHECK;
	MLX_IO_UNLOCK(sc);
	*arg = result;
	return(error);
    }
    return(ENOIOCTL);
}

/********************************************************************************
 ********************************************************************************
                                                              Status Monitoring
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Fire off commands to periodically check the status of connected drives.
 */
static void
mlx_periodic(void *data)
{
    struct mlx_softc *sc = (struct mlx_softc *)data;

    debug_called(1);
    MLX_IO_ASSERT_LOCKED(sc);

    /*
     * Run a bus pause?
     */
    if ((sc->mlx_pause.mp_which != 0) &&
	(sc->mlx_pause.mp_when > 0) &&
	(time_second >= sc->mlx_pause.mp_when)) {
	mlx_pause_action(sc);		/* pause is running */
	sc->mlx_pause.mp_when = 0;
	sysbeep(500, SBT_1S);

	/*
	 * Bus pause still running?
	 */
    } else if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) {

	/* time to stop bus pause? */
	if (time_second >= sc->mlx_pause.mp_howlong) {
	    mlx_pause_action(sc);
	    sc->mlx_pause.mp_which = 0;	/* pause is complete */
	    sysbeep(500, SBT_1S);
	} else {
	    sysbeep((time_second % 5) * 100 + 500, SBT_1S / 8);
	}

	/*
	 * Run normal periodic activities?
	 */
    } else if (time_second > (sc->mlx_lastpoll + 10)) {
	sc->mlx_lastpoll = time_second;

	/*
	 * Check controller status.
	 *
	 * XXX Note that this may not actually launch a command in situations
	 * of high load.
	 */
	mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY,
	    imax(sizeof(struct mlx_enquiry), sizeof(struct mlx_enquiry_old)),
	    mlx_periodic_enquiry);

	/*
	 * Check system drive status.
	 *
	 * XXX This might be better left to event-driven detection, e.g. I/O
	 * to an offline drive will detect that it is offline; rebuilds etc.
	 * should detect that the drive is back online.
	 */
	mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(struct mlx_enq_sys_drive) * MLX_MAXDRIVES,
	    mlx_periodic_enquiry);
    }

    /* get drive rebuild/check status */
    /* XXX should check sc->mlx_background if this is only valid while in progress */
    mlx_enquire(sc, MLX_CMD_REBUILDSTAT, sizeof(struct mlx_rebuild_stat),
	mlx_periodic_rebuild);

    /* deal with possibly-missed interrupts and timed-out commands */
    mlx_done(sc, 1);

    /* reschedule another poll next second or so */
    callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc);
}

/********************************************************************************
 * Handle the result of an ENQUIRY command instigated by periodic status polling.
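 *
 * For MLX_CMD_ENQUIRY_OLD the response is first converted in place to the
 * newer mlx_enquiry layout, so the code below only has to deal with one
 * format.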
*/ static void mlx_periodic_enquiry(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* Command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc)); goto out; } /* respond to command */ switch(mc->mc_mailbox[0]) { /* * This is currently a bit fruitless, as we don't know how to extract the eventlog * pointer yet. */ case MLX_CMD_ENQUIRY_OLD: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data; int i; /* convert data in-place to new format */ for (i = (sizeof(me->me_dead) / sizeof(me->me_dead[0])) - 1; i >= 0; i--) { me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan; me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ; } me->me_misc_flags = 0; me->me_rebuild_count = meo->me_rebuild_count; me->me_dead_count = meo->me_dead_count; me->me_critical_sd_count = meo->me_critical_sd_count; me->me_event_log_seq_num = 0; me->me_offline_sd_count = meo->me_offline_sd_count; me->me_max_commands = meo->me_max_commands; me->me_rebuild_flag = meo->me_rebuild_flag; me->me_fwmajor = meo->me_fwmajor; me->me_fwminor = meo->me_fwminor; me->me_status_flags = meo->me_status_flags; me->me_flash_age = meo->me_flash_age; for (i = (sizeof(me->me_drvsize) / sizeof(me->me_drvsize[0])) - 1; i >= 0; i--) { if (i > ((sizeof(meo->me_drvsize) / sizeof(meo->me_drvsize[0])) - 1)) { me->me_drvsize[i] = 0; /* drive beyond supported range */ } else { me->me_drvsize[i] = meo->me_drvsize[i]; } } me->me_num_sys_drvs = meo->me_num_sys_drvs; } /* FALLTHROUGH */ /* * Generic controller status update. We could do more with this than just * checking the event log. */ case MLX_CMD_ENQUIRY: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; if (sc->mlx_currevent == -1) { /* initialise our view of the event log */ sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num; } else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)) { /* record where current events are up to */ sc->mlx_currevent = me->me_event_log_seq_num; debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent); /* mark the event log as busy */ sc->mlx_flags |= MLX_EVENTLOG_BUSY; /* drain new eventlog entries */ mlx_periodic_eventlog_poll(sc); } break; } case MLX_CMD_ENQSYSDRIVE: { struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data; struct mlx_sysdrive *dr; int i; for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++) { /* has state been changed by controller? 
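 * (Transitions are reported to the console and the cached ms_state
 * updated to match.)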
*/ if (dr->ms_state != mes[i].sd_state) { switch(mes[i].sd_state) { case MLX_SYSD_OFFLINE: device_printf(dr->ms_disk, "drive offline\n"); break; case MLX_SYSD_ONLINE: device_printf(dr->ms_disk, "drive online\n"); break; case MLX_SYSD_CRITICAL: device_printf(dr->ms_disk, "drive critical\n"); break; } /* save new state */ dr->ms_state = mes[i].sd_state; } } break; } default: device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]); break; } out: free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } static void mlx_eventlog_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); /* build the command to get one entry */ mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1, mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0); mc->mc_complete = mlx_periodic_eventlog_respond; mc->mc_private = mc; /* start the command */ if (mlx_start(mc) != 0) { mlx_releasecmd(mc); free(mc->mc_data, M_DEVBUF); mc->mc_data = NULL; } } /******************************************************************************** * Instigate a poll for one event log message on (sc). * We only poll for one message at a time, to keep our command usage down. */ static void mlx_periodic_eventlog_poll(struct mlx_softc *sc) { struct mlx_command *mc; void *result = NULL; int error = 0; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(/*sizeof(struct mlx_eventlog_entry)*/1024, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT); out: if (error != 0) { if (mc != NULL) mlx_releasecmd(mc); if ((result != NULL) && (mc->mc_data != NULL)) free(result, M_DEVBUF); } } /******************************************************************************** * Handle the result of polling for a log message, generate diagnostic output. * If this wasn't the last message waiting for us, we'll go collect another. */ static char *mlx_sense_messages[] = { "because write recovery failed", "because of SCSI bus reset failure", "because of double check condition", "because it was removed", "because of gross error on SCSI chip", "because of bad tag returned from drive", "because of timeout on SCSI command", "because of reset SCSI command issued from system", "because busy or parity error count exceeded limit", "because of 'kill drive' command from system", "because of selection timeout", "due to SCSI phase sequence error", "due to unknown status" }; static void mlx_periodic_eventlog_respond(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data; char *reason; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); sc->mlx_lastevent++; /* next message... */ if (mc->mc_status == 0) { /* handle event log message */ switch(el->el_type) { /* * This is the only sort of message we understand at the moment. * The tests here are probably incomplete. */ case MLX_LOGMSG_SENSE: /* sense data */ /* Mylex vendor-specific message indicating a drive was killed? 
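 * (Sense key 9 with ASC 0x80 is vendor-specific; the ASQ indexes the
 * mlx_sense_messages table above for a human-readable reason.)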
*/ if ((el->el_sensekey == 9) && (el->el_asc == 0x80)) { if (el->el_asq < nitems(mlx_sense_messages)) { reason = mlx_sense_messages[el->el_asq]; } else { reason = "for unknown reason"; } device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n", el->el_channel, el->el_target, reason); } /* SCSI drive was reset? */ if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) { device_printf(sc->mlx_dev, "physical drive %d:%d reset\n", el->el_channel, el->el_target); } /* SCSI drive error? */ if (!((el->el_sensekey == 0) || ((el->el_sensekey == 2) && (el->el_asc == 0x04) && ((el->el_asq == 0x01) || (el->el_asq == 0x02))))) { device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n", el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq); device_printf(sc->mlx_dev, " info %4D csi %4D\n", el->el_information, ":", el->el_csi, ":"); } break; default: device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type); break; } } else { device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc)); /* give up on all the outstanding messages, as we may have come unsynched */ sc->mlx_lastevent = sc->mlx_currevent; } /* dispose of command and data */ free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); /* is there another message to obtain? */ if (sc->mlx_lastevent != sc->mlx_currevent) { mlx_periodic_eventlog_poll(sc); } else { /* clear log-busy status */ sc->mlx_flags &= ~MLX_EVENTLOG_BUSY; } } /******************************************************************************** * Handle check/rebuild operations in progress. */ static void mlx_periodic_rebuild(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data; MLX_IO_ASSERT_LOCKED(sc); switch(mc->mc_status) { case 0: /* operation running, update stats */ sc->mlx_rebuildstat = *mr; /* spontaneous rebuild/check? */ if (sc->mlx_background == 0) { sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS; device_printf(sc->mlx_dev, "background check/rebuild operation started\n"); } break; case 0x0105: /* nothing running, finalise stats and report */ switch(sc->mlx_background) { case MLX_BACKGROUND_CHECK: device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */ break; case MLX_BACKGROUND_REBUILD: device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? */ break; case MLX_BACKGROUND_SPONTANEOUS: default: /* if we have previously been non-idle, report the transition */ if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) { device_printf(sc->mlx_dev, "background check/rebuild operation completed\n"); } } sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; break; } free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Channel Pause ******************************************************************************** ********************************************************************************/ /******************************************************************************** * It's time to perform a channel pause action for (sc), either start or stop * the pause. */ static void mlx_pause_action(struct mlx_softc *sc) { struct mlx_command *mc; int failsafe, i, command; MLX_IO_ASSERT_LOCKED(sc); /* What are we doing here? 
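 * mp_when == 0 encodes a resume request; anything else starts a pause.
 * A worked example of the failsafe arithmetic below (numbers are
 * illustrative, not from the source): with 95 seconds of pause left,
 * failsafe = (95 + 5) / 30 = 3, so the controller will restart the
 * channel by itself after 3 x 30 seconds if we never send the resume.
 * The field is 4 bits wide, hence the 0xf clamp and the 450-second
 * (15 x 30) ceiling noted below.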
*/ if (sc->mlx_pause.mp_when == 0) { command = MLX_CMD_STARTCHANNEL; failsafe = 0; } else { command = MLX_CMD_STOPCHANNEL; /* * Channels will always start again after the failsafe period, * which is specified in multiples of 30 seconds. * This constrains us to a maximum pause of 450 seconds. */ failsafe = ((sc->mlx_pause.mp_howlong - time_second) + 5) / 30; if (failsafe > 0xf) { failsafe = 0xf; sc->mlx_pause.mp_howlong = time_second + (0xf * 30) - 5; } } /* build commands for every channel requested */ for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) { if ((1 << i) & sc->mlx_pause.mp_which) { /* get ourselves a command buffer */ if ((mc = mlx_alloccmd(sc)) == NULL) goto fail; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY; if (mlx_getslot(mc)) goto fail; /* build the command */ mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0); mc->mc_complete = mlx_pause_done; mc->mc_private = sc; /* XXX not needed */ if (mlx_start(mc)) goto fail; /* command submitted OK */ return; fail: device_printf(sc->mlx_dev, "%s failed for channel %d\n", command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", i); if (mc != NULL) mlx_releasecmd(mc); } } } static void mlx_pause_done(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int command = mc->mc_mailbox[0]; int channel = mc->mc_mailbox[2] & 0xf; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "%s command failed - %s\n", command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", mlx_diagnose_command(mc)); } else if (command == MLX_CMD_STOPCHANNEL) { device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n", channel, (long)(sc->mlx_pause.mp_howlong - time_second)); } else { device_printf(sc->mlx_dev, "channel %d resuming\n", channel); } mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Command Submission ******************************************************************************** ********************************************************************************/ static void mlx_enquire_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc; struct mlx_command *mc; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); /* build an enquiry command */ sc = mc->mc_sc; mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0); /* do we want a completion callback? */ if (mc->mc_complete != NULL) { if ((error = mlx_start(mc)) != 0) return; } else { /* run the command in either polled or wait mode */ if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) : mlx_poll_command(mc)) return; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n", mlx_diagnose_command(mc)); return; } } } /******************************************************************************** * Perform an Enquiry command using a type-2 command buffer and return a single * linear result buffer. If the completion function is specified, it will * be called with the completed command (and the result response will not be * valid until that point). Otherwise, the command will either be busy-waited * for (interrupts not enabled), or slept for. 
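 * A minimal synchronous usage sketch (hypothetical caller; the
 * command and buffer size are illustrative):
 *
 *	struct mlx_enquiry *meq;
 *
 *	MLX_IO_LOCK(sc);
 *	meq = mlx_enquire(sc, MLX_CMD_ENQUIRY, sizeof(*meq), NULL);
 *	MLX_IO_UNLOCK(sc);
 *	if (meq != NULL) {
 *		(use meq, then release it)
 *		free(meq, M_DEVBUF);
 *	}
 *
 * With a NULL completion function the result buffer belongs to the
 * caller; with a completion function it must be freed by the handler,
 * as mlx_periodic_enquiry() above does.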
*/ static void * mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc)) { struct mlx_command *mc; void *result; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; result = NULL; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(bufsize, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT; if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = bufsize; mc->mc_command = command; if (complete != NULL) { mc->mc_complete = complete; mc->mc_private = mc; } error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT); out: /* we got a command, but nobody else will free it */ if ((mc != NULL) && (mc->mc_complete == NULL)) mlx_releasecmd(mc); /* we got an error, and we allocated a result */ if ((error != 0) && (result != NULL)) { free(result, M_DEVBUF); result = NULL; } return(result); } /******************************************************************************** * Perform a Flush command on the nominated controller. * * May be called with interrupts enabled or disabled; will not return until * the flush operation completes or fails. */ static int mlx_flush(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a flush command */ mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0); /* can't assume that interrupts are going to work here, so play it safe */ if (mlx_poll_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc)); goto out; } error = 0; /* success */ out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background consistency check on (drive). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. */ static int mlx_check(struct mlx_softc *sc, int drive) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a checkasync command, set the "fix it" flag */ mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started"); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background rebuild of the physical drive at (channel),(target). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. 
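 * The return convention, shared with mlx_check() above, is the raw
 * controller status word on completion, or the driver-private
 * sentinel 0x10000 when the command could not even be submitted.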
*/ static int mlx_rebuild(struct mlx_softc *sc, int channel, int target) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a rebuildasync command */ mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Run the command (mc) and return when it completes. * * Interrupts need to be enabled; returns nonzero on error. */ static int mlx_wait_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = mc; /* wake us when you're done */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; /* XXX better timeout? */ while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) { mtx_sleep(mc->mc_private, &sc->mlx_io_lock, PRIBIO | PCATCH, "mlxwcmd", hz); count++; /* each pass sleeps for up to a second, so this bounds the wait */ } if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } return(0); } /******************************************************************************** * Start the command (mc) and busy-wait for it to complete. * * Should only be used when interrupts can't be relied upon. Returns 0 on * success, nonzero on error. * Successfully completed commands are dequeued. */ static int mlx_poll_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = NULL; /* we will poll for it */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; do { /* poll for completion */ mlx_done(mc->mc_sc, 1); } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000)); if (mc->mc_status != MLX_STATUS_BUSY) { TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); return(0); } device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; struct mlxd_softc *mlxd; struct mlx_softc *sc; struct bio *bp; int blkcount; int driveno; int cmd; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); sc = mc->mc_sc; bp = mc->mc_private; if (bp->bio_cmd == BIO_READ) { mc->mc_flags |= MLX_CMD_DATAIN; cmd = MLX_CMD_READSG; } else { mc->mc_flags |= MLX_CMD_DATAOUT; cmd = MLX_CMD_WRITESG; } /* build a suitable I/O command (assumes 512-byte rounded transfers) */ mlxd = bp->bio_disk->d_drv1; driveno = mlxd->mlxd_drive - sc->mlx_sysdrive; blkcount = howmany(bp->bio_bcount, MLX_BLKSIZE); if ((bp->bio_pblkno + blkcount) > sc->mlx_sysdrive[driveno].ms_size) device_printf(sc->mlx_dev, "I/O beyond end of unit (%lld,%d > %lu)\n", (long long)bp->bio_pblkno, blkcount, (u_long)sc->mlx_sysdrive[driveno].ms_size); /* * Build the I/O command. Note that the SG list type bits are set to zero, * denoting the format of SG list that we are using. 
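 * A worked example of the type-5 packing below (illustrative values):
 * for driveno 2 and blkcount 0x123, the transfer-length byte is
 * 0x123 & 0xff = 0x23 and the combined byte is
 * (2 << 3) | ((0x123 >> 8) & 0x07) = 0x11 - the drive number in the
 * upper five bits, the high three bits of the block count in the
 * lower three.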
*/ if (sc->mlx_iftype == MLX_IFTYPE_2) { mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD : MLX_CMD_READSG_OLD, blkcount & 0xff, /* xfer length low byte */ bp->bio_pblkno, /* physical block number */ driveno, /* target drive number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } else { mlx_make_type5(mc, cmd, blkcount & 0xff, /* xfer length low byte */ (driveno << 3) | ((blkcount >> 8) & 0x07), /* target+length high 3 bits */ bp->bio_pblkno, /* physical block number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } /* try to give command to controller */ if (mlx_start(mc) != 0) { /* fail the command */ mc->mc_status = MLX_STATUS_WEDGED; mlx_completeio(mc); } sc->mlx_state &= ~MLX_STATE_QFROZEN; } /******************************************************************************** * Pull as much work off the softc's work queue as possible and give it to the * controller. Leave a couple of slots free for emergencies. */ static void mlx_startio(struct mlx_softc *sc) { struct mlx_command *mc; struct bio *bp; int error; MLX_IO_ASSERT_LOCKED(sc); /* spin until something prevents us from doing any work */ for (;;) { if (sc->mlx_state & MLX_STATE_QFROZEN) break; /* see if there's work to be done */ if ((bp = bioq_first(&sc->mlx_bioq)) == NULL) break; /* get a command */ if ((mc = mlx_alloccmd(sc)) == NULL) break; /* get a slot for the command */ if (mlx_getslot(mc) != 0) { mlx_releasecmd(mc); break; } /* get the buf containing our work */ bioq_remove(&sc->mlx_bioq, bp); sc->mlx_waitbufs--; /* connect the buf to the command */ mc->mc_complete = mlx_completeio; mc->mc_private = bp; mc->mc_data = bp->bio_data; mc->mc_length = bp->bio_bcount; /* map the command so the controller can work with it */ error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_startio_cb, mc, 0); if (error == EINPROGRESS) { sc->mlx_state |= MLX_STATE_QFROZEN; break; } } } /******************************************************************************** * Handle completion of an I/O command. */ static void mlx_completeio(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct bio *bp = mc->mc_private; struct mlxd_softc *mlxd = bp->bio_disk->d_drv1; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; switch(mc->mc_status) { case MLX_STATUS_RDWROFFLINE: /* system drive has gone offline */ device_printf(mlxd->mlxd_dev, "drive offline\n"); /* should signal this with a return code */ mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE; break; default: /* other I/O error */ device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc)); #if 0 device_printf(sc->mlx_dev, " b_bcount %ld blkcount %ld b_pblkno %d\n", bp->bio_bcount, bp->bio_bcount / MLX_BLKSIZE, bp->bio_pblkno); device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " "); #endif break; } } mlx_releasecmd(mc); mlxd_intr(bp); } void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_usercommand *mu; struct mlx_command *mc; struct mlx_dcdb *dcdb; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); mu = (struct mlx_usercommand *)mc->mc_private; dcdb = NULL; /* * If this is a passthrough SCSI command, the DCDB is packed at the * beginning of the data area. 
Fix up the DCDB to point to the correct * physical address and override any bufptr supplied by the caller since * we know what it's meant to be. */ if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) { dcdb = (struct mlx_dcdb *)mc->mc_data; dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb); mu->mu_bufptr = 8; } /* * If there's a data buffer, fix up the command's buffer pointer. */ if (mu->mu_datasize > 0) { mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff; mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff; } debug(0, "command fixup"); /* submit the command and wait */ if (mlx_wait_command(mc) != 0) return; } /******************************************************************************** * Take a command from user-space and try to run it. * * XXX Note that this can't perform very much in the way of error checking, and * as such, applications _must_ be considered trustworthy. * XXX Commands using S/G for data are not supported. */ static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu) { struct mlx_command *mc; void *kbuf; int error; debug_called(0); kbuf = NULL; mc = NULL; error = ENOMEM; /* get ourselves a command and copy in from user space */ MLX_IO_LOCK(sc); if ((mc = mlx_alloccmd(sc)) == NULL) { MLX_IO_UNLOCK(sc); return(error); } bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox)); debug(0, "got command buffer"); /* * if we need a buffer for data transfer, allocate one and copy in its * initial contents */ if (mu->mu_datasize > 0) { if (mu->mu_datasize > MLX_MAXPHYS) { error = EINVAL; goto out; } MLX_IO_UNLOCK(sc); if (((kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) || (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) { MLX_IO_LOCK(sc); goto out; } MLX_IO_LOCK(sc); debug(0, "got kernel buffer"); } /* get a command slot */ if (mlx_getslot(mc)) goto out; debug(0, "got a slot"); if (mu->mu_datasize > 0) { /* range check the pointer to physical buffer address */ if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) - sizeof(u_int32_t)))) { error = EINVAL; goto out; } } /* map the command so the controller can see it */ mc->mc_data = kbuf; mc->mc_length = mu->mu_datasize; mc->mc_private = mu; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT); if (error) goto out; /* copy out status and data */ mu->mu_status = mc->mc_status; if (mu->mu_datasize > 0) { MLX_IO_UNLOCK(sc); error = copyout(kbuf, mu->mu_buf, mu->mu_datasize); MLX_IO_LOCK(sc); } out: mlx_releasecmd(mc); MLX_IO_UNLOCK(sc); if (kbuf != NULL) free(kbuf, M_DEVBUF); return(error); } /******************************************************************************** ******************************************************************************** Command I/O to Controller ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find a free command slot for (mc). * * Don't hand out a slot to a normal-priority command unless there are at least * 4 slots free for priority commands. 
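 * For example (illustrative numbers): with me_max_commands == 64, a
 * normal-priority command is refused once 60 commands are busy, while
 * a priority command may still claim any of the last four slots.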
*/ static int mlx_getslot(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int slot, limit; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* * Enforce slot-usage limit, if we have the required information. */ if (sc->mlx_enq2 != NULL) { limit = sc->mlx_enq2->me_max_commands; } else { limit = 2; } if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4)) return(EBUSY); /* * Allocate an outstanding command slot * * XXX linear search is slow */ for (slot = 0; slot < limit; slot++) { debug(2, "try slot %d", slot); if (sc->mlx_busycmd[slot] == NULL) break; } if (slot < limit) { sc->mlx_busycmd[slot] = mc; sc->mlx_busycmds++; } /* out of slots? */ if (slot >= limit) return(EBUSY); debug(2, "got slot %d", slot); mc->mc_slot = slot; return(0); } /******************************************************************************** * Map/unmap (mc)'s data in the controller's addressable space. */ static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc = mc->mc_sc; struct mlx_sgentry *sg; int i; debug_called(1); /* XXX should be unnecessary */ if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg)) panic("MLX: too many s/g segments (%d, max %d)", nsegments, sc->mlx_enq2->me_max_sg); /* get base address of s/g table */ sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG); /* save s/g table information in command */ mc->mc_nsgent = nsegments; mc->mc_sgphys = sc->mlx_sgbusaddr + (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry)); mc->mc_dataphys = segs[0].ds_addr; /* populate s/g table */ for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; } /* Make sure the buffers are visible on the bus. */ if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREWRITE); } static void mlx_unmapcmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); /* if the command involved data at all */ if (mc->mc_data != NULL) { if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap); } } /******************************************************************************** * Try to deliver (mc) to the controller. * * Can be called at any interrupt level, with or without interrupts enabled. */ static int mlx_start(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int i; debug_called(1); /* save the slot number as ident so we can handle this command when complete */ mc->mc_mailbox[0x1] = mc->mc_slot; /* mark the command as currently being processed */ mc->mc_status = MLX_STATUS_BUSY; /* set a default 60-second timeout XXX tunable? XXX not currently used */ mc->mc_timeout = time_second + 60; /* spin waiting for the mailbox */ for (i = 100000; i > 0; i--) { if (sc->mlx_tryqueue(sc, mc)) { /* move command to work queue */ TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link); return (0); } else if (i > 1) mlx_done(sc, 0); } /* * We couldn't get the controller to take the command. Revoke the slot * that the command was given and return it with a bad status. 
*/ sc->mlx_busycmd[mc->mc_slot] = NULL; device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n"); mc->mc_status = MLX_STATUS_WEDGED; mlx_complete(sc); return(EIO); } /******************************************************************************** * Poll the controller (sc) for completed commands. * Update command status and free slots for reuse. If any slots were freed, * new commands may be posted. * * Returns nonzero if one or more commands were completed. */ static int mlx_done(struct mlx_softc *sc, int startio) { struct mlx_command *mc; int result; u_int8_t slot; u_int16_t status; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); result = 0; /* loop collecting completed commands */ for (;;) { /* poll for a completed command's identifier and status */ if (sc->mlx_findcomplete(sc, &slot, &status)) { result = 1; mc = sc->mlx_busycmd[slot]; /* find command */ if (mc != NULL) { /* paranoia */ if (mc->mc_status == MLX_STATUS_BUSY) { mc->mc_status = status; /* save status */ /* free slot for reuse */ sc->mlx_busycmd[slot] = NULL; sc->mlx_busycmds--; } else { device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot); } } else { device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot); } } else { break; } } /* if we've completed any commands, try posting some more */ if (result && startio) mlx_startio(sc); /* handle completion and timeouts */ mlx_complete(sc); return(result); } /******************************************************************************** * Perform post-completion processing for commands on (sc). */ static void mlx_complete(struct mlx_softc *sc) { struct mlx_command *mc, *nc; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* scan the list of busy/done commands */ mc = TAILQ_FIRST(&sc->mlx_work); while (mc != NULL) { nc = TAILQ_NEXT(mc, mc_link); /* Command has been completed in some fashion */ if (mc->mc_status != MLX_STATUS_BUSY) { /* unmap the command's data buffer */ mlx_unmapcmd(mc); /* * Does the command have a completion handler? */ if (mc->mc_complete != NULL) { /* remove from list and give to handler */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); mc->mc_complete(mc); /* * Is there a sleeper waiting on this command? */ } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */ /* remove from list and wake up sleeper */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); wakeup_one(mc->mc_private); /* * Leave the command for a caller that's polling for it. */ } else { } } mc = nc; } } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Get a new command buffer. * * This may return NULL in low-memory cases. * * Note that using malloc() is expensive (the command buffer is << 1 page) but * necessary if we are to be a loadable module before the zone allocator is fixed. * * If possible, we recycle a command buffer that's been used before. * * XXX Note that command buffers are not cleaned out - it is the caller's * responsibility to ensure that all required fields are filled in before * using a buffer. 
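 * The life cycle used throughout this file is, in sketch form:
 *
 *	mc = mlx_alloccmd(sc);		(may return NULL)
 *	mlx_getslot(mc);		(reserve a controller slot)
 *	(fill in the mailbox, map any data)
 *	mlx_start(mc);			(hand to the hardware)
 *	...				(completion via mlx_done/mlx_complete)
 *	mlx_releasecmd(mc);		(recycle the buffer)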
*/ static struct mlx_command * mlx_alloccmd(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); /* allocate a new command buffer? */ if (mc == NULL) { mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO); if (mc != NULL) { mc->mc_sc = sc; error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap); if (error) { free(mc, M_DEVBUF); return(NULL); } } } return(mc); } /******************************************************************************** * Release a command buffer for recycling. * * XXX It might be a good idea to limit the number of commands we save for reuse * if it's shown that this list bloats out massively. */ static void mlx_releasecmd(struct mlx_command *mc) { debug_called(1); MLX_IO_ASSERT_LOCKED(mc->mc_sc); TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link); } /******************************************************************************** * Permanently discard a command buffer. */ static void mlx_freecmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap); free(mc, M_DEVBUF); } /******************************************************************************** ******************************************************************************** Type 3 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_FULL); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V3_GET_ODBR(sc) & MLX_V3_ODB_SAVAIL) { *slot = MLX_V3_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V3_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V3_PUT_ODBR(sc, MLX_V3_ODB_SAVAIL); MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. (No acknowledge required) */ static void mlx_v3_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V3_PUT_IER(sc, 0); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V3_PUT_IER(sc, 1); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. 
* Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. */ static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V3_GET_FWERROR(sc); if (!(fwerror & MLX_V3_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V3_FWERROR_PEND; *param1 = MLX_V3_GET_FWERROR_PARAM1(sc); *param2 = MLX_V3_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V3_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 4 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */ bus_barrier(sc->mlx_mem, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH, BUS_SPACE_BARRIER_WRITE); /* post command */ MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V4_GET_ODBR(sc) & MLX_V4_ODB_HWSAVAIL) { *slot = MLX_V4_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V4_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V4_PUT_ODBR(sc, MLX_V4_ODB_HWMBOX_ACK); MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v4_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK | MLX_V4_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK & ~MLX_V4_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. 
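 * A sketch of how a caller might drive this during attach (the loop
 * shape is inferred from the return codes; the delay is illustrative):
 *
 *	for (first = 1; ; first = 0) {
 *		switch (mlx_v4_fw_handshake(sc, &err, &p1, &p2, first)) {
 *		case 0:
 *			goto init_done;
 *		case 2:
 *			if (mlx_fw_message(sc, err, p1, p2))
 *				goto init_failed;	(fatal error code)
 *			break;
 *		}
 *		DELAY(1000);
 *	}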
*/ static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V4_GET_FWERROR(sc); if (!(fwerror & MLX_V4_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V4_FWERROR_PEND; *param1 = MLX_V4_GET_FWERROR_PARAM1(sc); *param2 = MLX_V4_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V4_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 5 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_EMPTY) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V5_GET_ODBR(sc) & MLX_V5_ODB_HWSAVAIL) { *slot = MLX_V5_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V5_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V5_PUT_ODBR(sc, MLX_V5_ODB_HWMBOX_ACK); MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v5_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V5_PUT_IER(sc, 0xff & MLX_V5_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V5_PUT_IER(sc, 0xff & ~MLX_V5_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. */ static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); DELAY(1000); } /* init in progress? 
*/ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_INIT_DONE) return(0); /* test for error value */ fwerror = MLX_V5_GET_FWERROR(sc); if (!(fwerror & MLX_V5_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V5_FWERROR_PEND; *param1 = MLX_V5_GET_FWERROR_PARAM1(sc); *param2 = MLX_V5_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V5_PUT_FWERROR(sc, 0xff); return(2); } /******************************************************************************** ******************************************************************************** Debugging ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Return a status message describing (mc) */ static char *mlx_status_messages[] = { "normal completion", /* 00 */ "irrecoverable data error", /* 01 */ "drive does not exist, or is offline", /* 02 */ "attempt to write beyond end of drive", /* 03 */ "bad data encountered", /* 04 */ "invalid log entry request", /* 05 */ "attempt to rebuild online drive", /* 06 */ "new disk failed during rebuild", /* 07 */ "invalid channel/target", /* 08 */ "rebuild/check already in progress", /* 09 */ "one or more disks are dead", /* 10 */ "invalid or non-redundant drive", /* 11 */ "channel is busy", /* 12 */ "channel is not stopped", /* 13 */ "rebuild successfully terminated", /* 14 */ "unsupported command", /* 15 */ "check condition received", /* 16 */ "device is busy", /* 17 */ "selection or command timeout", /* 18 */ "command terminated abnormally", /* 19 */ "" }; static struct { int command; u_int16_t status; int msg; } mlx_messages[] = { {MLX_CMD_READSG, 0x0001, 1}, {MLX_CMD_READSG, 0x0002, 1}, {MLX_CMD_READSG, 0x0105, 3}, {MLX_CMD_READSG, 0x010c, 4}, {MLX_CMD_WRITESG, 0x0001, 1}, {MLX_CMD_WRITESG, 0x0002, 1}, {MLX_CMD_WRITESG, 0x0105, 3}, {MLX_CMD_READSG_OLD, 0x0001, 1}, {MLX_CMD_READSG_OLD, 0x0002, 1}, {MLX_CMD_READSG_OLD, 0x0105, 3}, {MLX_CMD_WRITESG_OLD, 0x0001, 1}, {MLX_CMD_WRITESG_OLD, 0x0002, 1}, {MLX_CMD_WRITESG_OLD, 0x0105, 3}, {MLX_CMD_LOGOP, 0x0105, 5}, {MLX_CMD_REBUILDASYNC, 0x0002, 6}, {MLX_CMD_REBUILDASYNC, 0x0004, 7}, {MLX_CMD_REBUILDASYNC, 0x0105, 8}, {MLX_CMD_REBUILDASYNC, 0x0106, 9}, {MLX_CMD_REBUILDASYNC, 0x0107, 14}, {MLX_CMD_CHECKASYNC, 0x0002, 10}, {MLX_CMD_CHECKASYNC, 0x0105, 11}, {MLX_CMD_CHECKASYNC, 0x0106, 9}, {MLX_CMD_STOPCHANNEL, 0x0106, 12}, {MLX_CMD_STOPCHANNEL, 0x0105, 8}, {MLX_CMD_STARTCHANNEL, 0x0005, 13}, {MLX_CMD_STARTCHANNEL, 0x0105, 8}, {MLX_CMD_DIRECT_CDB, 0x0002, 16}, {MLX_CMD_DIRECT_CDB, 0x0008, 17}, {MLX_CMD_DIRECT_CDB, 0x000e, 18}, {MLX_CMD_DIRECT_CDB, 0x000f, 19}, {MLX_CMD_DIRECT_CDB, 0x0105, 8}, {0, 0x0104, 14}, {-1, 0, 0} }; static char * mlx_diagnose_command(struct mlx_command *mc) { static char unkmsg[80]; int i; /* look up message in table */ for (i = 0; mlx_messages[i].command != -1; i++) if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) && (mc->mc_status == mlx_messages[i].status)) return(mlx_status_messages[mlx_messages[i].msg]); sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]); return(unkmsg); } /******************************************************************************* * Print a string describing the controller (sc) */ static struct { int hwid; char *name; } mlx_controller_names[] = { {0x01, "960P/PD"}, {0x02, "960PL"}, {0x10, "960PG"}, {0x11, "960PJ"}, 
{0x12, "960PR"}, {0x13, "960PT"}, {0x14, "960PTL0"}, {0x15, "960PRL"}, {0x16, "960PTL1"}, {0x20, "1164PVX"}, {-1, NULL} }; static void mlx_describe_controller(struct mlx_softc *sc) { static char buf[80]; char *model; int i; for (i = 0, model = NULL; mlx_controller_names[i].name != NULL; i++) { if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) { model = mlx_controller_names[i].name; break; } } if (model == NULL) { sprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff); model = buf; } device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n", model, sc->mlx_enq2->me_actual_channels, sc->mlx_enq2->me_actual_channels > 1 ? "s" : "", sc->mlx_enq2->me_firmware_id & 0xff, (sc->mlx_enq2->me_firmware_id >> 8) & 0xff, (sc->mlx_enq2->me_firmware_id >> 24) & 0xff, (sc->mlx_enq2->me_firmware_id >> 16) & 0xff, sc->mlx_enq2->me_mem_size / (1024 * 1024)); if (bootverbose) { device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id); device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id); device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_channels, sc->mlx_enq2->me_actual_channels); device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets); device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags); device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives); device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms); device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans); device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_size, sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size); device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type); device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed); device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed); device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands); device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg); device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp); device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod); device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb); device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency); device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout); device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines); device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const); device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk); device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking_factor); device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline); device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n", sc->mlx_enq2->me_scsi_cap & (1<<4) ? 
"differential " : "", (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10, 8 << (sc->mlx_enq2->me_scsi_cap & 0x3)); device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build); device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type); device_printf(sc->mlx_dev, " Features %b\n", sc->mlx_enq2->me_firmware_features, "\20\4Background Init\3Read Ahead\2MORE\1Cluster\n"); } } /******************************************************************************* * Emit a string describing the firmware handshake status code, and return a flag * indicating whether the code represents a fatal error. * * Error code interpretations are from the Linux driver, and don't directly match * the messages printed by Mylex's BIOS. This may change if documentation on the * codes is forthcoming. */ static int mlx_fw_message(struct mlx_softc *sc, int error, int param1, int param2) { switch(error) { case 0x00: device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1); break; case 0x08: /* we could be neater about this and give some indication when we receive more of them */ if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) { device_printf(sc->mlx_dev, "spinning up drives...\n"); sc->mlx_flags |= MLX_SPINUP_REPORTED; } break; case 0x30: device_printf(sc->mlx_dev, "configuration checksum error\n"); break; case 0x60: device_printf(sc->mlx_dev, "mirror race recovery failed\n"); break; case 0x70: device_printf(sc->mlx_dev, "mirror race recovery in progress\n"); break; case 0x90: device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1); break; case 0xa0: device_printf(sc->mlx_dev, "logical drive installation aborted\n"); break; case 0xb0: device_printf(sc->mlx_dev, "mirror race on a critical system drive\n"); break; case 0xd0: device_printf(sc->mlx_dev, "new controller configuration found\n"); break; case 0xf0: device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n"); return(1); default: device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1, param2); break; } return(0); } /******************************************************************************** ******************************************************************************** Utility Functions ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find the disk whose unit number is (unit) on this controller */ static struct mlx_sysdrive * mlx_findunit(struct mlx_softc *sc, int unit) { int i; /* search system drives */ MLX_CONFIG_ASSERT_LOCKED(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* is this the one? */ if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) return(&sc->mlx_sysdrive[i]); } } return(NULL); } diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c index c752d87af810..315583601831 100644 --- a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c +++ b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c @@ -1,540 +1,540 @@ /*- * Copyright (c) 2018, 2019 Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_rss.h" #include "opt_ratelimit.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_MLX5_DUMP, "MLX5DUMP", "MLX5 Firmware dump"); static unsigned mlx5_fwdump_getsize(const struct mlx5_crspace_regmap *rege) { const struct mlx5_crspace_regmap *r; unsigned sz; for (sz = 0, r = rege; r->cnt != 0; r++) sz += r->cnt; return (sz); } static void mlx5_fwdump_destroy_dd(struct mlx5_core_dev *mdev) { mtx_assert(&mdev->dump_lock, MA_OWNED); free(mdev->dump_data, M_MLX5_DUMP); mdev->dump_data = NULL; } static int mlx5_fw_dump_enable = 1; SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_dump_enable, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &mlx5_fw_dump_enable, 0, "Enable fw dump setup and op"); void mlx5_fwdump_prep(struct mlx5_core_dev *mdev) { device_t dev; int error, vsc_addr; unsigned i, sz; u32 addr, in, out, next_addr; mdev->dump_data = NULL; TUNABLE_INT_FETCH("hw.mlx5.fw_dump_enable", &mlx5_fw_dump_enable); if (!mlx5_fw_dump_enable) { mlx5_core_warn(mdev, "Firmware dump administratively prohibited\n"); return; } DROP_GIANT(); error = mlx5_vsc_find_cap(mdev); if (error != 0) { /* Inability to create a firmware dump is not fatal. 
*/ mlx5_core_warn(mdev, "Unable to find vendor-specific capability, error %d\n", error); goto pickup_g; } error = mlx5_vsc_lock(mdev); if (error != 0) goto pickup_g; error = mlx5_vsc_set_space(mdev, MLX5_VSC_DOMAIN_SCAN_CRSPACE); if (error != 0) { mlx5_core_warn(mdev, "VSC scan space is not supported\n"); goto unlock_vsc; } dev = mdev->pdev->dev.bsddev; vsc_addr = mdev->vsc_addr; if (vsc_addr == 0) { mlx5_core_warn(mdev, "Cannot read VSC, no address\n"); goto unlock_vsc; } in = 0; for (sz = 1, addr = 0;;) { MLX5_VSC_SET(vsc_addr, &in, address, addr); pci_write_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, in, 4); error = mlx5_vsc_wait_on_flag(mdev, 1); if (error != 0) { mlx5_core_warn(mdev, "Failed waiting for read complete flag, error %d addr %#x\n", error, addr); goto unlock_vsc; } pci_read_config(dev, vsc_addr + MLX5_VSC_DATA_OFFSET, 4); out = pci_read_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, 4); next_addr = MLX5_VSC_GET(vsc_addr, &out, address); if (next_addr == 0 || next_addr == addr) break; if (next_addr != addr + 4) sz++; addr = next_addr; } if (sz == 1) { mlx5_core_warn(mdev, "no output from scan space\n"); goto unlock_vsc; } /* * We add a sentinel element at the end of the array to * terminate the read loop in mlx5_fwdump(), so allocate sz + 1. */ mdev->dump_rege = malloc((sz + 1) * sizeof(struct mlx5_crspace_regmap), M_MLX5_DUMP, M_WAITOK | M_ZERO); for (i = 0, addr = 0;;) { mdev->dump_rege[i].cnt++; MLX5_VSC_SET(vsc_addr, &in, address, addr); pci_write_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, in, 4); error = mlx5_vsc_wait_on_flag(mdev, 1); if (error != 0) { mlx5_core_warn(mdev, "Failed waiting for read complete flag, error %d addr %#x\n", error, addr); free(mdev->dump_rege, M_MLX5_DUMP); mdev->dump_rege = NULL; goto unlock_vsc; } pci_read_config(dev, vsc_addr + MLX5_VSC_DATA_OFFSET, 4); out = pci_read_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, 4); next_addr = MLX5_VSC_GET(vsc_addr, &out, address); if (next_addr == 0 || next_addr == addr) break; if (next_addr != addr + 4) { if (++i == sz) { mlx5_core_err(mdev, "Inconsistent hw crspace reads (1): sz %u i %u addr %#lx", sz, i, (unsigned long)addr); break; } mdev->dump_rege[i].addr = next_addr; } addr = next_addr; } /* i == sz case already reported by loop above */ if (i + 1 != sz && i != sz) { mlx5_core_err(mdev, "Inconsistent hw crspace reads (2): sz %u i %u addr %#lx", sz, i, (unsigned long)addr); } mdev->dump_size = mlx5_fwdump_getsize(mdev->dump_rege); mdev->dump_data = malloc(mdev->dump_size * sizeof(uint32_t), M_MLX5_DUMP, M_WAITOK | M_ZERO); mdev->dump_valid = false; mdev->dump_copyout = false; unlock_vsc: mlx5_vsc_unlock(mdev); pickup_g: PICKUP_GIANT(); } int mlx5_fwdump(struct mlx5_core_dev *mdev) { const struct mlx5_crspace_regmap *r; uint32_t i, ri; int error; mlx5_core_info(mdev, "Issuing FW dump\n"); mtx_lock(&mdev->dump_lock); if (mdev->dump_data == NULL) { error = EIO; goto failed; } if (mdev->dump_valid) { /* only one dump */ mlx5_core_warn(mdev, "Only one FW dump can be captured aborting FW dump\n"); error = EEXIST; goto failed; } /* mlx5_vsc already warns, be silent. 
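 * The copy loop below walks mdev->dump_rege: each element names a base
 * crspace address and a count of 32-bit registers, and the values land
 * in the flat dump_data[] array in exactly the order that
 * mlx5_fwdump_copyout() later replays them to userspace.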
*/ error = mlx5_vsc_lock(mdev); if (error != 0) goto failed; error = mlx5_vsc_set_space(mdev, MLX5_VSC_DOMAIN_PROTECTED_CRSPACE); if (error != 0) goto unlock_vsc; for (i = 0, r = mdev->dump_rege; r->cnt != 0; r++) { for (ri = 0; ri < r->cnt; ri++) { error = mlx5_vsc_read(mdev, r->addr + ri * 4, &mdev->dump_data[i]); if (error != 0) goto unlock_vsc; i++; } } mdev->dump_valid = true; unlock_vsc: mlx5_vsc_unlock(mdev); failed: mtx_unlock(&mdev->dump_lock); return (error); } void mlx5_fwdump_clean(struct mlx5_core_dev *mdev) { mtx_lock(&mdev->dump_lock); while (mdev->dump_copyout) msleep(&mdev->dump_copyout, &mdev->dump_lock, 0, "mlx5fwc", 0); mlx5_fwdump_destroy_dd(mdev); mtx_unlock(&mdev->dump_lock); free(mdev->dump_rege, M_MLX5_DUMP); } static int mlx5_fwdump_reset(struct mlx5_core_dev *mdev) { int error; error = 0; mtx_lock(&mdev->dump_lock); if (mdev->dump_data != NULL) { while (mdev->dump_copyout) { msleep(&mdev->dump_copyout, &mdev->dump_lock, 0, "mlx5fwr", 0); } mdev->dump_valid = false; } else { error = ENOENT; } mtx_unlock(&mdev->dump_lock); return (error); } static int mlx5_dbsf_to_core(const struct mlx5_tool_addr *devaddr, struct mlx5_core_dev **mdev) { device_t dev; struct pci_dev *pdev; dev = pci_find_dbsf(devaddr->domain, devaddr->bus, devaddr->slot, devaddr->func); if (dev == NULL) return (ENOENT); if (device_get_devclass(dev) != mlx5_core_driver.bsdclass) return (EINVAL); pdev = device_get_softc(dev); *mdev = pci_get_drvdata(pdev); if (*mdev == NULL) return (ENOENT); return (0); } static int mlx5_fwdump_copyout(struct mlx5_core_dev *mdev, struct mlx5_fwdump_get *fwg) { const struct mlx5_crspace_regmap *r; struct mlx5_fwdump_reg rv, *urv; uint32_t i, ri; int error; mtx_lock(&mdev->dump_lock); if (mdev->dump_data == NULL) { mtx_unlock(&mdev->dump_lock); return (ENOENT); } if (fwg->buf == NULL) { fwg->reg_filled = mdev->dump_size; mtx_unlock(&mdev->dump_lock); return (0); } if (!mdev->dump_valid) { mtx_unlock(&mdev->dump_lock); return (ENOENT); } mdev->dump_copyout = true; mtx_unlock(&mdev->dump_lock); urv = fwg->buf; for (i = 0, r = mdev->dump_rege; r->cnt != 0; r++) { for (ri = 0; ri < r->cnt; ri++) { if (i >= fwg->reg_cnt) goto out; rv.addr = r->addr + ri * 4; rv.val = mdev->dump_data[i]; error = copyout(&rv, urv, sizeof(rv)); if (error != 0) return (error); urv++; i++; } } out: fwg->reg_filled = i; mtx_lock(&mdev->dump_lock); mdev->dump_copyout = false; wakeup(&mdev->dump_copyout); mtx_unlock(&mdev->dump_lock); return (0); } static int mlx5_fw_reset(struct mlx5_core_dev *mdev) { device_t dev, bus; int error; error = -mlx5_set_mfrl_reg(mdev, MLX5_FRL_LEVEL3); if (error == 0) { dev = mdev->pdev->dev.bsddev; - mtx_lock(&Giant); + bus_topo_lock(); bus = device_get_parent(dev); error = BUS_RESET_CHILD(device_get_parent(bus), bus, DEVF_RESET_DETACH); - mtx_unlock(&Giant); + bus_topo_unlock(); } return (error); } static int mlx5_eeprom_copyout(struct mlx5_core_dev *dev, struct mlx5_eeprom_get *eeprom_info) { struct mlx5_eeprom eeprom; int error; eeprom.i2c_addr = MLX5_I2C_ADDR_LOW; eeprom.device_addr = 0; eeprom.page_num = MLX5_EEPROM_LOW_PAGE; eeprom.page_valid = 0; /* Read three first bytes to get important info */ error = mlx5_get_eeprom_info(dev, &eeprom); if (error != 0) { mlx5_core_err(dev, "Failed reading EEPROM initial information\n"); return (error); } eeprom_info->eeprom_info_page_valid = eeprom.page_valid; eeprom_info->eeprom_info_out_len = eeprom.len; if (eeprom_info->eeprom_info_buf == NULL) return (0); /* * Allocate needed length buffer and additional space for * page 
0x03 */ eeprom.data = malloc(eeprom.len + MLX5_EEPROM_PAGE_LENGTH, M_MLX5_EEPROM, M_WAITOK | M_ZERO); /* Read the whole eeprom information */ error = mlx5_get_eeprom(dev, &eeprom); if (error != 0) { mlx5_core_err(dev, "Failed reading EEPROM error = %d\n", error); error = 0; /* * Continue printing partial information in case of * an error */ } error = copyout(eeprom.data, eeprom_info->eeprom_info_buf, eeprom.len); free(eeprom.data, M_MLX5_EEPROM); return (error); } static int mlx5_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct mlx5_core_dev *mdev; struct mlx5_fwdump_get *fwg; struct mlx5_tool_addr *devaddr; struct mlx5_fw_update *fu; struct firmware fake_fw; struct mlx5_eeprom_get *eeprom_info; int error; error = 0; switch (cmd) { case MLX5_FWDUMP_GET: if ((fflag & FREAD) == 0) { error = EBADF; break; } fwg = (struct mlx5_fwdump_get *)data; devaddr = &fwg->devaddr; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error != 0) break; error = mlx5_fwdump_copyout(mdev, fwg); break; case MLX5_FWDUMP_RESET: if ((fflag & FWRITE) == 0) { error = EBADF; break; } devaddr = (struct mlx5_tool_addr *)data; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error == 0) error = mlx5_fwdump_reset(mdev); break; case MLX5_FWDUMP_FORCE: if ((fflag & FWRITE) == 0) { error = EBADF; break; } devaddr = (struct mlx5_tool_addr *)data; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error != 0) break; error = mlx5_fwdump(mdev); break; case MLX5_FW_UPDATE: if ((fflag & FWRITE) == 0) { error = EBADF; break; } fu = (struct mlx5_fw_update *)data; if (fu->img_fw_data_len > 10 * 1024 * 1024) { error = EINVAL; break; } devaddr = &fu->devaddr; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error != 0) break; bzero(&fake_fw, sizeof(fake_fw)); fake_fw.name = "umlx_fw_up"; fake_fw.datasize = fu->img_fw_data_len; fake_fw.version = 1; fake_fw.data = (void *)kmem_malloc(fu->img_fw_data_len, M_WAITOK); if (fake_fw.data == NULL) { error = ENOMEM; break; } error = copyin(fu->img_fw_data, __DECONST(void *, fake_fw.data), fu->img_fw_data_len); if (error == 0) error = -mlx5_firmware_flash(mdev, &fake_fw); kmem_free((vm_offset_t)fake_fw.data, fu->img_fw_data_len); break; case MLX5_FW_RESET: if ((fflag & FWRITE) == 0) { error = EBADF; break; } devaddr = (struct mlx5_tool_addr *)data; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error != 0) break; error = mlx5_fw_reset(mdev); break; case MLX5_EEPROM_GET: if ((fflag & FREAD) == 0) { error = EBADF; break; } eeprom_info = (struct mlx5_eeprom_get *)data; devaddr = &eeprom_info->devaddr; error = mlx5_dbsf_to_core(devaddr, &mdev); if (error != 0) break; error = mlx5_eeprom_copyout(mdev, eeprom_info); break; default: error = ENOTTY; break; } return (error); } static struct cdevsw mlx5_ctl_devsw = { .d_version = D_VERSION, .d_ioctl = mlx5_ctl_ioctl, }; static struct cdev *mlx5_ctl_dev; int mlx5_ctl_init(void) { struct make_dev_args mda; int error; make_dev_args_init(&mda); mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME; mda.mda_devsw = &mlx5_ctl_devsw; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_OPERATOR; mda.mda_mode = 0640; error = make_dev_s(&mda, &mlx5_ctl_dev, "mlx5ctl"); return (-error); } void mlx5_ctl_fini(void) { if (mlx5_ctl_dev != NULL) destroy_dev(mlx5_ctl_dev); } diff --git a/sys/dev/mlx5/mlx5_core/mlx5_health.c b/sys/dev/mlx5/mlx5_core/mlx5_health.c index 5cd4bd08e051..f75093b1d9db 100644 --- a/sys/dev/mlx5/mlx5_core/mlx5_health.c +++ b/sys/dev/mlx5/mlx5_core/mlx5_health.c @@ -1,754 +1,755 @@ /*- * Copyright (c) 2013-2019, Mellanox 
Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_rss.h" #include "opt_ratelimit.h" #include #include #include #include #include #include #include #include #include #define MLX5_HEALTH_POLL_INTERVAL (2 * HZ) #define MAX_MISSES 3 enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, MLX5_DROP_NEW_WATCHDOG_WORK, }; enum { MLX5_SENSOR_NO_ERR = 0, MLX5_SENSOR_PCI_COMM_ERR = 1, MLX5_SENSOR_PCI_ERR = 2, MLX5_SENSOR_NIC_DISABLED = 3, MLX5_SENSOR_NIC_SW_RESET = 4, MLX5_SENSOR_FW_SYND_RFR = 5, }; static int mlx5_fw_reset_enable = 1; SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_reset_enable, CTLFLAG_RWTUN, &mlx5_fw_reset_enable, 0, "Enable firmware reset"); static unsigned int sw_reset_to = 1200; SYSCTL_UINT(_hw_mlx5, OID_AUTO, sw_reset_timeout, CTLFLAG_RWTUN, &sw_reset_to, 0, "Minimum timeout in seconds between two firmware resets"); static int lock_sem_sw_reset(struct mlx5_core_dev *dev) { int ret; /* Lock GW access */ ret = -mlx5_vsc_lock(dev); if (ret) { mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret); return ret; } ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET); if (ret) { if (ret == -EBUSY) mlx5_core_dbg(dev, "SW reset FW semaphore already locked, another function will handle the reset\n"); else mlx5_core_warn(dev, "SW reset semaphore lock return %d\n", ret); } /* Unlock GW access */ mlx5_vsc_unlock(dev); return ret; } static int unlock_sem_sw_reset(struct mlx5_core_dev *dev) { int ret; /* Lock GW access */ ret = -mlx5_vsc_lock(dev); if (ret) { mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret); return ret; } ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET); /* Unlock GW access */ mlx5_vsc_unlock(dev); return ret; } u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7; } void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) { u32 cur_cmdq_addr_l_sz; cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | state << MLX5_NIC_IFC_OFFSET, &dev->iseg->cmdq_addr_l_sz); } static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct mlx5_health_buffer __iomem *h = health->health; u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET; u8 synd = 
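The two accessors above treat cmdq_addr_l_sz as a shared register: reads extract a 3-bit NIC interface state, and writes preserve the upper 20 bits where the command-queue address lives. A sketch of the layout they imply, assuming MLX5_NIC_IFC_OFFSET is 8 (its definition is not in this hunk); the helper only restates the getter's arithmetic for illustration:

/*
 *  31                   12 11 10      8 7              0
 * +-----------------------+--+---------+----------------+
 * |  cmdq address (kept)  |  | NIC IFC | log cmdq size  |
 * +-----------------------+--+---------+----------------+
 */
static inline u8
nic_ifc_field(u32 cmdq_addr_l_sz)
{
	return ((cmdq_addr_l_sz >> 8) & 7);	/* same math as the getter */
}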
ioread8(&h->synd); if (rfr && synd) mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd); return rfr && synd; } static void mlx5_trigger_cmd_completions(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.health.work_cmd_completion); unsigned long flags; u64 vector; /* wait for pending handlers to complete */ synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); if (!vector) goto no_trig; vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector); mlx5_cmd_comp_handler(dev, vector, MLX5_CMD_MODE_EVENTS); return; no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } static bool sensor_pci_no_comm(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct mlx5_health_buffer __iomem *h = health->health; bool err = ioread32be(&h->fw_ver) == 0xffffffff; return err; } static bool sensor_nic_disabled(struct mlx5_core_dev *dev) { return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED; } static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev) { return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET; } static u32 check_fatal_sensors(struct mlx5_core_dev *dev) { if (sensor_pci_no_comm(dev)) return MLX5_SENSOR_PCI_COMM_ERR; if (pci_channel_offline(dev->pdev)) return MLX5_SENSOR_PCI_ERR; if (sensor_nic_disabled(dev)) return MLX5_SENSOR_NIC_DISABLED; if (sensor_nic_sw_reset(dev)) return MLX5_SENSOR_NIC_SW_RESET; if (sensor_fw_synd_rfr(dev)) return MLX5_SENSOR_FW_SYND_RFR; return MLX5_SENSOR_NO_ERR; } static void reset_fw_if_needed(struct mlx5_core_dev *dev) { bool supported; u32 cmdq_addr, fatal_error; if (!mlx5_fw_reset_enable) return; supported = (ioread32be(&dev->iseg->initializing) >> MLX5_FW_RESET_SUPPORTED_OFFSET) & 1; if (!supported) return; /* The reset only needs to be issued by one PF. The health buffer is * shared between all functions, and will be cleared during a reset. * Check again to avoid a redundant 2nd reset. If the fatal error was * PCI related, a reset won't help. */ fatal_error = check_fatal_sensors(dev); if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR || fatal_error == MLX5_SENSOR_NIC_DISABLED || fatal_error == MLX5_SENSOR_NIC_SW_RESET) { mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n"); return; } mlx5_core_info(dev, "Issuing FW Reset\n"); /* Write the NIC interface field to initiate the reset; the command * interface address also resides here, so don't overwrite it. */ cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz); iowrite32be((cmdq_addr & 0xFFFFF000) | MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET, &dev->iseg->cmdq_addr_l_sz); } static bool mlx5_health_allow_reset(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned int delta; bool ret; if (health->last_reset_req != 0) { delta = ticks - health->last_reset_req; delta /= hz; ret = delta >= sw_reset_to; } else { ret = true; } /* * In principle, ticks may be 0. Store -1 in that case (off by one) * so the next request is not mistaken for a first request and * allowed to reset unconditionally. */ health->last_reset_req = ticks ?
: -1; if (!ret) mlx5_core_warn(dev, "Firmware reset elided due to auto-reset frequency threshold.\n"); return (ret); } #define MLX5_CRDUMP_WAIT_MS 60000 #define MLX5_FW_RESET_WAIT_MS 1000 #define MLX5_NIC_STATE_POLL_MS 5 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { int end, delay_ms = MLX5_CRDUMP_WAIT_MS; u32 fatal_error; int lock = -EBUSY; fatal_error = check_fatal_sensors(dev); if (fatal_error || force) { if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) == MLX5_DEVICE_STATE_INTERNAL_ERROR) return; if (!force) mlx5_core_err(dev, "internal state error detected\n"); /* * Queue the command completion handler on the command * work queue to avoid racing with the real command * completion handler and then wait for it to * complete: */ queue_work(dev->priv.health.wq_cmd, &dev->priv.health.work_cmd_completion); flush_workqueue(dev->priv.health.wq_cmd); } mutex_lock(&dev->intf_state_mutex); if (force) goto err_state_done; if (fatal_error == MLX5_SENSOR_FW_SYND_RFR && mlx5_health_allow_reset(dev)) { /* Get cr-dump and reset FW semaphore */ if (mlx5_core_is_pf(dev)) lock = lock_sem_sw_reset(dev); /* Execute cr-dump and SW reset */ if (lock != -EBUSY) { (void)mlx5_fwdump(dev); reset_fw_if_needed(dev); delay_ms = MLX5_FW_RESET_WAIT_MS; } } /* Recover from SW reset */ end = jiffies + msecs_to_jiffies(delay_ms); do { if (sensor_nic_disabled(dev)) break; msleep(MLX5_NIC_STATE_POLL_MS); } while (!time_after(jiffies, end)); if (!sensor_nic_disabled(dev)) { mlx5_core_err(dev, "NIC IFC still %d after %ums.\n", mlx5_get_nic_state(dev), delay_ms); } /* Release FW semaphore if we are the lock owner */ if (!lock) unlock_sem_sw_reset(dev); mlx5_core_info(dev, "System error event triggered\n"); err_state_done: mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { u8 nic_mode = mlx5_get_nic_state(dev); if (nic_mode == MLX5_NIC_IFC_SW_RESET) { /* The IFC mode field is 3 bits, so it will read 0x7 in two cases: * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded * and this is a VF); this is not recoverable by SW reset. * Logging of this is handled elsewhere. * 2. FW reset has been issued by another function; the driver can * be reloaded to recover after the mode switches to * MLX5_NIC_IFC_DISABLED. */ if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR) mlx5_core_warn(dev, "NIC SW reset is already in progress\n"); else mlx5_core_warn(dev, "Communication with FW over the PCI link is down\n"); } else { mlx5_core_warn(dev, "NIC mode %d\n", nic_mode); } mlx5_disable_device(dev); } #define MLX5_FW_RESET_WAIT_MS 1000 #define MLX5_NIC_STATE_POLL_MS 5 static void health_recover(struct work_struct *work) { unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS); struct mlx5_core_health *health; struct delayed_work *dwork; struct mlx5_core_dev *dev; struct mlx5_priv *priv; bool recover = true; u8 nic_mode; dwork = container_of(work, struct delayed_work, work); health = container_of(dwork, struct mlx5_core_health, recover_work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); - mtx_lock(&Giant); /* XXX newbus needs this */ + /* XXX This is likely wrong, cut and pasted from elsewhere?
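For context on the Giant-to-bus_topo_lock() conversions throughout this patch: any path that attaches, detaches, or resets newbus children must hold the bus topology lock, which these subsystems previously satisfied by taking Giant. A minimal sketch of the new idiom (a hypothetical helper, not part of the patch):

static int
detach_and_delete_child(device_t parent, device_t child)
{
	int error;

	bus_topo_lock();		/* replaces mtx_lock(&Giant) */
	error = device_detach(child);
	if (error == 0)
		error = device_delete_child(parent, child);
	bus_topo_unlock();		/* replaces mtx_unlock(&Giant) */
	return (error);
}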
*/ + bus_topo_lock(); if (sensor_pci_no_comm(dev)) { mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n"); recover = false; } nic_mode = mlx5_get_nic_state(dev); while (nic_mode != MLX5_NIC_IFC_DISABLED && !time_after(jiffies, end)) { msleep(MLX5_NIC_STATE_POLL_MS); nic_mode = mlx5_get_nic_state(dev); } if (nic_mode != MLX5_NIC_IFC_DISABLED) { mlx5_core_err(dev, "health recovery flow aborted, unexpected NIC IFC mode %d.\n", nic_mode); recover = false; } if (recover) { mlx5_core_info(dev, "Starting health recovery flow\n"); mlx5_recover_device(dev); } - mtx_unlock(&Giant); + bus_topo_unlock(); } /* How much time to wait until health resetting the driver (in msecs) */ #define MLX5_RECOVERY_DELAY_MSECS 60000 #define MLX5_RECOVERY_NO_DELAY 0 static unsigned long get_recovery_delay(struct mlx5_core_dev *dev) { return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR || dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR ? MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY; } static void health_care(struct work_struct *work) { struct mlx5_core_health *health; unsigned long recover_delay; struct mlx5_core_dev *dev; struct mlx5_priv *priv; unsigned long flags; health = container_of(work, struct mlx5_core_health, work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); recover_delay = msecs_to_jiffies(get_recovery_delay(dev)); spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) { mlx5_core_warn(dev, "Scheduling recovery work with %lums delay\n", recover_delay); schedule_delayed_work(&health->recover_work, recover_delay); } else { mlx5_core_err(dev, "new health works are not permitted at this stage\n"); } spin_unlock_irqrestore(&health->wq_lock, flags); } static int get_next_poll_jiffies(void) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; next += jiffies + MLX5_HEALTH_POLL_INTERVAL; return next; } void mlx5_trigger_health_work(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) queue_work(health->wq, &health->work); else mlx5_core_err(dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static const char *hsynd_str(u8 synd) { switch (synd) { case MLX5_HEALTH_SYNDR_FW_ERR: return "firmware internal error"; case MLX5_HEALTH_SYNDR_IRISC_ERR: return "irisc not responding"; case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR: return "unrecoverable hardware error"; case MLX5_HEALTH_SYNDR_CRC_ERR: return "firmware CRC error"; case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: return "ICM fetch PCI error"; case MLX5_HEALTH_SYNDR_HW_FTL_ERR: return "HW fatal error\n"; case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: return "async EQ buffer overrun"; case MLX5_HEALTH_SYNDR_EQ_ERR: return "EQ error"; case MLX5_HEALTH_SYNDR_EQ_INV: return "Invalid EQ referenced"; case MLX5_HEALTH_SYNDR_FFSER_ERR: return "FFSER error"; case MLX5_HEALTH_SYNDR_HIGH_TEMP: return "High temperature"; default: return "unrecognized error"; } } static u8 print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct mlx5_health_buffer __iomem *h = health->health; u8 synd = ioread8(&h->synd); char fw_str[18]; u32 fw; int i; /* * If synd is 0x0 - this indicates 
that FW is unable to * respond to initialization segment reads and health buffer * should not be read. */ if (synd == 0) return (0); for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) mlx5_core_info(dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); mlx5_core_info(dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); mlx5_core_info(dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); mlx5_core_info(dev, "fw_ver %s\n", fw_str); mlx5_core_info(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); mlx5_core_info(dev, "irisc_index %d\n", ioread8(&h->irisc_index)); mlx5_core_info(dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); mlx5_core_info(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); mlx5_core_info(dev, "raw fw_ver 0x%08x\n", fw); return synd; } static void health_watchdog(struct work_struct *work) { struct mlx5_core_dev *dev; u16 power; u8 status; int err; dev = container_of(work, struct mlx5_core_dev, priv.health.work_watchdog); if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_FEATURE(dev, pcie_status_and_power)) return; err = mlx5_pci_read_power_status(dev, &power, &status); if (err < 0) { mlx5_core_warn(dev, "Failed reading power status: %d\n", err); return; } dev->pwr_value = power; if (dev->pwr_status != status) { switch (status) { case 0: dev->pwr_status = status; mlx5_core_info(dev, "PCI power is not published by the PCIe slot.\n"); break; case 1: dev->pwr_status = status; mlx5_core_info(dev, "PCIe slot advertised sufficient power (%uW).\n", power); break; case 2: dev->pwr_status = status; mlx5_core_warn(dev, "Detected insufficient power on the PCIe slot (%uW).\n", power); break; default: dev->pwr_status = 0; mlx5_core_warn(dev, "Unknown power state detected(%d).\n", status); break; } } } void mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags)) queue_work(health->wq_watchdog, &health->work_watchdog); else mlx5_core_err(dev, "scheduling watchdog is not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static void poll_health(unsigned long data) { struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; struct mlx5_core_health *health = &dev->priv.health; u32 fatal_error; u32 count; if (dev->state != MLX5_DEVICE_STATE_UP) return; count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; else health->miss_counter = 0; health->prev = count; if (health->miss_counter == MAX_MISSES) { mlx5_core_err(dev, "device's health compromised - reached miss count\n"); if (print_health_info(dev) == 0) mlx5_core_err(dev, "FW is unable to respond to initialization segment reads\n"); } fatal_error = check_fatal_sensors(dev); if (fatal_error && !health->fatal_error) { mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); dev->priv.health.fatal_error = fatal_error; print_health_info(dev); mlx5_trigger_health_work(dev); } mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; init_timer(&health->timer); health->fatal_error = MLX5_SENSOR_NO_ERR; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); 
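poll_health() above is, at its core, a stall detector on the device-incremented health counter: MAX_MISSES consecutive identical reads mean the firmware has stopped updating the buffer, and only the transition to exactly MAX_MISSES (==, not >=) fires the report, so it is logged once. An illustrative distillation of that logic, not code from this file:

static bool
health_counter_stalled(u32 count, u32 *prev, u32 *misses)
{
	if (count == *prev)
		++(*misses);		/* counter did not move since last poll */
	else
		*misses = 0;
	*prev = count;
	return (*misses == MAX_MISSES);	/* true exactly once per stall */
}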
clear_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; setup_timer(&health->timer, poll_health, (unsigned long)dev); mod_timer(&health->timer, round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL)); /* do initial PCI power state readout */ mlx5_trigger_health_watchdog(dev); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; if (disable_health) { spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); } del_timer_sync(&health->timer); } void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); cancel_work_sync(&health->work_watchdog); } void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&dev->priv.health.recover_work); } void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; destroy_workqueue(health->wq); destroy_workqueue(health->wq_watchdog); destroy_workqueue(health->wq_cmd); } int mlx5_health_init(struct mlx5_core_dev *dev) { struct mlx5_core_health *health; char name[64]; health = &dev->priv.health; snprintf(name, sizeof(name), "%s-rec", dev_name(&dev->pdev->dev)); health->wq = create_singlethread_workqueue(name); if (!health->wq) goto err_recovery; snprintf(name, sizeof(name), "%s-wdg", dev_name(&dev->pdev->dev)); health->wq_watchdog = create_singlethread_workqueue(name); if (!health->wq_watchdog) goto err_watchdog; snprintf(name, sizeof(name), "%s-cmd", dev_name(&dev->pdev->dev)); health->wq_cmd = create_singlethread_workqueue(name); if (!health->wq_cmd) goto err_cmd; spin_lock_init(&health->wq_lock); INIT_WORK(&health->work, health_care); INIT_WORK(&health->work_watchdog, health_watchdog); INIT_WORK(&health->work_cmd_completion, mlx5_trigger_cmd_completions); INIT_DELAYED_WORK(&health->recover_work, health_recover); return 0; err_cmd: destroy_workqueue(health->wq_watchdog); err_watchdog: destroy_workqueue(health->wq); err_recovery: return -ENOMEM; } diff --git a/sys/dev/pccard/pccard.c b/sys/dev/pccard/pccard.c index 3bab77864347..da6bf20536f3 100644 --- a/sys/dev/pccard/pccard.c +++ b/sys/dev/pccard/pccard.c @@ -1,1486 +1,1486 @@ /* $NetBSD: pcmcia.c,v 1.23 2000/07/28 19:17:02 drochner Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Marc Horowitz. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Marc Horowitz. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #define PCCARDDEBUG /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, pccard, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCCARD parameters"); int pccard_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, debug, CTLFLAG_RWTUN, &pccard_debug, 0, "pccard debug"); int pccard_cis_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, cis_debug, CTLFLAG_RWTUN, &pccard_cis_debug, 0, "pccard CIS debug"); #ifdef PCCARDDEBUG #define DPRINTF(arg) if (pccard_debug) printf arg #define DEVPRINTF(arg) if (pccard_debug) device_printf arg #define PRVERBOSE(arg) printf arg #define DEVPRVERBOSE(arg) device_printf arg #else #define DPRINTF(arg) #define DEVPRINTF(arg) #define PRVERBOSE(arg) if (bootverbose) printf arg #define DEVPRVERBOSE(arg) if (bootverbose) device_printf arg #endif static int pccard_ccr_read(struct pccard_function *pf, int ccr); static void pccard_ccr_write(struct pccard_function *pf, int ccr, int val); static int pccard_attach_card(device_t dev); static int pccard_detach_card(device_t dev); static void pccard_function_init(struct pccard_function *pf, int entry); static void pccard_function_free(struct pccard_function *pf); static int pccard_function_enable(struct pccard_function *pf); static void pccard_function_disable(struct pccard_function *pf); static int pccard_probe(device_t dev); static int pccard_attach(device_t dev); static int pccard_detach(device_t dev); static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format); static int pccard_print_child(device_t dev, device_t child); static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count); static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp); static void pccard_delete_resource(device_t dev, device_t child, int type, int rid); static int pccard_set_res_flags(device_t dev, device_t child, int type, int rid, u_long flags); static int pccard_set_memory_offset(device_t dev, device_t child, int rid, 
uint32_t offset, uint32_t *deltap); static int pccard_probe_and_attach_child(device_t dev, device_t child, struct pccard_function *pf); static void pccard_probe_nomatch(device_t cbdev, device_t child); static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static void pccard_driver_added(device_t dev, driver_t *driver); static struct resource *pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static void pccard_child_detached(device_t parent, device_t dev); static int pccard_filter(void *arg); static void pccard_intr(void *arg); static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie); static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn); static int pccard_ccr_read(struct pccard_function *pf, int ccr) { return (bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + ccr)); } static void pccard_ccr_write(struct pccard_function *pf, int ccr, int val) { if ((pf->ccr_mask) & (1 << (ccr / 2))) { bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + ccr, val); } } static int pccard_set_default_descr(device_t dev) { const char *vendorstr, *prodstr; uint32_t vendor, prod; char *str; if (pccard_get_vendor_str(dev, &vendorstr)) return (0); if (pccard_get_product_str(dev, &prodstr)) return (0); if (vendorstr != NULL && prodstr != NULL) { str = malloc(strlen(vendorstr) + strlen(prodstr) + 2, M_DEVBUF, M_WAITOK); sprintf(str, "%s %s", vendorstr, prodstr); device_set_desc_copy(dev, str); free(str, M_DEVBUF); } else { if (pccard_get_vendor(dev, &vendor)) return (0); if (pccard_get_product(dev, &prod)) return (0); str = malloc(100, M_DEVBUF, M_WAITOK); snprintf(str, 100, "vendor=%#x product=%#x", vendor, prod); device_set_desc_copy(dev, str); free(str, M_DEVBUF); } return (0); } static int pccard_attach_card(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; struct pccard_ivar *ivar; device_t child; int i; if (!STAILQ_EMPTY(&sc->card.pf_head)) { if (bootverbose || pccard_debug) device_printf(dev, "Card already inserted.\n"); } DEVPRINTF((dev, "chip_socket_enable\n")); POWER_ENABLE_SOCKET(device_get_parent(dev), dev); DEVPRINTF((dev, "read_cis\n")); pccard_read_cis(sc); DEVPRINTF((dev, "check_cis_quirks\n")); pccard_check_cis_quirks(dev); /* * Bail now if the card has no functions, or if there was an error in * the CIS. */ if (sc->card.error) { device_printf(dev, "CARD ERROR!\n"); return (1); } if (STAILQ_EMPTY(&sc->card.pf_head)) { device_printf(dev, "Card has no functions!\n"); return (1); } if (bootverbose || pccard_debug) pccard_print_cis(dev); DEVPRINTF((dev, "functions scanning\n")); i = -1; STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { i++; if (STAILQ_EMPTY(&pf->cfe_head)) { device_printf(dev, "Function %d has no config entries!\n", i); continue; } pf->sc = sc; pf->cfe = NULL; pf->dev = NULL; } DEVPRINTF((dev, "Card has %d functions.
pccard_mfc is %d\n", i + 1, pccard_mfc(sc))); - mtx_lock(&Giant); + bus_topo_lock(); STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (STAILQ_EMPTY(&pf->cfe_head)) continue; ivar = malloc(sizeof(struct pccard_ivar), M_DEVBUF, M_WAITOK | M_ZERO); resource_list_init(&ivar->resources); child = device_add_child(dev, NULL, -1); device_set_ivars(child, ivar); ivar->pf = pf; pf->dev = child; pccard_probe_and_attach_child(dev, child, pf); } - mtx_unlock(&Giant); + bus_topo_unlock(); return (0); } static int pccard_probe_and_attach_child(device_t dev, device_t child, struct pccard_function *pf) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int error; /* * In NetBSD, the drivers are responsible for activating each * function of a card and selecting the config to use. In * FreeBSD, all that's done automatically in the typical lazy * way we do device resource allocation (except we pick the * cfe up front). This is the biggest departure from the * inherited NetBSD model, apart from the FreeBSD resource code. * * This seems to work well in practice for most cards. * However, there are two cases that are problematic. If a * driver wishes to pick and choose which config entry to use, * then this method falls down. These are usually older * cards. In addition, there are some cards that have * multiple hardware units on the card, but present only one * CIS chain. These cards are combination cards, but only one * of these units can be on at a time. * * To overcome this limitation, while preserving the basic * model, the probe routine can select a cfe and try to * activate it. If that succeeds, then we'll keep track of it * and let that information persist until we attach the card. * Probe routines that do this MUST return 0, and cannot * participate in the bidding process for a device. This * seems harsh until you realize that if a probe routine knows * enough to override the cfe we pick, then chances are very * very good that it is the only driver that could hope to * cope with the card. Bidding is for generic drivers, and * while some of them may also match, none of them will do * configuration override. */ error = device_probe(child); if (error != 0) goto out; pccard_function_init(pf, -1); if (sc->sc_enabled_count == 0) POWER_ENABLE_SOCKET(device_get_parent(dev), dev); if (pccard_function_enable(pf) == 0 && pccard_set_default_descr(child) == 0 && device_attach(child) == 0) { DEVPRINTF((sc->dev, "function %d CCR at %d offset %#x " "mask %#x: %#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", pf->number, pf->pf_ccr_window, pf->pf_ccr_offset, pf->ccr_mask, pccard_ccr_read(pf, 0x00), pccard_ccr_read(pf, 0x02), pccard_ccr_read(pf, 0x04), pccard_ccr_read(pf, 0x06), pccard_ccr_read(pf, 0x0A), pccard_ccr_read(pf, 0x0C), pccard_ccr_read(pf, 0x0E), pccard_ccr_read(pf, 0x10), pccard_ccr_read(pf, 0x12))); return (0); } error = ENXIO; out:; /* * The probe may fail AND also have selected a cfe; if so, free * it. This is how we do cfe override. Or the attach may fail. * Either way, we have to clean up. */ if (pf->cfe != NULL) pccard_function_disable(pf); pf->cfe = NULL; pccard_function_free(pf); return error; } static int pccard_detach_card(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; struct pccard_config_entry *cfe; struct pccard_ivar *devi; int state; /* * We are running on either the PCCARD socket's event thread * or in user context detaching a device by user request.
*/ STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (pf->dev == NULL) continue; state = device_get_state(pf->dev); if (state == DS_ATTACHED || state == DS_BUSY) device_detach(pf->dev); if (pf->cfe != NULL) pccard_function_disable(pf); pccard_function_free(pf); devi = PCCARD_IVAR(pf->dev); device_delete_child(dev, pf->dev); free(devi, M_DEVBUF); } if (sc->sc_enabled_count == 0) POWER_DISABLE_SOCKET(device_get_parent(dev), dev); while (NULL != (pf = STAILQ_FIRST(&sc->card.pf_head))) { while (NULL != (cfe = STAILQ_FIRST(&pf->cfe_head))) { STAILQ_REMOVE_HEAD(&pf->cfe_head, cfe_list); free(cfe, M_DEVBUF); } STAILQ_REMOVE_HEAD(&sc->card.pf_head, pf_list); free(pf, M_DEVBUF); } STAILQ_INIT(&sc->card.pf_head); return (0); } static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn) { const struct pccard_product *ent; int matches; uint32_t vendor; uint32_t prod; const char *vendorstr; const char *prodstr; const char *cis3str; const char *cis4str; #ifdef DIAGNOSTIC if (sizeof *ent > ent_size) panic("pccard_product_lookup: bogus ent_size %jd", (intmax_t) ent_size); #endif if (pccard_get_vendor(dev, &vendor)) return (NULL); if (pccard_get_product(dev, &prod)) return (NULL); if (pccard_get_vendor_str(dev, &vendorstr)) return (NULL); if (pccard_get_product_str(dev, &prodstr)) return (NULL); if (pccard_get_cis3_str(dev, &cis3str)) return (NULL); if (pccard_get_cis4_str(dev, &cis4str)) return (NULL); for (ent = tab; ent->pp_vendor != 0; ent = (const struct pccard_product *) ((const char *) ent + ent_size)) { matches = 1; if (ent->pp_vendor == PCCARD_VENDOR_ANY && ent->pp_product == PCCARD_PRODUCT_ANY && ent->pp_cis[0] == NULL && ent->pp_cis[1] == NULL) { if (ent->pp_name) device_printf(dev, "Total wildcard entry ignored for %s\n", ent->pp_name); continue; } if (matches && ent->pp_vendor != PCCARD_VENDOR_ANY && vendor != ent->pp_vendor) matches = 0; if (matches && ent->pp_product != PCCARD_PRODUCT_ANY && prod != ent->pp_product) matches = 0; if (matches && ent->pp_cis[0] && (vendorstr == NULL || strcmp(ent->pp_cis[0], vendorstr) != 0)) matches = 0; if (matches && ent->pp_cis[1] && (prodstr == NULL || strcmp(ent->pp_cis[1], prodstr) != 0)) matches = 0; if (matches && ent->pp_cis[2] && (cis3str == NULL || strcmp(ent->pp_cis[2], cis3str) != 0)) matches = 0; if (matches && ent->pp_cis[3] && (cis4str == NULL || strcmp(ent->pp_cis[3], cis4str) != 0)) matches = 0; if (matchfn != NULL) matches = (*matchfn)(dev, ent, matches); if (matches) return (ent); } return (NULL); } /** * @brief pccard_select_cfe * * Select a cfe entry to use. Should be called from the pccard's probe * routine after it knows for sure that it wants this card. * * XXX I think we need to make this symbol be static, ala the kobj stuff * we do for everything else. This is a quick hack. */ int pccard_select_cfe(device_t dev, int entry) { struct pccard_ivar *devi = PCCARD_IVAR(dev); struct pccard_function *pf = devi->pf; pccard_function_init(pf, entry); return (pf->cfe ? 0 : ENOMEM); } /* * Initialize a PCCARD function. May be called as long as the function is * disabled. * * Note: pccard_function_init should not keep resources allocated. It should * only set them up ala isa pnp, set the values in the rl lists, and return. * Any resource held after pccard_function_init is called is a bug. However, * the bus routines to get the resources also assume that pccard_function_init * does this, so they need to be fixed too. 
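pccard_select_cfe() above is the hook that the long comment in pccard_probe_and_attach_child() describes: a probe routine that must override the automatically chosen config entry calls it and then returns 0 instead of bidding. A hypothetical client probe, sketched under the assumptions that mydrv_products is an existing struct pccard_product table in the driver and that entry 1 is the one this imagined card needs:

static int
mydrv_probe(device_t dev)
{
	const struct pccard_product *pp;

	pp = pccard_product_lookup(dev, mydrv_products,
	    sizeof(mydrv_products[0]), NULL);
	if (pp == NULL)
		return (ENXIO);
	if (pp->pp_name != NULL)
		device_set_desc(dev, pp->pp_name);
	/*
	 * Force config entry 1 (entry number assumed for illustration).
	 * Having overridden the cfe, we must return 0, not a bid.
	 */
	if (pccard_select_cfe(dev, 1) != 0)
		return (ENXIO);
	return (0);
}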
*/ static void pccard_function_init(struct pccard_function *pf, int entry) { struct pccard_config_entry *cfe; struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; struct resource *r = NULL; struct pccard_ce_iospace *ios; struct pccard_ce_memspace *mems; device_t bus; rman_res_t start, end, len; int i, rid, spaces; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_init: function is enabled\n"); return; } /* * The driver's probe routine already requested a specific entry, * and that request succeeded. */ if (pf->cfe != NULL) return; /* * Walk the list of configuration entries until we find one for * which we can allocate all the resources. */ bus = device_get_parent(pf->dev); STAILQ_FOREACH(cfe, &pf->cfe_head, cfe_list) { if (cfe->iftype != PCCARD_IFTYPE_IO) continue; if (entry != -1 && cfe->number != entry) continue; spaces = 0; for (i = 0; i < cfe->num_iospace; i++) { ios = cfe->iospace + i; start = ios->start; if (start) end = start + ios->length - 1; else end = ~0; DEVPRINTF((bus, "I/O rid %d start %#jx end %#jx\n", i, start, end)); rid = i; len = ios->length; r = bus_alloc_resource(bus, SYS_RES_IOPORT, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "I/O rid %d failed\n", i)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IOPORT, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d IOPORT", rid); rle->res = r; spaces++; } for (i = 0; i < cfe->num_memspace; i++) { mems = cfe->memspace + i; start = mems->cardaddr + mems->hostaddr; if (start) end = start + mems->length - 1; else end = ~0; DEVPRINTF((bus, "Memory rid %d start %#jx end %#jx\ncardaddr %#jx hostaddr %#jx length %#jx\n", i, start, end, mems->cardaddr, mems->hostaddr, mems->length)); rid = i; len = mems->length; r = bus_alloc_resource(bus, SYS_RES_MEMORY, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "Memory rid %d failed\n", i)); // goto not_this_one; continue; } rle = resource_list_add(rl, SYS_RES_MEMORY, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d MEM", rid); rle->res = r; spaces++; } if (spaces == 0) { DEVPRINTF((bus, "Neither memory nor I/O mapped\n")); goto not_this_one; } if (cfe->irqmask) { rid = 0; r = bus_alloc_resource_any(bus, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (r == NULL) { DEVPRINTF((bus, "IRQ rid %d failed\n", rid)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IRQ, rid, rman_get_start(r), rman_get_end(r), 1); if (rle == NULL) panic("Cannot add resource rid %d IRQ", rid); rle->res = r; } /* If we get to here, we've allocated all we need */ pf->cfe = cfe; break; not_this_one:; DEVPRVERBOSE((bus, "Allocation failed for cfe %d\n", cfe->number)); resource_list_purge(rl); } } /* * Free resources allocated by pccard_function_init(). May be called as long * as the function is disabled. * * NOTE: This function should be unnecessary. pccard_function_init should * never keep resources allocated. */ static void pccard_function_free(struct pccard_function *pf) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_free: function is enabled\n"); return; } STAILQ_FOREACH(rle, &devi->resources, link) { if (rle->res) { if (rman_get_device(rle->res) != pf->sc->dev) device_printf(pf->sc->dev, "function_free: Resource still owned by " "child, oops. 
" "(type=%d, rid=%d, addr=%#jx)\n", rle->type, rle->rid, rman_get_start(rle->res)); BUS_RELEASE_RESOURCE(device_get_parent(pf->sc->dev), pf->sc->dev, rle->type, rle->rid, rle->res); rle->res = NULL; } } resource_list_free(&devi->resources); } static void pccard_mfc_adjust_iobase(struct pccard_function *pf, rman_res_t addr, rman_res_t offset, rman_res_t size) { bus_size_t iosize, tmp; if (addr != 0) { if (pf->pf_mfc_iomax == 0) { pf->pf_mfc_iobase = addr + offset; pf->pf_mfc_iomax = pf->pf_mfc_iobase + size; } else { /* this makes the assumption that nothing overlaps */ if (pf->pf_mfc_iobase > addr + offset) pf->pf_mfc_iobase = addr + offset; if (pf->pf_mfc_iomax < addr + offset + size) pf->pf_mfc_iomax = addr + offset + size; } } tmp = pf->pf_mfc_iomax - pf->pf_mfc_iobase; /* round up to nearest (2^n)-1 */ for (iosize = 1; iosize < tmp; iosize <<= 1) ; iosize--; DEVPRINTF((pf->dev, "MFC: I/O base %#jx IOSIZE %#jx\n", (uintmax_t)pf->pf_mfc_iobase, (uintmax_t)(iosize + 1))); pccard_ccr_write(pf, PCCARD_CCR_IOBASE0, pf->pf_mfc_iobase & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE1, (pf->pf_mfc_iobase >> 8) & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE2, 0); pccard_ccr_write(pf, PCCARD_CCR_IOBASE3, 0); pccard_ccr_write(pf, PCCARD_CCR_IOSIZE, iosize); } /* Enable a PCCARD function */ static int pccard_function_enable(struct pccard_function *pf) { struct pccard_function *tmp; int reg; device_t dev = pf->sc->dev; if (pf->cfe == NULL) { DEVPRVERBOSE((dev, "No config entry could be allocated.\n")); return (ENOMEM); } if (pf->pf_flags & PFF_ENABLED) return (0); pf->sc->sc_enabled_count++; /* * it's possible for different functions' CCRs to be in the same * underlying page. Check for that. */ STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) { pf->pf_ccrt = tmp->pf_ccrt; pf->pf_ccrh = tmp->pf_ccrh; pf->pf_ccr_realsize = tmp->pf_ccr_realsize; /* * pf->pf_ccr_offset = (tmp->pf_ccr_offset - * tmp->ccr_base) + pf->ccr_base; */ /* pf->pf_ccr_offset = (tmp->pf_ccr_offset + pf->ccr_base) - tmp->ccr_base; */ pf->pf_ccr_window = tmp->pf_ccr_window; break; } } if (tmp == NULL) { pf->ccr_rid = 0; pf->ccr_res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &pf->ccr_rid, PCCARD_MEM_PAGE_SIZE, RF_ACTIVE); if (!pf->ccr_res) goto bad; DEVPRINTF((dev, "ccr_res == %#jx-%#jx, base=%#x\n", rman_get_start(pf->ccr_res), rman_get_end(pf->ccr_res), pf->ccr_base)); CARD_SET_RES_FLAGS(device_get_parent(dev), dev, SYS_RES_MEMORY, pf->ccr_rid, PCCARD_A_MEM_ATTR); CARD_SET_MEMORY_OFFSET(device_get_parent(dev), dev, pf->ccr_rid, pf->ccr_base, &pf->pf_ccr_offset); pf->pf_ccrt = rman_get_bustag(pf->ccr_res); pf->pf_ccrh = rman_get_bushandle(pf->ccr_res); pf->pf_ccr_realsize = 1; } reg = (pf->cfe->number & PCCARD_CCR_OPTION_CFINDEX); reg |= PCCARD_CCR_OPTION_LEVIREQ; if (pccard_mfc(pf->sc)) { reg |= (PCCARD_CCR_OPTION_FUNC_ENABLE | PCCARD_CCR_OPTION_ADDR_DECODE); /* PCCARD_CCR_OPTION_IRQ_ENABLE set elsewhere as needed */ } pccard_ccr_write(pf, PCCARD_CCR_OPTION, reg); reg = 0; if ((pf->cfe->flags & PCCARD_CFE_IO16) == 0) reg |= PCCARD_CCR_STATUS_IOIS8; if (pf->cfe->flags & PCCARD_CFE_AUDIO) reg |= PCCARD_CCR_STATUS_AUDIO; pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg); pccard_ccr_write(pf, PCCARD_CCR_SOCKETCOPY, 0); if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, 0, 0, 0); #ifdef PCCARDDEBUG if (pccard_debug) { 
STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { device_printf(tmp->sc->dev, "function %d CCR at %d offset %#x: " "%#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", tmp->number, tmp->pf_ccr_window, tmp->pf_ccr_offset, pccard_ccr_read(tmp, 0x00), pccard_ccr_read(tmp, 0x02), pccard_ccr_read(tmp, 0x04), pccard_ccr_read(tmp, 0x06), pccard_ccr_read(tmp, 0x0A), pccard_ccr_read(tmp, 0x0C), pccard_ccr_read(tmp, 0x0E), pccard_ccr_read(tmp, 0x10), pccard_ccr_read(tmp, 0x12)); } } #endif pf->pf_flags |= PFF_ENABLED; return (0); bad: /* * Decrement the reference count, and power down the socket, if * necessary. */ pf->sc->sc_enabled_count--; DEVPRINTF((dev, "bad --enabled_count = %d\n", pf->sc->sc_enabled_count)); return (1); } /* Disable PCCARD function. */ static void pccard_function_disable(struct pccard_function *pf) { struct pccard_function *tmp; device_t dev = pf->sc->dev; if (pf->cfe == NULL) panic("pccard_function_disable: function not initialized"); if ((pf->pf_flags & PFF_ENABLED) == 0) return; if (pf->intr_handler != NULL) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle = resource_list_find(&devi->resources, SYS_RES_IRQ, 0); if (rle == NULL) panic("Can't disable an interrupt with no IRQ res\n"); BUS_TEARDOWN_INTR(dev, pf->dev, rle->res, pf->intr_handler_cookie); } /* * it's possible for different functions' CCRs to be in the same * underlying page. Check for that. Note we mark us as disabled * first to avoid matching ourself. */ pf->pf_flags &= ~PFF_ENABLED; STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) break; } /* Not used by anyone else; unmap the CCR. */ if (tmp == NULL) { bus_release_resource(dev, SYS_RES_MEMORY, pf->ccr_rid, pf->ccr_res); pf->ccr_res = NULL; } /* * Decrement the reference count, and power down the socket, if * necessary. 
*/ pf->sc->sc_enabled_count--; } #define PCCARD_NPORT 2 #define PCCARD_NMEM 5 #define PCCARD_NIRQ 1 #define PCCARD_NDRQ 0 static int pccard_probe(device_t dev) { device_set_desc(dev, "16-bit PCCard bus"); return (0); } static int pccard_attach(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int err; sc->dev = dev; sc->sc_enabled_count = 0; if ((err = pccard_device_create(sc)) != 0) return (err); gone_in_dev(dev, 13, "PC Card to be removed."); STAILQ_INIT(&sc->card.pf_head); return (bus_generic_attach(dev)); } static int pccard_detach(device_t dev) { pccard_detach_card(dev); pccard_device_destroy(device_get_softc(dev)); return (0); } static int pccard_suspend(device_t self) { pccard_detach_card(self); return (0); } static int pccard_resume(device_t self) { return (0); } static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format) { struct resource_list_entry *rle; int printed; int i; printed = 0; for (i = 0; i < count; i++) { rle = resource_list_find(rl, type, i); if (rle != NULL) { if (printed == 0) printf(" %s ", name); else if (printed > 0) printf(","); printed++; printf(format, rle->start); if (rle->count > 1) { printf("-"); printf(format, rle->start + rle->count - 1); } } else if (i > 3) { /* check the first few regardless */ break; } } } static int pccard_print_child(device_t dev, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at"); if (devi != NULL) { pccard_print_resources(rl, "port", SYS_RES_IOPORT, PCCARD_NPORT, "%#lx"); pccard_print_resources(rl, "iomem", SYS_RES_MEMORY, PCCARD_NMEM, "%#lx"); pccard_print_resources(rl, "irq", SYS_RES_IRQ, PCCARD_NIRQ, "%ld"); pccard_print_resources(rl, "drq", SYS_RES_DRQ, PCCARD_NDRQ, "%ld"); retval += printf(" function %d config %d", devi->pf->number, devi->pf->cfe->number); } retval += bus_print_child_footer(dev, child); return (retval); } static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= PCCARD_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= PCCARD_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= PCCARD_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= PCCARD_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); if (NULL != resource_list_alloc(rl, device_get_parent(dev), dev, type, &rid, start, start + count - 1, count, 0)) return 0; else return ENOMEM; } static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL) return (ENOENT); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } static void pccard_delete_resource(device_t dev, device_t child, int type, int rid) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; resource_list_delete(rl, type, rid); } static int pccard_set_res_flags(device_t dev, 
device_t child, int type, int rid, u_long flags) { return (CARD_SET_RES_FLAGS(device_get_parent(dev), child, type, rid, flags)); } static int pccard_set_memory_offset(device_t dev, device_t child, int rid, uint32_t offset, uint32_t *deltap) { return (CARD_SET_MEMORY_OFFSET(device_get_parent(dev), child, rid, offset, deltap)); } static void pccard_probe_nomatch(device_t bus, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); int i; device_printf(bus, ""); printf(" (manufacturer=0x%04x, product=0x%04x, function_type=%d) " "at function %d\n", sc->card.manufacturer, sc->card.product, pf->function, pf->number); device_printf(bus, " CIS info: "); for (i = 0; sc->card.cis1_info[i] != NULL && i < 4; i++) printf("%s%s", i > 0 ? ", " : "", sc->card.cis1_info[i]); printf("\n"); return; } static int pccard_child_location_str(device_t bus, device_t child, char *buf, size_t buflen) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; snprintf(buf, buflen, "function=%d", pf->number); return (0); } static int pccard_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); struct sbuf sb; sbuf_new(&sb, buf, buflen, SBUF_FIXEDLEN | SBUF_INCLUDENUL); sbuf_printf(&sb, "manufacturer=0x%04x product=0x%04x " "cisvendor=\"", sc->card.manufacturer, sc->card.product); devctl_safe_quote_sb(&sb, sc->card.cis1_info[0]); sbuf_printf(&sb, "\" cisproduct=\""); devctl_safe_quote_sb(&sb, sc->card.cis1_info[1]); sbuf_printf(&sb, "\" function_type=%d", pf->function); sbuf_finish(&sb); sbuf_delete(&sb); return (0); } static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); if (!pf) panic("No pccard function pointer"); switch (which) { default: return (EINVAL); case PCCARD_IVAR_FUNCE_DISK: *(uint16_t *)result = pf->pf_funce_disk_interface | (pf->pf_funce_disk_power << 8); break; case PCCARD_IVAR_ETHADDR: bcopy(pf->pf_funce_lan_nid, result, ETHER_ADDR_LEN); break; case PCCARD_IVAR_VENDOR: *(uint32_t *)result = sc->card.manufacturer; break; case PCCARD_IVAR_PRODUCT: *(uint32_t *)result = sc->card.product; break; case PCCARD_IVAR_PRODEXT: *(uint16_t *)result = sc->card.prodext; break; case PCCARD_IVAR_FUNCTION: *(uint32_t *)result = pf->function; break; case PCCARD_IVAR_FUNCTION_NUMBER: *(uint32_t *)result = pf->number; break; case PCCARD_IVAR_VENDOR_STR: *(const char **)result = sc->card.cis1_info[0]; break; case PCCARD_IVAR_PRODUCT_STR: *(const char **)result = sc->card.cis1_info[1]; break; case PCCARD_IVAR_CIS3_STR: *(const char **)result = sc->card.cis1_info[2]; break; case PCCARD_IVAR_CIS4_STR: *(const char **)result = sc->card.cis1_info[3]; break; } return (0); } static void pccard_driver_added(device_t dev, driver_t *driver) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; device_t child; STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (STAILQ_EMPTY(&pf->cfe_head)) continue; child = pf->dev; if (device_get_state(child) != DS_NOTPRESENT) continue; pccard_probe_and_attach_child(dev, child, pf); } return; } static struct resource * pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, 
u_int flags) { struct pccard_ivar *dinfo; struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != dev); int isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); struct resource *r = NULL; /* XXX I'm no longer sure this is right */ if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); } dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, *rid); if (rle == NULL && isdefault) return (NULL); /* no resource of that type/rid */ if (rle == NULL || rle->res == NULL) { /* XXX Need to adjust flags */ r = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (r == NULL) goto bad; resource_list_add(&dinfo->resources, type, *rid, rman_get_start(r), rman_get_end(r), count); rle = resource_list_find(&dinfo->resources, type, *rid); if (!rle) goto bad; rle->res = r; } /* * If dev doesn't own the device, then we can't give this device * out. */ if (rman_get_device(rle->res) != dev) return (NULL); rman_set_device(rle->res, child); if (flags & RF_ACTIVE) BUS_ACTIVATE_RESOURCE(dev, child, type, *rid, rle->res); return (rle->res); bad:; device_printf(dev, "WARNING: Resource not reserved by pccard\n"); return (NULL); } static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *dinfo; int passthrough = (device_get_parent(child) != dev); struct resource_list_entry *rle = NULL; if (passthrough) return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r); dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, rid); if (!rle) { device_printf(dev, "Allocated resource not found, " "%d %#x %#jx %#jx\n", type, rid, rman_get_start(r), rman_get_size(r)); return ENOENT; } if (!rle->res) { device_printf(dev, "Allocated resource not recorded\n"); return ENOENT; } /* * Deactivate the resource (since it is being released), and * assign it to the bus. */ BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, rle->res); rman_set_device(rle->res, dev); return (0); } static void pccard_child_detached(device_t parent, device_t dev) { struct pccard_ivar *ivar = PCCARD_IVAR(dev); struct pccard_function *pf = ivar->pf; pccard_function_disable(pf); } static int pccard_filter(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; int reg; int doisr = 1; /* * MFC cards know if they interrupted, so we have to ack the * interrupt and call the ISR. Non-MFC cards don't have these * bits, so they always get called. Many non-MFC cards have * this bit set always upon read, but some do not. * * We always ack the interrupt, even if there's no ISR * for the card. This is done on the theory that acking * the interrupt will pacify the card enough to keep an * interrupt storm from happening. Of course this won't * help in the non-MFC case. * * This has no impact for MPSAFEness of the client drivers. * We register this with whatever flags the intr_handler * was registered with. All these functions are MPSAFE. 
*/ if (pccard_mfc(pf->sc)) { reg = pccard_ccr_read(pf, PCCARD_CCR_STATUS); if (reg & PCCARD_CCR_STATUS_INTR) pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg & ~PCCARD_CCR_STATUS_INTR); else doisr = 0; } if (doisr) { if (pf->intr_filter != NULL) return (pf->intr_filter(pf->intr_handler_arg)); return (FILTER_SCHEDULE_THREAD); } return (FILTER_STRAY); } static void pccard_intr(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; pf->intr_handler(pf->intr_handler_arg); } static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int err; if (pf->intr_filter != NULL || pf->intr_handler != NULL) panic("Only one interrupt handler per function allowed"); pf->intr_filter = filt; pf->intr_handler = intr; pf->intr_handler_arg = arg; err = bus_generic_setup_intr(dev, child, irq, flags, pccard_filter, intr ? pccard_intr : NULL, pf, cookiep); if (err != 0) { pf->intr_filter = NULL; pf->intr_handler = NULL; return (err); } pf->intr_handler_cookie = *cookiep; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) | PCCARD_CCR_OPTION_IREQ_ENABLE); } return (0); } static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int ret; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) & ~PCCARD_CCR_OPTION_IREQ_ENABLE); } ret = bus_generic_teardown_intr(dev, child, r, cookie); if (ret == 0) { pf->intr_handler = NULL; pf->intr_handler_arg = NULL; pf->intr_handler_cookie = NULL; } return (ret); } static int pccard_activate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; switch(type) { case SYS_RES_IOPORT: /* * We need to adjust IOBASE[01] and IOSIZE if we're an MFC * card. */ if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, rman_get_start(r), 0, rman_get_size(r)); break; default: break; } return (bus_generic_activate_resource(brdev, child, type, rid, r)); } static int pccard_deactivate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { /* XXX undo pccard_activate_resource? XXX */ return (bus_generic_deactivate_resource(brdev, child, type, rid, r)); } static int pccard_attr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. 
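From a client driver's point of view the filter/handler split above is invisible: the child registers its interrupt in the usual newbus way, and pccard_setup_intr() interposes pccard_filter() so the CCR gets acked on MFC cards. A hypothetical client registration (mydrv_* names are illustrative, not from this file):

struct mydrv_softc {
	struct resource	*irq_res;
	void		*irq_cookie;
};

static void mydrv_intr(void *arg);	/* ithread handler, defined elsewhere */

static int
mydrv_attach(device_t dev)
{
	struct mydrv_softc *sc = device_get_softc(dev);
	int rid = 0, error;

	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->irq_res == NULL)
		return (ENXIO);
	/* pccard supplies the filter; we only pass the ithread handler. */
	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, mydrv_intr, sc, &sc->irq_cookie);
	if (error != 0)
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->irq_res);
	return (error);
}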
*/ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_attr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. */ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE, val); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset, val); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_ccr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); *val = pccard_ccr_read(devi->pf, offset); DEVPRINTF((child, "ccr_read of %#x (%#x) is %#x\n", offset, devi->pf->pf_ccr_offset, *val)); return 0; } static int pccard_ccr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Can't use pccard_ccr_write since client drivers may access * registers not contained in the 'mask' if they are non-standard. */ DEVPRINTF((child, "ccr_write of %#x to %#x (%#x)\n", val, offset, devi->pf->pf_ccr_offset)); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + offset, val); return 0; } static device_method_t pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pccard_probe), DEVMETHOD(device_attach, pccard_attach), DEVMETHOD(device_detach, pccard_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pccard_suspend), DEVMETHOD(device_resume, pccard_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pccard_print_child), DEVMETHOD(bus_driver_added, pccard_driver_added), DEVMETHOD(bus_child_detached, pccard_child_detached), DEVMETHOD(bus_alloc_resource, pccard_alloc_resource), DEVMETHOD(bus_release_resource, pccard_release_resource), DEVMETHOD(bus_activate_resource, pccard_activate_resource), DEVMETHOD(bus_deactivate_resource, pccard_deactivate_resource), DEVMETHOD(bus_setup_intr, pccard_setup_intr), DEVMETHOD(bus_teardown_intr, pccard_teardown_intr), DEVMETHOD(bus_set_resource, pccard_set_resource), DEVMETHOD(bus_get_resource, pccard_get_resource), DEVMETHOD(bus_delete_resource, pccard_delete_resource), DEVMETHOD(bus_probe_nomatch, pccard_probe_nomatch), DEVMETHOD(bus_read_ivar, pccard_read_ivar), DEVMETHOD(bus_child_pnpinfo_str, pccard_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, pccard_child_location_str), /* Card Interface */ DEVMETHOD(card_set_res_flags, pccard_set_res_flags), DEVMETHOD(card_set_memory_offset, pccard_set_memory_offset), DEVMETHOD(card_attach_card, pccard_attach_card), DEVMETHOD(card_detach_card, pccard_detach_card), DEVMETHOD(card_do_product_lookup, pccard_do_product_lookup), DEVMETHOD(card_cis_scan, pccard_scan_cis), DEVMETHOD(card_attr_read, pccard_attr_read_impl), DEVMETHOD(card_attr_write, pccard_attr_write_impl), 
DEVMETHOD(card_ccr_read, pccard_ccr_read_impl), DEVMETHOD(card_ccr_write, pccard_ccr_write_impl), { 0, 0 } }; static driver_t pccard_driver = { "pccard", pccard_methods, sizeof(struct pccard_softc) }; devclass_t pccard_devclass; /* Maybe we need to have a slot device? */ DRIVER_MODULE(pccard, pcic, pccard_driver, pccard_devclass, 0, 0); DRIVER_MODULE(pccard, cbb, pccard_driver, pccard_devclass, 0, 0); MODULE_VERSION(pccard, 1); diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c index cecf75024d3f..0945606bbc50 100644 --- a/sys/dev/pci/pci_pci.c +++ b/sys/dev/pci/pci_pci.c @@ -1,3021 +1,3021 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * PCI:PCI bridge support. 
*/ #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static int pcib_probe(device_t dev); static int pcib_suspend(device_t dev); static int pcib_resume(device_t dev); static int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate); static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width); static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width); static int pcib_ari_maxslots(device_t dev); static int pcib_ari_maxfuncs(device_t dev); static int pcib_try_enable_ari(device_t pcib, device_t dev); static int pcib_ari_enabled(device_t pcib); static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); #ifdef PCI_HP static void pcib_pcie_ab_timeout(void *arg, int pending); static void pcib_pcie_cc_timeout(void *arg, int pending); static void pcib_pcie_dll_timeout(void *arg, int pending); #endif static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature); static int pcib_reset_child(device_t dev, device_t child, int flags); static device_method_t pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcib_probe), DEVMETHOD(device_attach, pcib_attach), DEVMETHOD(device_detach, pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pcib_suspend), DEVMETHOD(device_resume, pcib_resume), /* Bus interface */ DEVMETHOD(bus_child_present, pcib_child_present), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, pcib_adjust_resource), DEVMETHOD(bus_release_resource, pcib_release_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), #endif DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_reset_child, pcib_reset_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_ari_maxslots), DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep), DEVMETHOD(pcib_get_id, pcib_ari_get_id), DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari), DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled), DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid), DEVMETHOD(pcib_request_feature, pcib_request_feature_default), DEVMETHOD_END }; static devclass_t pcib_devclass; DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc)); EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL, BUS_PASS_BUS); #if defined(NEW_PCIB) || defined(PCI_HP) SYSCTL_DECL(_hw_pci); #endif #ifdef NEW_PCIB static int pci_clear_pcib; SYSCTL_INT(_hw_pci, OID_AUTO, 
clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0, "Clear firmware-assigned resources for PCI-PCI bridge I/O windows."); /* * Get the corresponding window if this resource from a child device was * sub-allocated from one of our window resource managers. */ static struct pcib_window * pcib_get_resource_window(struct pcib_softc *sc, int type, struct resource *r) { switch (type) { case SYS_RES_IOPORT: if (rman_is_region_manager(r, &sc->io.rman)) return (&sc->io); break; case SYS_RES_MEMORY: /* Prefetchable resources may live in either memory rman. */ if (rman_get_flags(r) & RF_PREFETCHABLE && rman_is_region_manager(r, &sc->pmem.rman)) return (&sc->pmem); if (rman_is_region_manager(r, &sc->mem.rman)) return (&sc->mem); break; } return (NULL); } /* * Is a resource from a child device sub-allocated from one of our * resource managers? */ static int pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r) { #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (rman_is_region_manager(r, &sc->bus.rman)); #endif return (pcib_get_resource_window(sc, type, r) != NULL); } static int pcib_is_window_open(struct pcib_window *pw) { return (pw->valid && pw->base < pw->limit); } /* * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and * handle for the resource, we could pass RF_ACTIVE up to the PCI bus * when allocating the resource windows and rely on the PCI bus driver * to do this for us. */ static void pcib_activate_window(struct pcib_softc *sc, int type) { PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type); } static void pcib_write_windows(struct pcib_softc *sc, int mask) { device_t dev; uint32_t val; dev = sc->dev; if (sc->io.valid && mask & WIN_IO) { val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { pci_write_config(dev, PCIR_IOBASEH_1, sc->io.base >> 16, 2); pci_write_config(dev, PCIR_IOLIMITH_1, sc->io.limit >> 16, 2); } pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1); pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1); } if (mask & WIN_MEM) { pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2); } if (sc->pmem.valid && mask & WIN_PMEM) { val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { pci_write_config(dev, PCIR_PMBASEH_1, sc->pmem.base >> 32, 4); pci_write_config(dev, PCIR_PMLIMITH_1, sc->pmem.limit >> 32, 4); } pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2); } } /* * This is used to reject I/O port allocations that conflict with an * ISA alias range. */ static int pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end, rman_res_t count) { rman_res_t next_alias; if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE)) return (0); /* Only check fixed ranges for overlap. */ if (start + count - 1 != end) return (0); /* ISA aliases are only in the lower 64KB of I/O space. */ if (start >= 65536) return (0); /* Check for overlap with 0x000 - 0x0ff as a special case. */ if (start < 0x100) goto alias; /* * If the start address is an alias, the range is an alias. * Otherwise, compute the start of the next alias range and * check if it is before the end of the candidate range. 
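 *
 * Worked example: a fixed request for 0x480-0x48f has
 * (0x480 & 0x300) == 0, so the start is not itself an alias; the next
 * alias range then begins at (0x480 & ~0x3ff) | 0x100 == 0x500, which is
 * past 0x48f, so the range is accepted.  A request for 0x480-0x57f
 * instead reaches into the 0x500-0x7ff alias region and is rejected.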
*/ if ((start & 0x300) != 0) goto alias; next_alias = (start & ~0x3fful) | 0x100; if (next_alias <= end) goto alias; return (0); alias: if (bootverbose) device_printf(sc->dev, "I/O range %#jx-%#jx overlaps with an ISA alias\n", start, end); return (1); } static void pcib_add_window_resources(struct pcib_window *w, struct resource **res, int count) { struct resource **newarray; int error, i; newarray = malloc(sizeof(struct resource *) * (w->count + count), M_DEVBUF, M_WAITOK); if (w->res != NULL) bcopy(w->res, newarray, sizeof(struct resource *) * w->count); bcopy(res, newarray + w->count, sizeof(struct resource *) * count); free(w->res, M_DEVBUF); w->res = newarray; w->count += count; for (i = 0; i < count; i++) { error = rman_manage_region(&w->rman, rman_get_start(res[i]), rman_get_end(res[i])); if (error) panic("Failed to add resource to rman"); } } typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg); static void pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb, void *arg) { rman_res_t next_end; /* * If start is within an ISA alias range, move up to the start * of the next non-alias range. As a special case, addresses * in the range 0x000 - 0x0ff should also be skipped since * those are used for various system I/O devices in ISA * systems. */ if (start <= 65535) { if (start < 0x100 || (start & 0x300) != 0) { start &= ~0x3ff; start += 0x400; } } /* ISA aliases are only in the lower 64KB of I/O space. */ while (start <= MIN(end, 65535)) { next_end = MIN(start | 0xff, end); cb(start, next_end, arg); start += 0x400; } if (start <= end) cb(start, end, arg); } static void count_ranges(rman_res_t start, rman_res_t end, void *arg) { int *countp; countp = arg; (*countp)++; } struct alloc_state { struct resource **res; struct pcib_softc *sc; int count, error; }; static void alloc_ranges(rman_res_t start, rman_res_t end, void *arg) { struct alloc_state *as; struct pcib_window *w; int rid; as = arg; if (as->error != 0) return; w = &as->sc->io; rid = w->reg; if (bootverbose) device_printf(as->sc->dev, "allocating non-ISA range %#jx-%#jx\n", start, end); as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT, &rid, start, end, end - start + 1, 0); if (as->res[as->count] == NULL) as->error = ENXIO; else as->count++; } static int pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end) { struct alloc_state as; int i, new_count; /* First, see how many ranges we need. */ new_count = 0; pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count); /* Second, allocate the ranges. */ as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF, M_WAITOK); as.sc = sc; as.count = 0; as.error = 0; pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as); if (as.error != 0) { for (i = 0; i < as.count; i++) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io.reg, as.res[i]); free(as.res, M_DEVBUF); return (as.error); } KASSERT(as.count == new_count, ("%s: count mismatch", __func__)); /* Third, add the ranges to the window. 
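 * For example, a single request for 0x400-0xfff is carved up by
 * pcib_walk_nonisa_ranges() into the three non-alias pieces 0x400-0x4ff,
 * 0x800-0x8ff and 0xc00-0xcff, each allocated from the parent above and
 * appended to the window here.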
*/ pcib_add_window_resources(&sc->io, as.res, as.count); free(as.res, M_DEVBUF); return (0); } static void pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type, int flags, pci_addr_t max_address) { struct resource *res; char buf[64]; int error, rid; if (max_address != (rman_res_t)max_address) max_address = ~0; w->rman.rm_start = 0; w->rman.rm_end = max_address; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s %s window", device_get_nameunit(sc->dev), w->name); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) panic("Failed to initialize %s %s rman", device_get_nameunit(sc->dev), w->name); if (!pcib_is_window_open(w)) return; if (w->base > max_address || w->limit > max_address) { device_printf(sc->dev, "initial %s window has too many bits, ignoring\n", w->name); return; } if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE) (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit); else { rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit, w->limit - w->base + 1, flags); if (res != NULL) pcib_add_window_resources(w, &res, 1); } if (w->res == NULL) { device_printf(sc->dev, "failed to allocate initial %s window: %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); w->base = max_address; w->limit = 0; pcib_write_windows(sc, w->mask); return; } pcib_activate_window(sc, type); } /* * Initialize I/O windows. */ static void pcib_probe_windows(struct pcib_softc *sc) { pci_addr_t max; device_t dev; uint32_t val; dev = sc->dev; if (pci_clear_pcib) { pcib_bridge_init(dev); } /* Determine if the I/O port window is implemented. */ val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if (val == 0) { /* * If 'val' is zero, then only 16-bits of I/O space * are supported. */ pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) { sc->io.valid = 1; pci_write_config(dev, PCIR_IOBASEL_1, 0, 1); } } else sc->io.valid = 1; /* Read the existing I/O port window. */ if (sc->io.valid) { sc->io.reg = PCIR_IOBASEL_1; sc->io.step = 12; sc->io.mask = WIN_IO; sc->io.name = "I/O port"; if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { sc->io.base = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), val); sc->io.limit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffffffff; } else { sc->io.base = PCI_PPBIOBASE(0, val); sc->io.limit = PCI_PPBIOLIMIT(0, pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffff; } pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max); } /* Read the existing memory window. */ sc->mem.valid = 1; sc->mem.reg = PCIR_MEMBASE_1; sc->mem.step = 20; sc->mem.mask = WIN_MEM; sc->mem.name = "memory"; sc->mem.base = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->mem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff); /* Determine if the prefetchable memory window is implemented. */ val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if (val == 0) { /* * If 'val' is zero, then only 32-bits of memory space * are supported. */ pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) { sc->pmem.valid = 1; pci_write_config(dev, PCIR_PMBASEL_1, 0, 2); } } else sc->pmem.valid = 1; /* Read the existing prefetchable memory window. 
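 * The low bits of PCIR_PMBASEL_1 encode the window width: when they read
 * back as PCIM_BRPM_64 the window is 64-bit and the upper halves of the
 * base and limit live in PCIR_PMBASEH_1 and PCIR_PMLIMITH_1, as decoded
 * below.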
*/ if (sc->pmem.valid) { sc->pmem.reg = PCIR_PMBASEL_1; sc->pmem.step = 20; sc->pmem.mask = WIN_PMEM; sc->pmem.name = "prefetch"; if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { sc->pmem.base = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), val); sc->pmem.limit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffffffffffff; } else { sc->pmem.base = PCI_PPBMEMBASE(0, val); sc->pmem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffff; } pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY, RF_PREFETCHABLE, max); } } static void pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type) { device_t dev; int error, i; if (!w->valid) return; dev = sc->dev; error = rman_fini(&w->rman); if (error) { device_printf(dev, "failed to release %s rman\n", w->name); return; } free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF); for (i = 0; i < w->count; i++) { error = bus_free_resource(dev, type, w->res[i]); if (error) device_printf(dev, "failed to release %s resource: %d\n", w->name, error); } free(w->res, M_DEVBUF); } static void pcib_free_windows(struct pcib_softc *sc) { pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->io, SYS_RES_IOPORT); } #ifdef PCI_RES_BUS /* * Allocate a suitable secondary bus for this bridge if needed and * initialize the resource manager for the secondary bus range. Note * that the minimum count is a desired value and this may allocate a * smaller range. */ void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count) { char buf[64]; int error, rid, sec_reg; switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; bus->sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; bus->sub_reg = PCIR_SUBBUS_2; break; default: panic("not a PCI bridge"); } bus->sec = pci_read_config(dev, sec_reg, 1); bus->sub = pci_read_config(dev, bus->sub_reg, 1); bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) panic("Failed to initialize %s bus number rman", device_get_nameunit(dev)); /* * Allocate a bus range. This will return an existing bus range * if one exists, or a new bus range if one does not. */ rid = 0; bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, min_count, 0); if (bus->res == NULL) { /* * Fall back to just allocating a range of a single bus * number. */ bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, 1, 0); } else if (rman_get_size(bus->res) < min_count) /* * Attempt to grow the existing range to satisfy the * minimum desired count. */ (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), rman_get_start(bus->res) + min_count - 1); /* * Add the initial resource to the rman. 
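 * For example, if the allocation came back as buses 2-5, bus->sec
 * becomes 2, bus->sub becomes 5, and child PCI buses are subsequently
 * sub-allocated out of the region [2, 5].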
*/ if (bus->res != NULL) { error = rman_manage_region(&bus->rman, rman_get_start(bus->res), rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sec = rman_get_start(bus->res); bus->sub = rman_get_end(bus->res); } } void pcib_free_secbus(device_t dev, struct pcib_secbus *bus) { int error; error = rman_fini(&bus->rman); if (error) { device_printf(dev, "failed to release bus number rman\n"); return; } free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF); error = bus_free_resource(dev, PCI_RES_BUS, bus->res); if (error) device_printf(dev, "failed to release bus numbers resource: %d\n", error); } static struct resource * pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; res = rman_reserve_resource(&bus->rman, start, end, count, flags, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(bus->dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); return (res); } /* * Attempt to grow the secondary bus range. This is much simpler than * for I/O windows as the range can only be grown by increasing * subbus. */ static int pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end) { rman_res_t old_end; int error; old_end = rman_get_end(bus->res); KASSERT(new_end > old_end, ("attempt to shrink subbus")); error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), new_end); if (error) return (error); if (bootverbose) device_printf(bus->dev, "grew bus range to %ju-%ju\n", rman_get_start(bus->res), rman_get_end(bus->res)); error = rman_manage_region(&bus->rman, old_end + 1, rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sub = rman_get_end(bus->res); pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1); return (0); } struct resource * pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t start_free, end_free, new_end; /* * First, see if the request can be satisfied by the existing * bus range. */ res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags); if (res != NULL) return (res); /* * Figure out a range to grow the bus range. First, find the * first bus number after the last allocated bus in the rman and * enforce that as a minimum starting point for the range. */ if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 || end_free != bus->sub) start_free = bus->sub + 1; if (start_free < start) start_free = start; new_end = start_free + count - 1; /* * See if this new range would satisfy the request if it * succeeds. */ if (new_end > end) return (NULL); /* Finally, attempt to grow the existing resource. */ if (bootverbose) { device_printf(bus->dev, "attempting to grow bus range for %ju buses\n", count); printf("\tback candidate range: %ju-%ju\n", start_free, new_end); } if (pcib_grow_subbus(bus, new_end) == 0) return (pcib_suballoc_bus(bus, child, rid, start, end, count, flags)); return (NULL); } #endif #else /* * Is the prefetch window open (eg, can we allocate memory in it?) */ static int pcib_is_prefetch_open(struct pcib_softc *sc) { return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit); } /* * Is the nonprefetch window open (eg, can we allocate memory in it?)
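 * A window that firmware has disabled is conventionally programmed with
 * its base above its limit, so "open" reduces to a non-zero base that is
 * below the limit.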
*/ static int pcib_is_nonprefetch_open(struct pcib_softc *sc) { return (sc->membase > 0 && sc->membase < sc->memlimit); } /* * Is the io window open (eg, can we allocate ports in it?) */ static int pcib_is_io_open(struct pcib_softc *sc) { return (sc->iobase > 0 && sc->iobase < sc->iolimit); } /* * Get current I/O decode. */ static void pcib_get_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iolow; dev = sc->dev; iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iobase = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow); else sc->iobase = PCI_PPBIOBASE(0, iolow); iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iolimit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow); else sc->iolimit = PCI_PPBIOLIMIT(0, iolow); } /* * Get current memory decode. */ static void pcib_get_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemlow; dev = sc->dev; sc->membase = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->memlimit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmembase = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow); else sc->pmembase = PCI_PPBMEMBASE(0, pmemlow); pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmemlimit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow); else sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow); } /* * Restore previous I/O decode. */ static void pcib_set_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iohi; dev = sc->dev; iohi = sc->iobase >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2); pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1); iohi = sc->iolimit >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2); pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1); } /* * Restore previous memory decode. */ static void pcib_set_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemhi; dev = sc->dev; pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2); pmemhi = sc->pmembase >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2); pmemhi = sc->pmemlimit >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2); } #endif #ifdef PCI_HP /* * PCI-express HotPlug support. */ static int pci_enable_pcie_hp = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN, &pci_enable_pcie_hp, 0, "Enable support for native PCI-express HotPlug."); TASKQUEUE_DEFINE_THREAD(pci_hp); static void pcib_probe_hotplug(struct pcib_softc *sc) { device_t dev; uint32_t link_cap; uint16_t link_sta, slot_sta; if (!pci_enable_pcie_hp) return; dev = sc->dev; if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0) return; if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT)) return; sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4); if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0) return; link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4); if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0) return; /* * Some devices report that they have an MRL when they actually * do not. 
Since they always report that the MRL is open, child * devices would be ignored. Try to detect these devices and * ignore their claim of HotPlug support. * * If there is an open MRL but the Data Link Layer is active, * the MRL is not real. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) { link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 && (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) { return; } } /* * Now that we're sure we want to do hot plug, ask the * firmware, if any, if that's OK. */ if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) { if (bootverbose) device_printf(dev, "Unable to activate hot plug feature.\n"); return; } sc->flags |= PCIB_HOTPLUG; } /* * Send a HotPlug command to the slot control register. If this slot * uses command completion interrupts and a previous command is still * in progress, then the command is dropped. Once the previous * command completes or times out, pcib_pcie_hotplug_update() will be * invoked to post a new command based on the slot's state at that * time. */ static void pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask) { device_t dev; uint16_t ctl, new; dev = sc->dev; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) return; ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2); new = (ctl & ~mask) | val; if (new == ctl) return; if (bootverbose) device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new); pcie_write_config(dev, PCIER_SLOT_CTL, new, 2); if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) && (ctl & new) & PCIEM_SLOT_CTL_CCIE) { sc->flags |= PCIB_HOTPLUG_CMD_PENDING; if (!cold) taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, hz); } } static void pcib_pcie_hotplug_command_completed(struct pcib_softc *sc) { device_t dev; dev = sc->dev; if (bootverbose) device_printf(dev, "Command Completed\n"); if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING)) return; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; wakeup(sc); } /* * Returns true if a card is fully inserted from the user's * perspective. It may not yet be ready for access, but the driver * can now start enabling access if necessary. */ static bool pcib_hotplug_inserted(struct pcib_softc *sc) { /* Pretend the card isn't present if a detach is forced. */ if (sc->flags & PCIB_DETACHING) return (false); /* Card must be present in the slot. */ if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0) return (false); /* A power fault implicitly turns off power to the slot. */ if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) return (false); /* If the MRL is disengaged, the slot is powered off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP && (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0) return (false); return (true); } /* * Returns -1 if the card is fully inserted, powered, and ready for * access. Otherwise, returns 0. */ static int pcib_hotplug_present(struct pcib_softc *sc) { /* Card must be inserted. */ if (!pcib_hotplug_inserted(sc)) return (0); /* Require the Data Link Layer to be active. */ if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)) return (0); return (-1); } static void pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask, bool schedule_task) { bool card_inserted, ei_engaged; /* Clear DETACHING if Presence Detect has cleared. 
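 * PCIEM_SLOT_STA_PDS is the live presence state and PCIEM_SLOT_STA_PDC
 * latches that it changed, so the test below matches exactly the case
 * "presence changed and the slot is now empty".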
*/ if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) == PCIEM_SLOT_STA_PDC) sc->flags &= ~PCIB_DETACHING; card_inserted = pcib_hotplug_inserted(sc); /* Turn the power indicator on if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) { mask |= PCIEM_SLOT_CTL_PIC; if (card_inserted) val |= PCIEM_SLOT_CTL_PI_ON; else if (sc->flags & PCIB_DETACH_PENDING) val |= PCIEM_SLOT_CTL_PI_BLINK; else val |= PCIEM_SLOT_CTL_PI_OFF; } /* Turn the power on via the Power Controller if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) { mask |= PCIEM_SLOT_CTL_PCC; if (card_inserted) val |= PCIEM_SLOT_CTL_PC_ON; else val |= PCIEM_SLOT_CTL_PC_OFF; } /* * If a card is inserted, enable the Electromechanical * Interlock. If a card is not inserted (or we are in the * process of detaching), disable the Electromechanical * Interlock. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) { mask |= PCIEM_SLOT_CTL_EIC; ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0; if (card_inserted != ei_engaged) val |= PCIEM_SLOT_CTL_EIC; } /* * Start a timer to see if the Data Link Layer times out. * Note that we only start the timer if Presence Detect or MRL Sensor * changed on this interrupt. Stop any scheduled timer if * the Data Link Layer is active. */ if (card_inserted && !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) && sc->pcie_slot_sta & (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) { if (cold) device_printf(sc->dev, "Data Link Layer inactive\n"); else taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, hz); } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, NULL); pcib_pcie_hotplug_command(sc, val, mask); /* * During attach the child "pci" device is added synchronously; * otherwise, the task is scheduled to manage the child * device. */ if (schedule_task && (pcib_hotplug_present(sc) != 0) != (sc->child != NULL)) taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task); } static void pcib_pcie_intr_hotplug(void *arg) { struct pcib_softc *sc; device_t dev; uint16_t old_slot_sta; sc = arg; dev = sc->dev; PCIB_HP_LOCK(sc); old_slot_sta = sc->pcie_slot_sta; sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear the events just reported. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); if (bootverbose) device_printf(dev, "HotPlug interrupt: %#x\n", sc->pcie_slot_sta); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) { if (sc->flags & PCIB_DETACH_PENDING) { device_printf(dev, "Attention Button Pressed: Detach Cancelled\n"); sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) { /* Only initiate detach sequence if device present. */ device_printf(dev, "Attention Button Pressed: Detaching in 5 seconds\n"); sc->flags |= PCIB_DETACH_PENDING; taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, 5 * hz); } } if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) device_printf(dev, "Power Fault Detected\n"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC) device_printf(dev, "MRL Sensor Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" : "closed"); if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) device_printf(dev, "Presence Detect Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? 
"card present" : "empty"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC) pcib_pcie_hotplug_command_completed(sc); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) { sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (bootverbose) device_printf(dev, "Data Link Layer State Changed to %s\n", sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ? "active" : "inactive"); } pcib_pcie_hotplug_update(sc, 0, 0, true); PCIB_HP_UNLOCK(sc); } static void pcib_pcie_hotplug_task(void *context, int pending) { struct pcib_softc *sc; device_t dev; sc = context; PCIB_HP_LOCK(sc); dev = sc->dev; if (pcib_hotplug_present(sc) != 0) { if (sc->child == NULL) { sc->child = device_add_child(dev, "pci", -1); bus_generic_attach(dev); } } else { if (sc->child != NULL) { if (device_delete_child(dev, sc->child) == 0) sc->child = NULL; } } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_ab_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; PCIB_HP_LOCK(sc); if (sc->flags & PCIB_DETACH_PENDING) { sc->flags |= PCIB_DETACHING; sc->flags &= ~PCIB_DETACH_PENDING; pcib_pcie_hotplug_update(sc, 0, 0, true); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_cc_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if (!(sta & PCIEM_SLOT_STA_CC)) { device_printf(dev, "HotPlug Command Timed Out\n"); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } else { device_printf(dev, "Missed HotPlug interrupt waiting for Command Completion\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_dll_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) { device_printf(dev, "Timed out waiting for Data Link Layer Active\n"); sc->flags |= PCIB_DETACHING; pcib_pcie_hotplug_update(sc, 0, 0, true); } else if (sta != sc->pcie_link_sta) { device_printf(dev, "Missed HotPlug interrupt waiting for DLL Active\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static int pcib_alloc_pcie_irq(struct pcib_softc *sc) { device_t dev; int count, error, rid; rid = -1; dev = sc->dev; /* * For simplicity, only use MSI-X if there is a single message. * To support a device with multiple messages we would have to * use remap intr if the MSI number is not 0. 
*/ count = pci_msix_count(dev); if (count == 1) { error = pci_alloc_msix(dev, &count); if (error == 0) rid = 1; } if (rid < 0 && pci_msi_count(dev) > 0) { count = 1; error = pci_alloc_msi(dev, &count); if (error == 0) rid = 1; } if (rid < 0) rid = 0; sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->pcie_irq == NULL) { device_printf(dev, "Failed to allocate interrupt for PCI-e events\n"); if (rid > 0) pci_release_msi(dev); return (ENXIO); } error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE, NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand); if (error) { device_printf(dev, "Failed to setup PCI-e interrupt handler\n"); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq); if (rid > 0) pci_release_msi(dev); return (error); } return (0); } static int pcib_release_pcie_irq(struct pcib_softc *sc) { device_t dev; int error; dev = sc->dev; error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand); if (error) return (error); error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq); if (error) return (error); return (pci_release_msi(dev)); } static void pcib_setup_hotplug(struct pcib_softc *sc) { device_t dev; uint16_t mask, val; dev = sc->dev; TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0, pcib_pcie_ab_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0, pcib_pcie_cc_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0, pcib_pcie_dll_timeout, sc); - sc->pcie_hp_lock = &Giant; + sc->pcie_hp_lock = bus_topo_mtx(); /* Allocate IRQ. */ if (pcib_alloc_pcie_irq(sc) != 0) return; sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear any events previously pending. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); /* Enable HotPlug events. */ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB) val |= PCIEM_SLOT_CTL_ABPE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) val |= PCIEM_SLOT_CTL_PFDE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) val |= PCIEM_SLOT_CTL_MRLSCE; if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) val |= PCIEM_SLOT_CTL_CCIE; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); } static int pcib_detach_hotplug(struct pcib_softc *sc) { uint16_t mask, val; int error; /* Disable the card in the slot and force it to detach. */ if (sc->flags & PCIB_DETACH_PENDING) { sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } sc->flags |= PCIB_DETACHING; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) { taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); tsleep(sc, 0, "hpcmd", hz); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } /* Disable HotPlug events. */ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = 0; /* Turn the attention indicator off. 
*/ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); error = pcib_release_pcie_irq(sc); if (error) return (error); taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task); return (0); } #endif /* * Get current bridge configuration. */ static void pcib_cfg_save(struct pcib_softc *sc) { #ifndef NEW_PCIB device_t dev; uint16_t command; dev = sc->dev; command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_get_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_get_mem_decode(sc); #endif } /* * Restore previous bridge configuration. */ static void pcib_cfg_restore(struct pcib_softc *sc) { #ifndef NEW_PCIB uint16_t command; #endif #ifdef NEW_PCIB pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); #else command = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_set_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_set_mem_decode(sc); #endif } /* * Generic device interface */ static int pcib_probe(device_t dev) { if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { device_set_desc(dev, "PCI-PCI bridge"); return(-10000); } return(ENXIO); } void pcib_attach_common(device_t dev) { struct pcib_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int comma; sc = device_get_softc(dev); sc->dev = dev; /* * Get current bridge configuration. */ sc->domain = pci_get_domain(dev); #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); #endif sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); pcib_cfg_save(sc); /* * The primary bus register should always be the bus of the * parent. */ sc->pribus = pci_get_bus(dev); pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); /* * Setup sysctl reporting nodes */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); /* * Quirk handling. */ switch (pci_get_devid(dev)) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) case 0x12258086: /* Intel 82454KX/GX (Orion) */ { uint8_t supbus; supbus = pci_read_config(dev, 0x41, 1); if (supbus != 0xff) { sc->bus.sec = supbus + 1; sc->bus.sub = supbus + 1; } break; } #endif /* * The i82380FB mobile docking controller is a PCI-PCI bridge, * and it is a subtractive bridge. However, the ProgIf is wrong * so the normal setting of PCIB_SUBTRACTIVE bit doesn't * happen. There are also Toshiba and Cavium ThunderX bridges * that behave this way. */ case 0xa002177d: /* Cavium ThunderX */ case 0x124b8086: /* Intel 82380FB Mobile */ case 0x060513d7: /* Toshiba ???? */ sc->flags |= PCIB_SUBTRACTIVE; break; #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* Compaq R3000 BIOS sets wrong subordinate bus number. 
*/ case 0x00dd10de: { char *cp; if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sc->bus.sub < 0xa) { pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); } break; } #endif } if (pci_msi_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSI; if (pci_msix_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSIX; /* * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. * This means they act as if they were subtractively decoding * bridges and pass all transactions. Mark them and real ProgIf 1 * parts as subtractive. */ if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) sc->flags |= PCIB_SUBTRACTIVE; #ifdef PCI_HP pcib_probe_hotplug(sc); #endif #ifdef NEW_PCIB #ifdef PCI_RES_BUS pcib_setup_secbus(dev, &sc->bus, 1); #endif pcib_probe_windows(sc); #endif #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) pcib_setup_hotplug(sc); #endif if (bootverbose) { device_printf(dev, " domain %d\n", sc->domain); device_printf(dev, " secondary bus %d\n", sc->bus.sec); device_printf(dev, " subordinate bus %d\n", sc->bus.sub); #ifdef NEW_PCIB if (pcib_is_window_open(&sc->io)) device_printf(dev, " I/O decode 0x%jx-0x%jx\n", (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); if (pcib_is_window_open(&sc->mem)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); if (pcib_is_window_open(&sc->pmem)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); #else if (pcib_is_io_open(sc)) device_printf(dev, " I/O decode 0x%x-0x%x\n", sc->iobase, sc->iolimit); if (pcib_is_nonprefetch_open(sc)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); if (pcib_is_prefetch_open(sc)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); #endif if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || sc->flags & PCIB_SUBTRACTIVE) { device_printf(dev, " special decode "); comma = 0; if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { printf("ISA"); comma = 1; } if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { printf("%sVGA", comma ? ", " : ""); comma = 1; } if (sc->flags & PCIB_SUBTRACTIVE) printf("%ssubtractive", comma ? ", " : ""); printf("\n"); } } /* * Always enable busmastering on bridges so that transactions * initiated on the secondary bus are passed through to the * primary bus. */ pci_enable_busmaster(dev); } #ifdef PCI_HP static int pcib_present(struct pcib_softc *sc) { if (sc->flags & PCIB_HOTPLUG) return (pcib_hotplug_present(sc) != 0); return (1); } #endif int pcib_attach_child(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->bus.sec == 0) { /* no secondary bus; we should have fixed this */ return(0); } #ifdef PCI_HP if (!pcib_present(sc)) { /* An empty HotPlug slot, so don't add a PCI bus yet. 
*/ return (0); } #endif sc->child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } int pcib_attach(device_t dev) { pcib_attach_common(dev); return (pcib_attach_child(dev)); } int pcib_detach(device_t dev) { #if defined(PCI_HP) || defined(NEW_PCIB) struct pcib_softc *sc; #endif int error; #if defined(PCI_HP) || defined(NEW_PCIB) sc = device_get_softc(dev); #endif error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) { error = pcib_detach_hotplug(sc); if (error) return (error); } #endif error = device_delete_children(dev); if (error) return (error); #ifdef NEW_PCIB pcib_free_windows(sc); #ifdef PCI_RES_BUS pcib_free_secbus(dev, &sc->bus); #endif #endif return (0); } int pcib_suspend(device_t dev) { pcib_cfg_save(device_get_softc(dev)); return (bus_generic_suspend(dev)); } int pcib_resume(device_t dev) { pcib_cfg_restore(device_get_softc(dev)); /* * Restore the Command register only after restoring the windows. * The bridge should not be claiming random windows. */ pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2); return (bus_generic_resume(dev)); } void pcib_bridge_init(device_t dev) { pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); } int pcib_child_present(device_t dev, device_t child) { #ifdef PCI_HP struct pcib_softc *sc = device_get_softc(dev); int retval; retval = bus_child_present(dev); if (retval != 0 && sc->flags & PCIB_HOTPLUG) retval = pcib_hotplug_present(sc); return (retval); #else return (bus_child_present(dev)); #endif } int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return(0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return(0); } return(ENOENT); } int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return(EINVAL); case PCIB_IVAR_BUS: return(EINVAL); } return(ENOENT); } #ifdef NEW_PCIB /* * Attempt to allocate a resource from the existing resources assigned * to a window. */ static struct resource * pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (!pcib_is_window_open(w)) return (NULL); res = rman_reserve_resource(&w->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(sc->dev, "allocated %s range (%#jx-%#jx) for rid %x of %s\n", w->name, rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); /* * If the resource should be active, pass that request up the * tree. This assumes the parent drivers can handle * activating sub-allocated resources. */ if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } /* Allocate a fresh resource range for an unconfigured window. 
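 * Window bases and limits can only sit on 1 << w->step boundaries (4 KB
 * for I/O ports, 1 MB for memory), so the non-ISA path below aligns and
 * pads the request to that granularity before asking the parent.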
*/ static int pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t base, limit, wmask; int rid; /* * If this is an I/O window on a bridge with ISA enable set * and the start address is below 64k, then try to allocate an * initial window of 0x1000 bytes long starting at address * 0xf000 and walking down. Note that if the original request * was larger than the non-aliased range size of 0x100 our * caller would have raised the start address up to 64k * already. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && start < 65536) { for (base = 0xf000; (long)base >= 0; base -= 0x1000) { limit = base + 0xfff; /* * Skip ranges that wouldn't work for the * original request. Note that the actual * window that overlaps are the non-alias * ranges within [base, limit], so this isn't * quite a simple comparison. */ if (start + count > limit - 0x400) continue; if (base == 0) { /* * The first open region for the window at * 0 is 0x400-0x4ff. */ if (end - count + 1 < 0x400) continue; } else { if (end - count + 1 < base) continue; } if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { w->base = base; w->limit = limit; return (0); } } return (ENOSPC); } wmask = ((rman_res_t)1 << w->step) - 1; if (RF_ALIGNMENT(flags) < w->step) { flags &= ~RF_ALIGNMENT_MASK; flags |= RF_ALIGNMENT_LOG2(w->step); } start &= ~wmask; end |= wmask; count = roundup2(count, (rman_res_t)1 << w->step); rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) return (ENOSPC); pcib_add_window_resources(w, &res, 1); pcib_activate_window(sc, type); w->base = rman_get_start(res); w->limit = rman_get_end(res); return (0); } /* Try to expand an existing window to the requested base and limit. */ static int pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t base, rman_res_t limit) { struct resource *res; int error, i, force_64k_base; KASSERT(base <= w->base && limit >= w->limit, ("attempting to shrink window")); /* * XXX: pcib_grow_window() doesn't try to do this anyway and * the error handling for all the edge cases would be tedious. */ KASSERT(limit == w->limit || base == w->base, ("attempting to grow both ends of a window")); /* * Yet more special handling for requests to expand an I/O * window behind an ISA-enabled bridge. Since I/O windows * have to grow in 0x1000 increments and the end of the 0xffff * range is an alias, growing a window below 64k will always * result in allocating new resources and never adjusting an * existing resource. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && (limit <= 65535 || (base <= 65535 && base != w->base))) { KASSERT(limit == w->limit || limit <= 65535, ("attempting to grow both ends across 64k ISA alias")); if (base != w->base) error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1); else error = pcib_alloc_nonisa_ranges(sc, w->limit + 1, limit); if (error == 0) { w->base = base; w->limit = limit; } return (error); } /* * Find the existing resource to adjust. Usually there is only one, * but for an ISA-enabled bridge we might be growing the I/O window * above 64k and need to find the existing resource that maps all * of the area above 64k. 
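 * The loop below keys on rman_get_end(w->res[i]) == w->limit: by
 * construction the resource being extended is the one that reaches the
 * current top of the window.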
*/ for (i = 0; i < w->count; i++) { if (rman_get_end(w->res[i]) == w->limit) break; } KASSERT(i != w->count, ("did not find existing resource")); res = w->res[i]; /* * Usually the resource we found should match the window's * existing range. The one exception is the ISA-enabled case * mentioned above in which case the resource should start at * 64k. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && w->base <= 65535) { KASSERT(rman_get_start(res) == 65536, ("existing resource mismatch")); force_64k_base = 1; } else { KASSERT(w->base == rman_get_start(res), ("existing resource mismatch")); force_64k_base = 0; } error = bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : base, limit); if (error) return (error); /* Add the newly allocated region to the resource manager. */ if (w->base != base) { error = rman_manage_region(&w->rman, base, w->base - 1); w->base = base; } else { error = rman_manage_region(&w->rman, w->limit + 1, limit); w->limit = limit; } if (error) { if (bootverbose) device_printf(sc->dev, "failed to expand %s resource manager\n", w->name); (void)bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : w->base, w->limit); } return (error); } /* * Attempt to grow a window to make room for a given resource request. */ static int pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t align, start_free, end_free, front, back, wmask; int error; /* * Clamp the desired resource range to the maximum address * this window supports. Reject impossible requests. * * For I/O port requests behind a bridge with the ISA enable * bit set, force large allocations to start above 64k. */ if (!w->valid) return (EINVAL); if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 && start < 65536) start = 65536; if (end > w->rman.rm_end) end = w->rman.rm_end; if (start + count - 1 > end || start + count < start) return (EINVAL); wmask = ((rman_res_t)1 << w->step) - 1; /* * If there is no resource at all, just try to allocate enough * aligned space for this resource. */ if (w->res == NULL) { error = pcib_alloc_new_window(sc, w, type, start, end, count, flags); if (error) { if (bootverbose) device_printf(sc->dev, "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n", w->name, start, end, count); return (error); } if (bootverbose) device_printf(sc->dev, "allocated initial %s window of %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); goto updatewin; } /* * See if growing the window would help. Compute the minimum * amount of address space needed on both the front and back * ends of the existing window to satisfy the allocation. * * For each end, build a candidate region adjusting for the * required alignment, etc. If there is a free region at the * edge of the window, grow from the inner edge of the free * region. Otherwise grow from the window boundary. * * Growing an I/O window below 64k for a bridge with the ISA * enable bit doesn't require any special magic as the step * size of an I/O window (1k) always includes multiple * non-alias ranges when it is grown in either direction. * * XXX: Special case: if w->res is completely empty and the * request size is larger than w->res, we should find the * optimal aligned buffer containing w->res and allocate that. 
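 *
 * Worked example of front growth (assuming no free space remains at the
 * bottom of the window): a 1 MB-aligned request for 0x100000 bytes in
 * 0x80000000-0xbfffffff against a memory window of 0xc0000000-0xc00fffff
 * yields end_free = 0xbfffffff after alignment, hence the candidate
 * range 0xbff00000-0xbfffffff; front then becomes
 * w->base - 0xbff00000 == 0x100000, i.e. grow down by one 1 MB step.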
*/ if (bootverbose) device_printf(sc->dev, "attempting to grow %s window for (%#jx-%#jx,%#jx)\n", w->name, start, end, count); align = (rman_res_t)1 << RF_ALIGNMENT(flags); if (start < w->base) { if (rman_first_free_region(&w->rman, &start_free, &end_free) != 0 || start_free != w->base) end_free = w->base; if (end_free > end) end_free = end + 1; /* Move end_free down until it is properly aligned. */ end_free &= ~(align - 1); end_free--; front = end_free - (count - 1); /* * The resource would now be allocated at (front, * end_free). Ensure that fits in the (start, end) * bounds. end_free is checked above. If 'front' is * ok, ensure it is properly aligned for this window. * Also check for underflow. */ if (front >= start && front <= end_free) { if (bootverbose) printf("\tfront candidate range: %#jx-%#jx\n", front, end_free); front &= ~wmask; front = w->base - front; } else front = 0; } else front = 0; if (end > w->limit) { if (rman_last_free_region(&w->rman, &start_free, &end_free) != 0 || end_free != w->limit) start_free = w->limit + 1; if (start_free < start) start_free = start; /* Move start_free up until it is properly aligned. */ start_free = roundup2(start_free, align); back = start_free + count - 1; /* * The resource would now be allocated at (start_free, * back). Ensure that fits in the (start, end) * bounds. start_free is checked above. If 'back' is * ok, ensure it is properly aligned for this window. * Also check for overflow. */ if (back <= end && start_free <= back) { if (bootverbose) printf("\tback candidate range: %#jx-%#jx\n", start_free, back); back |= wmask; back -= w->limit; } else back = 0; } else back = 0; /* * Try to allocate the smallest needed region first. * If that fails, fall back to the other region. */ error = ENOSPC; while (front != 0 || back != 0) { if (front != 0 && (front <= back || back == 0)) { error = pcib_expand_window(sc, w, type, w->base - front, w->limit); if (error == 0) break; front = 0; } else { error = pcib_expand_window(sc, w, type, w->base, w->limit + back); if (error == 0) break; back = 0; } } if (error) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); updatewin: /* Write the new window. */ KASSERT((w->base & wmask) == 0, ("start address is not aligned")); KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); pcib_write_windows(sc, w->mask); return (0); } /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc; struct resource *r; sc = device_get_softc(dev); /* * VGA resources are decoded iff the VGA enable bit is set in * the bridge control register. VGA resources do not fall into * the resource windows and are passed up to the parent. 
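 * (The legacy VGA ranges are I/O ports 0x3b0-0x3bb and 0x3c0-0x3df plus
 * the 0xa0000-0xbffff memory aperture.)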
*/ if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); else return (NULL); } switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: if (pcib_is_isa_range(sc, start, end, count)) return (NULL); r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (pcib_grow_window(sc, &sc->io, type, start, end, count, flags) == 0) r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); break; case SYS_RES_MEMORY: /* * For prefetchable resources, prefer the prefetchable * memory window, but fall back to the regular memory * window if that fails. Try both windows before * attempting to grow a window in case the firmware * has used a range in the regular memory window to * map a prefetchable BAR. */ if (flags & RF_PREFETCHABLE) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (flags & RF_PREFETCHABLE) { if (pcib_grow_window(sc, &sc->pmem, type, start, end, count, flags) == 0) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } } if (pcib_grow_window(sc, &sc->mem, type, start, end, count, flags & ~RF_PREFETCHABLE) == 0) r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); break; default: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } /* * If attempts to suballocate from the window fail but this is a * subtractive bridge, pass the request up the tree. */ if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (r); } int pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct pcib_softc *sc; struct pcib_window *w; rman_res_t wmask; int error; sc = device_get_softc(bus); /* * If the resource wasn't sub-allocated from one of our region * managers then just pass the request up. */ if (!pcib_is_resource_managed(sc, type, r)) return (bus_generic_adjust_resource(bus, child, type, r, start, end)); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) { /* * If our bus range isn't big enough to grow the sub-allocation * then we need to grow our bus range. Any request that would * require us to decrease the start of our own bus range is * invalid, we can only extend the end; ignore such requests * and let rman_adjust_resource fail below. */ if (start >= sc->bus.sec && end > sc->bus.sub) { error = pcib_grow_subbus(&sc->bus, end); if (error != 0) return (error); } } else #endif { /* * Resource is managed and not a secondary bus number, must * be from one of our windows. */ w = pcib_get_resource_window(sc, type, r); KASSERT(w != NULL, ("%s: no window for resource (%#jx-%#jx) type %d", __func__, rman_get_start(r), rman_get_end(r), type)); /* * If our window isn't big enough to grow the sub-allocation * then we need to expand the window. 
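 *
 * A minimal sketch of the rounding applied below, assuming a
 * power-of-two window granularity (1 << step); round_to_window() is a
 * hypothetical helper restating the MIN/MAX expansion:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
static void
round_to_window(rman_res_t *start, rman_res_t *end, u_int step)
{
	rman_res_t wmask = ((rman_res_t)1 << step) - 1;

	*start &= ~wmask;	/* base moves down to a step boundary */
	*end |= wmask;		/* limit moves up to just below the next one */
}
#endif
/*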
*/ if (start < w->base || end > w->limit) { wmask = ((rman_res_t)1 << w->step) - 1; error = pcib_expand_window(sc, w, type, MIN(start & ~wmask, w->base), MAX(end | wmask, w->limit)); if (error != 0) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); pcib_write_windows(sc, w->mask); } } return (rman_adjust_resource(r, start, end)); } int pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc; int error; sc = device_get_softc(dev); if (pcib_is_resource_managed(sc, type, r)) { if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (bus_generic_release_resource(dev, child, type, rid, r)); } #else /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc = device_get_softc(dev); const char *name, *suffix; int ok; /* * Fail the allocation for this range if it's not supported. */ name = device_get_nameunit(child); if (name == NULL) { name = ""; suffix = ""; } else suffix = " "; switch (type) { case SYS_RES_IOPORT: ok = 0; if (!pcib_is_io_open(sc)) break; ok = (start >= sc->iobase && end <= sc->iolimit); /* * Make sure we allow access to VGA I/O addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_ioport_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { if (start < sc->iobase) start = sc->iobase; if (end > sc->iolimit) end = sc->iolimit; if (start < end) ok = 1; } } else { ok = 1; #if 0 /* * If we overlap with the subtractive range, then * pick the upper range to use. */ if (start < sc->iolimit && end > sc->iobase) start = sc->iolimit + 1; #endif } if (end < start) { device_printf(dev, "ioport: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok) { device_printf(dev, "%s%srequested unsupported I/O " "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", name, suffix, start, end, sc->iobase, sc->iolimit); return (NULL); } if (bootverbose) device_printf(dev, "%s%srequested I/O range 0x%jx-0x%jx: in range\n", name, suffix, start, end); break; case SYS_RES_MEMORY: ok = 0; if (pcib_is_nonprefetch_open(sc)) ok = ok || (start >= sc->membase && end <= sc->memlimit); if (pcib_is_prefetch_open(sc)) ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); /* * Make sure we allow access to VGA memory addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_memory_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 
1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { ok = 1; if (flags & RF_PREFETCHABLE) { if (pcib_is_prefetch_open(sc)) { if (start < sc->pmembase) start = sc->pmembase; if (end > sc->pmemlimit) end = sc->pmemlimit; } else { ok = 0; } } else { /* non-prefetchable */ if (pcib_is_nonprefetch_open(sc)) { if (start < sc->membase) start = sc->membase; if (end > sc->memlimit) end = sc->memlimit; } else { ok = 0; } } } } else if (!ok) { ok = 1; /* subtractive bridge: always ok */ #if 0 if (pcib_is_nonprefetch_open(sc)) { if (start < sc->memlimit && end > sc->membase) start = sc->memlimit + 1; } if (pcib_is_prefetch_open(sc)) { if (start < sc->pmemlimit && end > sc->pmembase) start = sc->pmemlimit + 1; } #endif } if (end < start) { device_printf(dev, "memory: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok && bootverbose) device_printf(dev, "%s%srequested unsupported memory range %#jx-%#jx " "(decoding %#jx-%#jx, %#jx-%#jx)\n", name, suffix, start, end, (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); if (!ok) return (NULL); if (bootverbose) device_printf(dev,"%s%srequested memory range " "0x%jx-0x%jx: good\n", name, suffix, start, end); break; default: break; } /* * Bridge is OK decoding this resource, so pass it up. */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif /* * If ARI is enabled on this downstream port, translate the function number * to the non-ARI slot/function. The downstream port will convert it back in * hardware. If ARI is not enabled slot and func are not modified. */ static __inline void pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) { struct pcib_softc *sc; int ari_func; sc = device_get_softc(pcib); ari_func = *func; if (sc->flags & PCIB_ENABLE_ARI) { KASSERT(*slot == 0, ("Non-zero slot number with ARI enabled!")); *slot = PCIE_ARI_SLOT(ari_func); *func = PCIE_ARI_FUNC(ari_func); } } static void pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) { uint32_t ctl2; ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); ctl2 |= PCIEM_CTL2_ARI; pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); sc->flags |= PCIB_ENABLE_ARI; } /* * PCIB interface. */ int pcib_maxslots(device_t dev) { #if !defined(__amd64__) && !defined(__i386__) uint32_t pcie_pos; uint16_t val; /* * If this is a PCIe rootport or downstream switch port, there's only * one slot permitted. */ if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) { val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2); val &= PCIEM_FLAGS_TYPE; if (val == PCIEM_TYPE_ROOT_PORT || val == PCIEM_TYPE_DOWNSTREAM_PORT) return (0); } #endif return (PCI_SLOTMAX); } static int pcib_ari_maxslots(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_SLOTMAX); else return (pcib_maxslots(dev)); } static int pcib_ari_maxfuncs(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_FUNCMAX); else return (PCI_FUNCMAX); } static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func) { struct pcib_softc *sc; sc = device_get_softc(pcib); *bus = PCI_RID2BUS(rid); if (sc->flags & PCIB_ENABLE_ARI) { *slot = PCIE_ARI_RID2SLOT(rid); *func = PCIE_ARI_RID2FUNC(rid); } else { *slot = PCI_RID2SLOT(rid); *func = PCI_RID2FUNC(rid); } } /* * Since we are a child of a PCI bus, its parent must support the pcib interface. 
*/ static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) { switch (width) { case 2: return (0xffff); case 1: return (0xff); default: return (0xffffffff); } } #endif pcib_xlate_ari(dev, b, &s, &f); return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width)); } static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) return; #endif pcib_xlate_ari(dev, b, &s, &f); PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width); } /* * Route an interrupt across a PCI bridge. */ int pcib_route_interrupt(device_t pcib, device_t dev, int pin) { device_t bus; int parent_intpin; int intnum; /* * * The PCI standard defines a swizzle of the child-side device/intpin to * the parent-side intpin as follows. * * device = device on child bus * child_intpin = intpin on child bus slot (0-3) * parent_intpin = intpin on parent bus slot (0-3) * * parent_intpin = (device + child_intpin) % 4 */ parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; /* * Our parent is a PCI bus. Its parent must export the pcib interface * which includes the ability to route interrupts. */ bus = device_get_parent(pcib); intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { device_printf(pcib, "slot %d INT%c is routed to irq %d\n", pci_get_slot(dev), 'A' + pin - 1, intnum); } return(intnum); } /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSI) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ int pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to release an MSI-X message up to the parent bridge. */ int pcib_release_msix(device_t pcib, device_t dev, int irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to map MSI/MSI-X message up to parent bridge. */ int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); pci_ht_map_msi(pcib, *addr); return (0); } /* Pass request for device power state up to parent bridge. 
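 *
 * A worked example of the swizzle above: a device in slot 3 raising
 * INTB (pin 2, child_intpin 1) appears on the parent side as
 * (3 + 1) % 4 = 0, i.e. INTA.  The hypothetical helper below simply
 * restates the formula:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
static int
swizzle_intpin(int slot, int pin /* 1 = INTA ... 4 = INTD */)
{
	return ((slot + (pin - 1)) % 4);	/* parent intpin, 0-3 */
}
#endif
/*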
*/ int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t bus; bus = device_get_parent(pcib); return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); } static int pcib_ari_enabled(device_t pcib) { struct pcib_softc *sc; sc = device_get_softc(pcib); return ((sc->flags & PCIB_ENABLE_ARI) != 0); } static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id) { struct pcib_softc *sc; device_t bus_dev; uint8_t bus, slot, func; if (type != PCI_ID_RID) { bus_dev = device_get_parent(pcib); return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id)); } sc = device_get_softc(pcib); if (sc->flags & PCIB_ENABLE_ARI) { bus = pci_get_bus(dev); func = pci_get_function(dev); *id = (PCI_ARI_RID(bus, func)); } else { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); *id = (PCI_RID(bus, slot, func)); } return (0); } /* * Check that the downstream port (pcib) and the endpoint device (dev) both * support ARI. If so, enable it and return 0, otherwise return an error. */ static int pcib_try_enable_ari(device_t pcib, device_t dev) { struct pcib_softc *sc; int error; uint32_t cap2; int ari_cap_off; uint32_t ari_ver; uint32_t pcie_pos; sc = device_get_softc(pcib); /* * ARI is controlled in a register in the PCIe capability structure. * If the downstream port does not have the PCIe capability structure * then it does not support ARI. */ error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); if (error != 0) return (ENODEV); /* Check that the PCIe port advertises ARI support. */ cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); if (!(cap2 & PCIEM_CAP2_ARI)) return (ENODEV); /* * Check that the endpoint device advertises ARI support via the ARI * extended capability structure. */ error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); if (error != 0) return (ENODEV); /* * Finally, check that the endpoint device supports the same version * of ARI that we do. */ ari_ver = pci_read_config(dev, ari_cap_off, 4); if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { if (bootverbose) device_printf(pcib, "Unsupported version of ARI (%d) detected\n", PCI_EXTCAP_VER(ari_ver)); return (ENXIO); } pcib_enable_ari(sc, pcie_pos); return (0); } int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature) { /* * No host firmware we have to negotiate with, so we allow * every valid feature requested. */ switch (feature) { case PCI_FEATURE_AER: case PCI_FEATURE_HP: break; default: return (EINVAL); } return (0); } int pcib_request_feature(device_t dev, enum pci_feature feature) { /* * Invoke PCIB_REQUEST_FEATURE of this bridge first in case * the firmware overrides the method of PCI-PCI bridges. */ return (PCIB_REQUEST_FEATURE(dev, dev, feature)); } /* * Pass the request to use this PCI feature up the tree. Either there's a * firmware like ACPI that's using this feature that will approve (or deny) the * request to take it over, or the platform has no such firmware, in which case * the request will be approved. If the request is approved, the OS is expected * to make use of the feature or render it harmless. */ static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature) { device_t bus; /* * Our parent is necessarily a pci bus. Its parent will either be * another pci bridge (which passes it up) or a host bridge that can * approve or reject the request. 
*/ bus = device_get_parent(pcib); return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature)); } static int pcib_reset_child(device_t dev, device_t child, int flags) { struct pci_devinfo *pdinfo; int error; error = 0; if (dev == NULL || device_get_parent(child) != dev) goto out; error = ENXIO; if (device_get_devclass(child) != devclass_find("pci")) goto out; pdinfo = device_get_ivars(dev); if (pdinfo->cfg.pcie.pcie_location != 0 && (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT || pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) { error = bus_helper_reset_prepare(child, flags); if (error == 0) { error = pcie_link_reset(dev, pdinfo->cfg.pcie.pcie_location); /* XXXKIB call _post even if error != 0 ? */ bus_helper_reset_post(child, flags); } } out: return (error); } diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c index fc84b5cfaa25..77d63c462ad9 100644 --- a/sys/dev/pci/pci_user.c +++ b/sys/dev/pci/pci_user.c @@ -1,1415 +1,1418 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 1997, Stefan Esser * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" /* XXX trim includes */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #ifdef COMPAT_FREEBSD32 struct pci_conf32 { struct pcisel pc_sel; /* domain+bus+slot+function */ u_int8_t pc_hdr; /* PCI header type */ u_int16_t pc_subvendor; /* card vendor ID */ u_int16_t pc_subdevice; /* card device ID, assigned by card vendor */ u_int16_t pc_vendor; /* chip vendor ID */ u_int16_t pc_device; /* chip device ID, assigned by chip vendor */ u_int8_t pc_class; /* chip PCI class */ u_int8_t pc_subclass; /* chip PCI subclass */ u_int8_t pc_progif; /* chip PCI programming interface */ u_int8_t pc_revid; /* chip revision ID */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ u_int32_t pd_unit; /* device unit number */ }; struct pci_match_conf32 { struct pcisel pc_sel; /* domain+bus+slot+function */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ u_int32_t pd_unit; /* Unit number */ u_int16_t pc_vendor; /* PCI Vendor ID */ u_int16_t pc_device; /* PCI Device ID */ u_int8_t pc_class; /* PCI class */ u_int32_t flags; /* Matching expression */ }; struct pci_conf_io32 { u_int32_t pat_buf_len; /* pattern buffer length */ u_int32_t num_patterns; /* number of patterns */ u_int32_t patterns; /* struct pci_match_conf ptr */ u_int32_t match_buf_len; /* match buffer length */ u_int32_t num_matches; /* number of matches returned */ u_int32_t matches; /* struct pci_conf ptr */ u_int32_t offset; /* offset into device list */ u_int32_t generation; /* device list generation */ u_int32_t status; /* request status */ }; #define PCIOCGETCONF32 _IOC_NEWTYPE(PCIOCGETCONF, struct pci_conf_io32) #endif /* * This is the user interface to PCI configuration space. */ static d_open_t pci_open; static d_close_t pci_close; static d_ioctl_t pci_ioctl; struct cdevsw pcicdev = { .d_version = D_VERSION, .d_flags = 0, .d_open = pci_open, .d_close = pci_close, .d_ioctl = pci_ioctl, .d_name = "pci", }; static int pci_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { int error; if (oflags & FWRITE) { error = securelevel_gt(td->td_ucred, 0); if (error) return (error); } return (0); } static int pci_close(struct cdev *dev, int flag, int devtype, struct thread *td) { return 0; } /* * Match a single pci_conf structure against an array of pci_match_conf * structures. The first argument, 'matches', is an array of num_matches * pci_match_conf structures. match_buf is a pointer to the pci_conf * structure that will be compared to every entry in the matches array. * This function returns 1 on failure, 0 on success. */ static int pci_conf_match_native(struct pci_match_conf *matches, int num_matches, struct pci_conf *match_buf) { int i; if ((matches == NULL) || (match_buf == NULL) || (num_matches <= 0)) return(1); for (i = 0; i < num_matches; i++) { /* * I'm not sure why someone would do this...but... */ if (matches[i].flags == PCI_GETCONF_NO_MATCH) continue; /* * Look at each of the match flags. If it's set, do the * comparison. If the comparison fails, we don't have a * match, go on to the next item if there is one. 
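 *
 * A miniature sketch of the flag-gated comparison pattern used below,
 * assuming standalone C; struct mini_match, its flags and
 * mini_matches() are hypothetical:
 */
#if 0	/* Illustrative sketch only; not part of this change. */
#include <stdbool.h>
#include <stdint.h>

struct mini_match {
	uint32_t	flags;
#define	MINI_MATCH_VENDOR	0x01
#define	MINI_MATCH_DEVICE	0x02
	uint16_t	vendor;
	uint16_t	device;
};

static bool
mini_matches(const struct mini_match *m, uint16_t vendor, uint16_t device)
{
	/* A field participates only when its flag is set. */
	if ((m->flags & MINI_MATCH_VENDOR) != 0 && m->vendor != vendor)
		return (false);
	if ((m->flags & MINI_MATCH_DEVICE) != 0 && m->device != device)
		return (false);
	return (true);		/* every requested comparison passed */
}
#endif
/*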
*/ if (((matches[i].flags & PCI_GETCONF_MATCH_DOMAIN) != 0) && (match_buf->pc_sel.pc_domain != matches[i].pc_sel.pc_domain)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_BUS) != 0) && (match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEV) != 0) && (match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC) != 0) && (match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR) != 0) && (match_buf->pc_vendor != matches[i].pc_vendor)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE) != 0) && (match_buf->pc_device != matches[i].pc_device)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS) != 0) && (match_buf->pc_class != matches[i].pc_class)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT) != 0) && (match_buf->pd_unit != matches[i].pd_unit)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_NAME) != 0) && (strncmp(matches[i].pd_name, match_buf->pd_name, sizeof(match_buf->pd_name)) != 0)) continue; return(0); } return(1); } #ifdef COMPAT_FREEBSD32 static int pci_conf_match32(struct pci_match_conf32 *matches, int num_matches, struct pci_conf *match_buf) { int i; if ((matches == NULL) || (match_buf == NULL) || (num_matches <= 0)) return(1); for (i = 0; i < num_matches; i++) { /* * I'm not sure why someone would do this...but... */ if (matches[i].flags == PCI_GETCONF_NO_MATCH) continue; /* * Look at each of the match flags. If it's set, do the * comparison. If the comparison fails, we don't have a * match, go on to the next item if there is one. */ if (((matches[i].flags & PCI_GETCONF_MATCH_DOMAIN) != 0) && (match_buf->pc_sel.pc_domain != matches[i].pc_sel.pc_domain)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_BUS) != 0) && (match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEV) != 0) && (match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC) != 0) && (match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR) != 0) && (match_buf->pc_vendor != matches[i].pc_vendor)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE) != 0) && (match_buf->pc_device != matches[i].pc_device)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS) != 0) && (match_buf->pc_class != matches[i].pc_class)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT) != 0) && (match_buf->pd_unit != matches[i].pd_unit)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_NAME) != 0) && (strncmp(matches[i].pd_name, match_buf->pd_name, sizeof(match_buf->pd_name)) != 0)) continue; return(0); } return(1); } #endif /* COMPAT_FREEBSD32 */ #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) #define PRE7_COMPAT typedef enum { PCI_GETCONF_NO_MATCH_OLD = 0x00, PCI_GETCONF_MATCH_BUS_OLD = 0x01, PCI_GETCONF_MATCH_DEV_OLD = 0x02, PCI_GETCONF_MATCH_FUNC_OLD = 0x04, PCI_GETCONF_MATCH_NAME_OLD = 0x08, PCI_GETCONF_MATCH_UNIT_OLD = 0x10, PCI_GETCONF_MATCH_VENDOR_OLD = 0x20, PCI_GETCONF_MATCH_DEVICE_OLD = 0x40, PCI_GETCONF_MATCH_CLASS_OLD = 0x80 } pci_getconf_flags_old; struct pcisel_old { u_int8_t pc_bus; /* bus number */ u_int8_t pc_dev; /* device on this bus */ u_int8_t pc_func; /* function on this device */ }; struct pci_conf_old { struct pcisel_old pc_sel; /* bus+slot+function */ u_int8_t pc_hdr; 
/* PCI header type */ u_int16_t pc_subvendor; /* card vendor ID */ u_int16_t pc_subdevice; /* card device ID, assigned by card vendor */ u_int16_t pc_vendor; /* chip vendor ID */ u_int16_t pc_device; /* chip device ID, assigned by chip vendor */ u_int8_t pc_class; /* chip PCI class */ u_int8_t pc_subclass; /* chip PCI subclass */ u_int8_t pc_progif; /* chip PCI programming interface */ u_int8_t pc_revid; /* chip revision ID */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ u_long pd_unit; /* device unit number */ }; struct pci_match_conf_old { struct pcisel_old pc_sel; /* bus+slot+function */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ u_long pd_unit; /* Unit number */ u_int16_t pc_vendor; /* PCI Vendor ID */ u_int16_t pc_device; /* PCI Device ID */ u_int8_t pc_class; /* PCI class */ pci_getconf_flags_old flags; /* Matching expression */ }; struct pci_io_old { struct pcisel_old pi_sel; /* device to operate on */ int pi_reg; /* configuration register to examine */ int pi_width; /* width (in bytes) of read or write */ u_int32_t pi_data; /* data to write or result of read */ }; #ifdef COMPAT_FREEBSD32 struct pci_conf_old32 { struct pcisel_old pc_sel; /* bus+slot+function */ uint8_t pc_hdr; /* PCI header type */ uint16_t pc_subvendor; /* card vendor ID */ uint16_t pc_subdevice; /* card device ID, assigned by card vendor */ uint16_t pc_vendor; /* chip vendor ID */ uint16_t pc_device; /* chip device ID, assigned by chip vendor */ uint8_t pc_class; /* chip PCI class */ uint8_t pc_subclass; /* chip PCI subclass */ uint8_t pc_progif; /* chip PCI programming interface */ uint8_t pc_revid; /* chip revision ID */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ uint32_t pd_unit; /* device unit number (u_long) */ }; struct pci_match_conf_old32 { struct pcisel_old pc_sel; /* bus+slot+function */ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */ uint32_t pd_unit; /* Unit number (u_long) */ uint16_t pc_vendor; /* PCI Vendor ID */ uint16_t pc_device; /* PCI Device ID */ uint8_t pc_class; /* PCI class */ pci_getconf_flags_old flags; /* Matching expression */ }; #define PCIOCGETCONF_OLD32 _IOWR('p', 1, struct pci_conf_io32) #endif /* COMPAT_FREEBSD32 */ #define PCIOCGETCONF_OLD _IOWR('p', 1, struct pci_conf_io) #define PCIOCREAD_OLD _IOWR('p', 2, struct pci_io_old) #define PCIOCWRITE_OLD _IOWR('p', 3, struct pci_io_old) static int pci_conf_match_old(struct pci_match_conf_old *matches, int num_matches, struct pci_conf *match_buf) { int i; if ((matches == NULL) || (match_buf == NULL) || (num_matches <= 0)) return(1); for (i = 0; i < num_matches; i++) { if (match_buf->pc_sel.pc_domain != 0) continue; /* * I'm not sure why someone would do this...but... */ if (matches[i].flags == PCI_GETCONF_NO_MATCH_OLD) continue; /* * Look at each of the match flags. If it's set, do the * comparison. If the comparison fails, we don't have a * match, go on to the next item if there is one. 
*/ if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_OLD) != 0) && (match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_OLD) != 0) && (match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_OLD) != 0) && (match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_OLD) != 0) && (match_buf->pc_vendor != matches[i].pc_vendor)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_OLD) != 0) && (match_buf->pc_device != matches[i].pc_device)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_OLD) != 0) && (match_buf->pc_class != matches[i].pc_class)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_OLD) != 0) && (match_buf->pd_unit != matches[i].pd_unit)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_OLD) != 0) && (strncmp(matches[i].pd_name, match_buf->pd_name, sizeof(match_buf->pd_name)) != 0)) continue; return(0); } return(1); } #ifdef COMPAT_FREEBSD32 static int pci_conf_match_old32(struct pci_match_conf_old32 *matches, int num_matches, struct pci_conf *match_buf) { int i; if ((matches == NULL) || (match_buf == NULL) || (num_matches <= 0)) return(1); for (i = 0; i < num_matches; i++) { if (match_buf->pc_sel.pc_domain != 0) continue; /* * I'm not sure why someone would do this...but... */ if (matches[i].flags == PCI_GETCONF_NO_MATCH_OLD) continue; /* * Look at each of the match flags. If it's set, do the * comparison. If the comparison fails, we don't have a * match, go on to the next item if there is one. */ if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_OLD) != 0) && (match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_OLD) != 0) && (match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_OLD) != 0) && (match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_OLD) != 0) && (match_buf->pc_vendor != matches[i].pc_vendor)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_OLD) != 0) && (match_buf->pc_device != matches[i].pc_device)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_OLD) != 0) && (match_buf->pc_class != matches[i].pc_class)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_OLD) != 0) && ((u_int32_t)match_buf->pd_unit != matches[i].pd_unit)) continue; if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_OLD) != 0) && (strncmp(matches[i].pd_name, match_buf->pd_name, sizeof(match_buf->pd_name)) != 0)) continue; return (0); } return (1); } #endif /* COMPAT_FREEBSD32 */ #endif /* !PRE7_COMPAT */ union pci_conf_union { struct pci_conf pc; #ifdef COMPAT_FREEBSD32 struct pci_conf32 pc32; #endif #ifdef PRE7_COMPAT struct pci_conf_old pco; #ifdef COMPAT_FREEBSD32 struct pci_conf_old32 pco32; #endif #endif }; static int pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches, struct pci_conf *match_buf) { switch (cmd) { case PCIOCGETCONF: return (pci_conf_match_native( (struct pci_match_conf *)matches, num_matches, match_buf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: return (pci_conf_match32((struct pci_match_conf32 *)matches, num_matches, match_buf)); #endif #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD: return (pci_conf_match_old( (struct pci_match_conf_old *)matches, num_matches, match_buf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF_OLD32: return 
(pci_conf_match_old32( (struct pci_match_conf_old32 *)matches, num_matches, match_buf)); #endif #endif default: /* programmer error */ return (0); } } /* * Like PVE_NEXT but takes an explicit length since 'pve' is a user * pointer that cannot be dereferenced. */ #define PVE_NEXT_LEN(pve, datalen) \ ((struct pci_vpd_element *)((char *)(pve) + \ sizeof(struct pci_vpd_element) + (datalen))) static int pci_list_vpd(device_t dev, struct pci_list_vpd_io *lvio) { struct pci_vpd_element vpd_element, *vpd_user; struct pcicfg_vpd *vpd; size_t len, datalen; int error, i; vpd = pci_fetch_vpd_list(dev); if (vpd->vpd_reg == 0 || vpd->vpd_ident == NULL) return (ENXIO); /* * Calculate the amount of space needed in the data buffer. An * identifier element is always present followed by the read-only * and read-write keywords. */ len = sizeof(struct pci_vpd_element) + strlen(vpd->vpd_ident); for (i = 0; i < vpd->vpd_rocnt; i++) len += sizeof(struct pci_vpd_element) + vpd->vpd_ros[i].len; for (i = 0; i < vpd->vpd_wcnt; i++) len += sizeof(struct pci_vpd_element) + vpd->vpd_w[i].len; if (lvio->plvi_len == 0) { lvio->plvi_len = len; return (0); } if (lvio->plvi_len < len) { lvio->plvi_len = len; return (ENOMEM); } /* * Copyout the identifier string followed by each keyword and * value. */ datalen = strlen(vpd->vpd_ident); KASSERT(datalen <= 255, ("invalid VPD ident length")); vpd_user = lvio->plvi_data; vpd_element.pve_keyword[0] = '\0'; vpd_element.pve_keyword[1] = '\0'; vpd_element.pve_flags = PVE_FLAG_IDENT; vpd_element.pve_datalen = datalen; error = copyout(&vpd_element, vpd_user, sizeof(vpd_element)); if (error) return (error); error = copyout(vpd->vpd_ident, vpd_user->pve_data, datalen); if (error) return (error); vpd_user = PVE_NEXT_LEN(vpd_user, vpd_element.pve_datalen); vpd_element.pve_flags = 0; for (i = 0; i < vpd->vpd_rocnt; i++) { vpd_element.pve_keyword[0] = vpd->vpd_ros[i].keyword[0]; vpd_element.pve_keyword[1] = vpd->vpd_ros[i].keyword[1]; vpd_element.pve_datalen = vpd->vpd_ros[i].len; error = copyout(&vpd_element, vpd_user, sizeof(vpd_element)); if (error) return (error); error = copyout(vpd->vpd_ros[i].value, vpd_user->pve_data, vpd->vpd_ros[i].len); if (error) return (error); vpd_user = PVE_NEXT_LEN(vpd_user, vpd_element.pve_datalen); } vpd_element.pve_flags = PVE_FLAG_RW; for (i = 0; i < vpd->vpd_wcnt; i++) { vpd_element.pve_keyword[0] = vpd->vpd_w[i].keyword[0]; vpd_element.pve_keyword[1] = vpd->vpd_w[i].keyword[1]; vpd_element.pve_datalen = vpd->vpd_w[i].len; error = copyout(&vpd_element, vpd_user, sizeof(vpd_element)); if (error) return (error); error = copyout(vpd->vpd_w[i].value, vpd_user->pve_data, vpd->vpd_w[i].len); if (error) return (error); vpd_user = PVE_NEXT_LEN(vpd_user, vpd_element.pve_datalen); } KASSERT((char *)vpd_user - (char *)lvio->plvi_data == len, ("length mismatch")); lvio->plvi_len = len; return (0); } static size_t pci_match_conf_size(u_long cmd) { switch (cmd) { case PCIOCGETCONF: return (sizeof(struct pci_match_conf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: return (sizeof(struct pci_match_conf32)); #endif #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD: return (sizeof(struct pci_match_conf_old)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF_OLD32: return (sizeof(struct pci_match_conf_old32)); #endif #endif default: /* programmer error */ return (0); } } static size_t pci_conf_size(u_long cmd) { switch (cmd) { case PCIOCGETCONF: return (sizeof(struct pci_conf)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: return (sizeof(struct pci_conf32)); #endif #ifdef PRE7_COMPAT 
case PCIOCGETCONF_OLD: return (sizeof(struct pci_conf_old)); #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF_OLD32: return (sizeof(struct pci_conf_old32)); #endif #endif default: /* programmer error */ return (0); } } static void pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd) { #if defined(COMPAT_FREEBSD32) struct pci_conf_io32 *cio32; #endif switch (cmd) { case PCIOCGETCONF: #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD: #endif *cio = *(struct pci_conf_io *)data; return; #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD32: #endif cio32 = (struct pci_conf_io32 *)data; cio->pat_buf_len = cio32->pat_buf_len; cio->num_patterns = cio32->num_patterns; cio->patterns = (void *)(uintptr_t)cio32->patterns; cio->match_buf_len = cio32->match_buf_len; cio->num_matches = cio32->num_matches; cio->matches = (void *)(uintptr_t)cio32->matches; cio->offset = cio32->offset; cio->generation = cio32->generation; cio->status = cio32->status; return; #endif default: /* programmer error */ return; } } static void pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data, u_long cmd) { struct pci_conf_io *d_cio; #if defined(COMPAT_FREEBSD32) struct pci_conf_io32 *cio32; #endif switch (cmd) { case PCIOCGETCONF: #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD: #endif d_cio = (struct pci_conf_io *)data; d_cio->status = cio->status; d_cio->generation = cio->generation; d_cio->offset = cio->offset; d_cio->num_matches = cio->num_matches; return; #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD32: #endif cio32 = (struct pci_conf_io32 *)data; cio32->status = cio->status; cio32->generation = cio->generation; cio32->offset = cio->offset; cio32->num_matches = cio->num_matches; return; #endif default: /* programmer error */ return; } } static void pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup, u_long cmd) { memset(pcup, 0, sizeof(*pcup)); switch (cmd) { case PCIOCGETCONF: pcup->pc = *pcp; return; #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF32: pcup->pc32.pc_sel = pcp->pc_sel; pcup->pc32.pc_hdr = pcp->pc_hdr; pcup->pc32.pc_subvendor = pcp->pc_subvendor; pcup->pc32.pc_subdevice = pcp->pc_subdevice; pcup->pc32.pc_vendor = pcp->pc_vendor; pcup->pc32.pc_device = pcp->pc_device; pcup->pc32.pc_class = pcp->pc_class; pcup->pc32.pc_subclass = pcp->pc_subclass; pcup->pc32.pc_progif = pcp->pc_progif; pcup->pc32.pc_revid = pcp->pc_revid; strlcpy(pcup->pc32.pd_name, pcp->pd_name, sizeof(pcup->pc32.pd_name)); pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit; return; #endif #ifdef PRE7_COMPAT #ifdef COMPAT_FREEBSD32 case PCIOCGETCONF_OLD32: pcup->pco32.pc_sel.pc_bus = pcp->pc_sel.pc_bus; pcup->pco32.pc_sel.pc_dev = pcp->pc_sel.pc_dev; pcup->pco32.pc_sel.pc_func = pcp->pc_sel.pc_func; pcup->pco32.pc_hdr = pcp->pc_hdr; pcup->pco32.pc_subvendor = pcp->pc_subvendor; pcup->pco32.pc_subdevice = pcp->pc_subdevice; pcup->pco32.pc_vendor = pcp->pc_vendor; pcup->pco32.pc_device = pcp->pc_device; pcup->pco32.pc_class = pcp->pc_class; pcup->pco32.pc_subclass = pcp->pc_subclass; pcup->pco32.pc_progif = pcp->pc_progif; pcup->pco32.pc_revid = pcp->pc_revid; strlcpy(pcup->pco32.pd_name, pcp->pd_name, sizeof(pcup->pco32.pd_name)); pcup->pco32.pd_unit = (uint32_t)pcp->pd_unit; return; #endif /* COMPAT_FREEBSD32 */ case PCIOCGETCONF_OLD: pcup->pco.pc_sel.pc_bus = pcp->pc_sel.pc_bus; pcup->pco.pc_sel.pc_dev = pcp->pc_sel.pc_dev; pcup->pco.pc_sel.pc_func = pcp->pc_sel.pc_func; pcup->pco.pc_hdr = pcp->pc_hdr; pcup->pco.pc_subvendor = pcp->pc_subvendor; 
pcup->pco.pc_subdevice = pcp->pc_subdevice; pcup->pco.pc_vendor = pcp->pc_vendor; pcup->pco.pc_device = pcp->pc_device; pcup->pco.pc_class = pcp->pc_class; pcup->pco.pc_subclass = pcp->pc_subclass; pcup->pco.pc_progif = pcp->pc_progif; pcup->pco.pc_revid = pcp->pc_revid; strlcpy(pcup->pco.pd_name, pcp->pd_name, sizeof(pcup->pco.pd_name)); pcup->pco.pd_unit = pcp->pd_unit; return; #endif /* PRE7_COMPAT */ default: /* programmer error */ return; } } static int pci_bar_mmap(device_t pcidev, struct pci_bar_mmap *pbm) { vm_map_t map; vm_object_t obj; struct thread *td; struct sglist *sg; struct pci_map *pm; vm_paddr_t membase; vm_paddr_t pbase; vm_size_t plen; vm_offset_t addr; vm_prot_t prot; int error, flags; td = curthread; map = &td->td_proc->p_vmspace->vm_map; if ((pbm->pbm_flags & ~(PCIIO_BAR_MMAP_FIXED | PCIIO_BAR_MMAP_EXCL | PCIIO_BAR_MMAP_RW | PCIIO_BAR_MMAP_ACTIVATE)) != 0 || pbm->pbm_memattr != (vm_memattr_t)pbm->pbm_memattr || !pmap_is_valid_memattr(map->pmap, pbm->pbm_memattr)) return (EINVAL); /* Fetch the BAR physical base and length. */ pm = pci_find_bar(pcidev, pbm->pbm_reg); if (pm == NULL) return (EINVAL); if (!pci_bar_enabled(pcidev, pm)) return (EBUSY); /* XXXKIB enable if _ACTIVATE */ if (!PCI_BAR_MEM(pm->pm_value)) return (EIO); membase = pm->pm_value & PCIM_BAR_MEM_BASE; pbase = trunc_page(membase); plen = round_page(membase + ((pci_addr_t)1 << pm->pm_size)) - pbase; prot = VM_PROT_READ | (((pbm->pbm_flags & PCIIO_BAR_MMAP_RW) != 0) ? VM_PROT_WRITE : 0); /* Create vm structures and mmap. */ sg = sglist_alloc(1, M_WAITOK); error = sglist_append_phys(sg, pbase, plen); if (error != 0) goto out; obj = vm_pager_allocate(OBJT_SG, sg, plen, prot, 0, td->td_ucred); if (obj == NULL) { error = EIO; goto out; } obj->memattr = pbm->pbm_memattr; flags = MAP_SHARED; addr = 0; if ((pbm->pbm_flags & PCIIO_BAR_MMAP_FIXED) != 0) { addr = (uintptr_t)pbm->pbm_map_base; flags |= MAP_FIXED; } if ((pbm->pbm_flags & PCIIO_BAR_MMAP_EXCL) != 0) flags |= MAP_CHECK_EXCL; error = vm_mmap_object(map, &addr, plen, prot, prot, flags, obj, 0, FALSE, td); if (error != 0) { vm_object_deallocate(obj); goto out; } pbm->pbm_map_base = (void *)addr; pbm->pbm_map_length = plen; pbm->pbm_bar_off = membase - pbase; pbm->pbm_bar_length = (pci_addr_t)1 << pm->pm_size; out: sglist_free(sg); return (error); } static int pci_bar_io(device_t pcidev, struct pci_bar_ioreq *pbi) { struct pci_map *pm; struct resource *res; uint32_t offset, width; int bar, error, type; if (pbi->pbi_op != PCIBARIO_READ && pbi->pbi_op != PCIBARIO_WRITE) return (EINVAL); bar = PCIR_BAR(pbi->pbi_bar); pm = pci_find_bar(pcidev, bar); if (pm == NULL) return (EINVAL); offset = pbi->pbi_offset; width = pbi->pbi_width; if (offset + width < offset || ((pci_addr_t)1 << pm->pm_size) < offset + width) return (EINVAL); type = PCI_BAR_MEM(pm->pm_value) ? SYS_RES_MEMORY : SYS_RES_IOPORT; /* * This will fail if a driver has allocated the resource. This could be * worked around by detecting that case and using bus_map_resource() to * populate the handle, but so far this is not needed. 
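 *
 * A note on the range check above: with 32-bit offset and width the
 * sum can wrap, making it smaller than offset, so both halves of the
 * test are needed.  A minimal sketch (range_ok() is hypothetical):
 */
#if 0	/* Illustrative sketch only; not part of this change. */
static bool
range_ok(uint64_t bar_size, uint32_t offset, uint32_t width)
{
	if (offset + width < offset)		/* wrapped in 32 bits */
		return (false);
	return (offset + width <= bar_size);	/* stays inside the BAR */
}
#endif
/*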
*/
	res = bus_alloc_resource_any(pcidev, type, &bar, RF_ACTIVE);
	if (res == NULL)
		return (ENOENT);

	error = 0;
	switch (pbi->pbi_op) {
	case PCIBARIO_READ:
		switch (pbi->pbi_width) {
		case 1:
			pbi->pbi_value = bus_read_1(res, offset);
			break;
		case 2:
			pbi->pbi_value = bus_read_2(res, offset);
			break;
		case 4:
			pbi->pbi_value = bus_read_4(res, offset);
			break;
#ifndef __i386__
		case 8:
			pbi->pbi_value = bus_read_8(res, offset);
			break;
#endif
		default:
			error = EINVAL;
			break;
		}
		break;
	case PCIBARIO_WRITE:
		switch (pbi->pbi_width) {
		case 1:
			bus_write_1(res, offset, pbi->pbi_value);
			break;
		case 2:
			bus_write_2(res, offset, pbi->pbi_value);
			break;
		case 4:
			bus_write_4(res, offset, pbi->pbi_value);
			break;
#ifndef __i386__
		case 8:
			bus_write_8(res, offset, pbi->pbi_value);
			break;
#endif
		default:
			error = EINVAL;
			break;
		}
		break;
	}

	bus_release_resource(pcidev, type, bar, res);

	return (error);
}

static int
pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
    struct thread *td)
{
	device_t pcidev;
	const char *name;
	struct devlist *devlist_head;
	struct pci_conf_io *cio = NULL;
	struct pci_devinfo *dinfo;
	struct pci_io *io;
	struct pci_bar_ioreq *pbi;
	struct pci_bar_io *bio;
	struct pci_list_vpd_io *lvio;
	struct pci_match_conf *pattern_buf;
	struct pci_map *pm;
	struct pci_bar_mmap *pbm;
	size_t confsz, iolen;
	int error, ionum, i, num_patterns;
	union pci_conf_union pcu;
#ifdef PRE7_COMPAT
	struct pci_io iodata;
	struct pci_io_old *io_old;

	io_old = NULL;
#endif

	/*
	 * Interpret read-only opened /dev/pci as a promise that no
	 * operation of the file descriptor could modify system state,
	 * including side-effects due to reading device registers.
	 */
	if ((flag & FWRITE) == 0) {
		switch (cmd) {
		case PCIOCGETCONF:
#ifdef COMPAT_FREEBSD32
		case PCIOCGETCONF32:
#endif
#ifdef PRE7_COMPAT
		case PCIOCGETCONF_OLD:
#ifdef COMPAT_FREEBSD32
		case PCIOCGETCONF_OLD32:
#endif
#endif
		case PCIOCGETBAR:
		case PCIOCLISTVPD:
			break;
		default:
			return (EPERM);
		}
	}

-	/* Giant because newbus is Giant locked revisit with newbus locking */
-	mtx_lock(&Giant);
+	/*
+	 * Use bus topology lock to ensure that the pci list of devices doesn't
+	 * change while we're traversing the list, in some cases multiple times.
+	 */
+	bus_topo_lock();
	switch (cmd) {
	case PCIOCGETCONF:
#ifdef COMPAT_FREEBSD32
	case PCIOCGETCONF32:
#endif
#ifdef PRE7_COMPAT
	case PCIOCGETCONF_OLD:
#ifdef COMPAT_FREEBSD32
	case PCIOCGETCONF_OLD32:
#endif
#endif
		cio = malloc(sizeof(struct pci_conf_io), M_TEMP,
		    M_WAITOK | M_ZERO);
		pci_conf_io_init(cio, data, cmd);
		pattern_buf = NULL;
		num_patterns = 0;
		dinfo = NULL;

		cio->num_matches = 0;

		/*
		 * If the user specified an offset into the device list,
		 * but the list has changed since they last called this
		 * ioctl, tell them that the list has changed.  They will
		 * have to get the list from the beginning.
		 */
		if ((cio->offset != 0) && (cio->generation != pci_generation)) {
			cio->status = PCI_GETCONF_LIST_CHANGED;
			error = 0;
			goto getconfexit;
		}

		/*
		 * Check to see whether the user has asked for an offset
		 * past the end of our list.
		 */
		if (cio->offset >= pci_numdevs) {
			cio->status = PCI_GETCONF_LAST_DEVICE;
			error = 0;
			goto getconfexit;
		}

		/* get the head of the device queue */
		devlist_head = &pci_devq;

		/*
		 * Determine how much room we have for pci_conf structures.
		 * Round the user's buffer size down to the nearest
		 * multiple of sizeof(struct pci_conf) in case the user
		 * didn't specify a multiple of that size.
*/ confsz = pci_conf_size(cmd); iolen = min(cio->match_buf_len - (cio->match_buf_len % confsz), pci_numdevs * confsz); /* * Since we know that iolen is a multiple of the size of * the pciconf union, it's okay to do this. */ ionum = iolen / confsz; /* * If this test is true, the user wants the pci_conf * structures returned to match the supplied entries. */ if ((cio->num_patterns > 0) && (cio->num_patterns < pci_numdevs) && (cio->pat_buf_len > 0)) { /* * pat_buf_len needs to be: * num_patterns * sizeof(struct pci_match_conf) * While it is certainly possible the user just * allocated a large buffer, but set the number of * matches correctly, it is far more likely that * their kernel doesn't match the userland utility * they're using. It's also possible that the user * forgot to initialize some variables. Yes, this * may be overly picky, but I hazard to guess that * it's far more likely to just catch folks that * updated their kernel but not their userland. */ if (cio->num_patterns * pci_match_conf_size(cmd) != cio->pat_buf_len) { /* The user made a mistake, return an error. */ cio->status = PCI_GETCONF_ERROR; error = EINVAL; goto getconfexit; } /* * Allocate a buffer to hold the patterns. */ pattern_buf = malloc(cio->pat_buf_len, M_TEMP, M_WAITOK); error = copyin(cio->patterns, pattern_buf, cio->pat_buf_len); if (error != 0) { error = EINVAL; goto getconfexit; } num_patterns = cio->num_patterns; } else if ((cio->num_patterns > 0) || (cio->pat_buf_len > 0)) { /* * The user made a mistake, spit out an error. */ cio->status = PCI_GETCONF_ERROR; error = EINVAL; goto getconfexit; } /* * Go through the list of devices and copy out the devices * that match the user's criteria. */ for (cio->num_matches = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); dinfo != NULL; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { if (i < cio->offset) continue; /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); if (name) { strncpy(dinfo->conf.pd_name, name, sizeof(dinfo->conf.pd_name)); dinfo->conf.pd_name[PCI_MAXNAMELEN] = 0; dinfo->conf.pd_unit = device_get_unit(dinfo->cfg.dev); } else { dinfo->conf.pd_name[0] = '\0'; dinfo->conf.pd_unit = 0; } if (pattern_buf == NULL || pci_conf_match(cmd, pattern_buf, num_patterns, &dinfo->conf) == 0) { /* * If we've filled up the user's buffer, * break out at this point. Since we've * got a match here, we'll pick right back * up at the matching entry. We can also * tell the user that there are more matches * left. */ if (cio->num_matches >= ionum) { error = 0; break; } pci_conf_for_copyout(&dinfo->conf, &pcu, cmd); error = copyout(&pcu, (caddr_t)cio->matches + confsz * cio->num_matches, confsz); if (error) break; cio->num_matches++; } } /* * Set the pointer into the list, so if the user is getting * n records at a time, where n < pci_numdevs, */ cio->offset = i; /* * Set the generation, the user will need this if they make * another ioctl call with offset != 0. */ cio->generation = pci_generation; /* * If this is the last device, inform the user so he won't * bother asking for more devices. If dinfo isn't NULL, we * know that there are more matches in the list because of * the way the traversal is done. 
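 *
 * A minimal sketch of how a userland consumer might drive the
 * offset/generation protocol described above (error handling elided;
 * 'fd' is assumed to be a descriptor for /dev/pci opened by the
 * caller):
 */
#if 0	/* Illustrative sketch only; not part of this change. */
struct pci_conf matches[32];
struct pci_conf_io cio = {
	.match_buf_len = sizeof(matches),
	.matches = matches,
};

do {
	if (ioctl(fd, PCIOCGETCONF, &cio) == -1)
		break;
	if (cio.status == PCI_GETCONF_LIST_CHANGED) {
		cio.offset = 0;		/* list changed: start over */
		continue;
	}
	/* Consume cio.num_matches entries of matches[] here. */
} while (cio.status == PCI_GETCONF_MORE_DEVS);
#endif
/*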
*/ if (dinfo == NULL) cio->status = PCI_GETCONF_LAST_DEVICE; else cio->status = PCI_GETCONF_MORE_DEVS; getconfexit: pci_conf_io_update_data(cio, data, cmd); free(cio, M_TEMP); free(pattern_buf, M_TEMP); break; #ifdef PRE7_COMPAT case PCIOCREAD_OLD: case PCIOCWRITE_OLD: io_old = (struct pci_io_old *)data; iodata.pi_sel.pc_domain = 0; iodata.pi_sel.pc_bus = io_old->pi_sel.pc_bus; iodata.pi_sel.pc_dev = io_old->pi_sel.pc_dev; iodata.pi_sel.pc_func = io_old->pi_sel.pc_func; iodata.pi_reg = io_old->pi_reg; iodata.pi_width = io_old->pi_width; iodata.pi_data = io_old->pi_data; data = (caddr_t)&iodata; /* FALLTHROUGH */ #endif case PCIOCREAD: case PCIOCWRITE: io = (struct pci_io *)data; switch(io->pi_width) { case 4: case 2: case 1: /* Make sure register is not negative and aligned. */ if (io->pi_reg < 0 || io->pi_reg & (io->pi_width - 1)) { error = EINVAL; break; } /* * Assume that the user-level bus number is * in fact the physical PCI bus number. * Look up the grandparent, i.e. the bridge device, * so that we can issue configuration space cycles. */ pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func); if (pcidev) { #ifdef PRE7_COMPAT if (cmd == PCIOCWRITE || cmd == PCIOCWRITE_OLD) #else if (cmd == PCIOCWRITE) #endif pci_write_config(pcidev, io->pi_reg, io->pi_data, io->pi_width); #ifdef PRE7_COMPAT else if (cmd == PCIOCREAD_OLD) io_old->pi_data = pci_read_config(pcidev, io->pi_reg, io->pi_width); #endif else io->pi_data = pci_read_config(pcidev, io->pi_reg, io->pi_width); error = 0; } else { #ifdef COMPAT_FREEBSD4 if (cmd == PCIOCREAD_OLD) { io_old->pi_data = -1; error = 0; } else #endif error = ENODEV; } break; default: error = EINVAL; break; } break; case PCIOCGETBAR: bio = (struct pci_bar_io *)data; /* * Assume that the user-level bus number is * in fact the physical PCI bus number. */ pcidev = pci_find_dbsf(bio->pbi_sel.pc_domain, bio->pbi_sel.pc_bus, bio->pbi_sel.pc_dev, bio->pbi_sel.pc_func); if (pcidev == NULL) { error = ENODEV; break; } pm = pci_find_bar(pcidev, bio->pbi_reg); if (pm == NULL) { error = EINVAL; break; } bio->pbi_base = pm->pm_value; bio->pbi_length = (pci_addr_t)1 << pm->pm_size; bio->pbi_enabled = pci_bar_enabled(pcidev, pm); error = 0; break; case PCIOCATTACHED: error = 0; io = (struct pci_io *)data; pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func); if (pcidev != NULL) io->pi_data = device_is_attached(pcidev); else error = ENODEV; break; case PCIOCLISTVPD: lvio = (struct pci_list_vpd_io *)data; /* * Assume that the user-level bus number is * in fact the physical PCI bus number. */ pcidev = pci_find_dbsf(lvio->plvi_sel.pc_domain, lvio->plvi_sel.pc_bus, lvio->plvi_sel.pc_dev, lvio->plvi_sel.pc_func); if (pcidev == NULL) { error = ENODEV; break; } error = pci_list_vpd(pcidev, lvio); break; case PCIOCBARMMAP: pbm = (struct pci_bar_mmap *)data; if ((flag & FWRITE) == 0 && (pbm->pbm_flags & PCIIO_BAR_MMAP_RW) != 0) { error = EPERM; break; } pcidev = pci_find_dbsf(pbm->pbm_sel.pc_domain, pbm->pbm_sel.pc_bus, pbm->pbm_sel.pc_dev, pbm->pbm_sel.pc_func); error = pcidev == NULL ? 
ENODEV : pci_bar_mmap(pcidev, pbm); break; case PCIOCBARIO: pbi = (struct pci_bar_ioreq *)data; pcidev = pci_find_dbsf(pbi->pbi_sel.pc_domain, pbi->pbi_sel.pc_bus, pbi->pbi_sel.pc_dev, pbi->pbi_sel.pc_func); if (pcidev == NULL) { error = ENODEV; break; } error = pci_bar_io(pcidev, pbi); break; default: error = ENOTTY; break; } - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } diff --git a/sys/dev/sdio/sdiob.c b/sys/dev/sdio/sdiob.c index afa111971bf5..559e8c50166a 100644 --- a/sys/dev/sdio/sdiob.c +++ b/sys/dev/sdio/sdiob.c @@ -1,1180 +1,1180 @@ /*- * Copyright (c) 2017 Ilya Bakulin. All rights reserved. * Copyright (c) 2018-2019 The FreeBSD Foundation * * Portions of this software were developed by Björn Zeeb * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * Portions of this software may have been developed with reference to * the SD Simplified Specification. The following disclaimer may apply: * * The following conditions apply to the release of the simplified * specification ("Simplified Specification") by the SD Card Association and * the SD Group. The Simplified Specification is a subset of the complete SD * Specification which is owned by the SD Card Association and the SD * Group. This Simplified Specification is provided on a non-confidential * basis subject to the disclaimers below. Any implementation of the * Simplified Specification may require a license from the SD Card * Association, SD Group, SD-3C LLC or other third parties. * * Disclaimers: * * The information contained in the Simplified Specification is presented only * as a standard specification for SD Cards and SD Host/Ancillary products and * is provided "AS-IS" without any representations or warranties of any * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD * Card Association for any damages, any infringements of patents or other * right of the SD Group, SD-3C LLC, the SD Card Association or any third * parties, which may result from its use. No license is granted by * implication, estoppel or otherwise under any patent or other rights of the * SD Group, SD-3C LLC, the SD Card Association or any third party. 
Nothing
 * herein shall be construed as an obligation by the SD Group, the SD-3C LLC
 * or the SD Card Association to disclose or distribute any technical
 * information, know-how or other confidential information to any third party.
 */

/*
 * Implements the (kernel specific) SDIO parts.
 * This will hide all cam(4) functionality from the SDIO driver implementations
 * which will just be newbus/device(9) and hence look like any other driver for,
 * e.g., PCI.
 * The sdiob(4) parts effectively "translate" between the two worlds, "bridging"
 * messages from MMCCAM to newbus and back.
 */

#include __FBSDID("$FreeBSD$"); #include "opt_cam.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for cam_path */ #include #include #include #include #include "sdio_if.h"

#ifdef DEBUG
#define	DPRINTF(...)		printf(__VA_ARGS__)
#define	DPRINTFDEV(_dev, ...)	device_printf((_dev), __VA_ARGS__)
#else
#define	DPRINTF(...)
#define	DPRINTFDEV(_dev, ...)
#endif

struct sdiob_softc {
	uint32_t		sdio_state;
#define	SDIO_STATE_DEAD			0x0001
#define	SDIO_STATE_INITIALIZING		0x0002
#define	SDIO_STATE_READY		0x0004
	uint32_t		nb_state;
#define	NB_STATE_DEAD			0x0001
#define	NB_STATE_SIM_ADDED		0x0002
#define	NB_STATE_READY			0x0004

	/* CAM side (including sim_dev). */
	struct card_info	cardinfo;
	struct cam_periph	*periph;
	union ccb		*ccb;
	struct task		discover_task;

	/* Newbus side. */
	device_t		dev;	/* Ourselves. */
	device_t		child[8];
};

/* -------------------------------------------------------------------------- */
/*
 * SDIO CMD52 and CMD53 implementations along with wrapper functions for
 * read/write and a CAM periph helper function.
 * These are the backend implementations of the sdio_if.m framework talking
 * through CAM to sdhci.
 * Note: these functions are also called during the early discovery stage when
 * we are not a device(9) yet.  Hence they cannot always use device_printf()
 * to log errors and have to call CAM_DEBUG() during these early stages.
 */

static int
sdioerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{

	return (cam_periph_error(ccb, cam_flags, sense_flags));
}

/* CMD52: direct byte access. */
static int
sdiob_rw_direct_sc(struct sdiob_softc *sc, uint8_t fn, uint32_t addr, bool wr,
    uint8_t *val)
{
	uint32_t arg, flags;
	int error;

	KASSERT((val != NULL), ("%s val passed as NULL\n", __func__));

	if (sc->ccb == NULL)
		sc->ccb = xpt_alloc_ccb();
	else
		memset(sc->ccb, 0, sizeof(*sc->ccb));
	xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NONE);
	CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("%s(fn=%d, addr=%#02x, wr=%d, *val=%#02x)\n", __func__,
	    fn, addr, wr, *val));

	flags = MMC_RSP_R5 | MMC_CMD_AC;
	arg = SD_IO_RW_FUNC(fn) | SD_IO_RW_ADR(addr);
	if (wr)
		arg |= SD_IO_RW_WR | SD_IO_RW_RAW | SD_IO_RW_DAT(*val);

	cam_fill_mmcio(&sc->ccb->mmcio,
	    /*retries*/ 0,
	    /*cbfcnp*/ NULL,
	    /*flags*/ CAM_DIR_NONE,
	    /*mmc_opcode*/ SD_IO_RW_DIRECT,
	    /*mmc_arg*/ arg,
	    /*mmc_flags*/ flags,
	    /*mmc_data*/ 0,
	    /*timeout*/ sc->cardinfo.f[fn].timeout);

	error = cam_periph_runccb(sc->ccb, sdioerror, CAM_FLAG_NONE, 0, NULL);
	if (error != 0) {
		if (sc->dev != NULL)
			device_printf(sc->dev,
			    "%s: Failed to %s address %#10x error=%d\n",
			    __func__, (wr) ? "write" : "read", addr, error);
		else
			CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_INFO,
			    ("%s: Failed to %s address: %#10x error=%d\n",
			    __func__, (wr) ? "write" : "read", addr, error));
		return (error);
	}

	/* TODO: Add handling of MMC errors */
	/* ccb->mmcio.cmd.error ?
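 *
 * For reference, a sketch of the CMD52 argument layout assembled
 * above, per the SD Simplified Specification (illustrative only):
 * bit 31 R/W flag, bits 30-28 function number, bit 27 RAW
 * (read-after-write), bits 25-9 register address, bits 7-0 write data.
 */
#if 0	/* Illustrative sketch only; mirrors the macros used above. */
uint32_t arg = SD_IO_RW_FUNC(fn) | SD_IO_RW_ADR(addr);
if (wr)
	arg |= SD_IO_RW_WR | SD_IO_RW_RAW | SD_IO_RW_DAT(*val);
/* On a read, the result byte comes back as resp[0] & 0xff. */
#endif
/*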
*/ if (wr == false) *val = sc->ccb->mmcio.cmd.resp[0] & 0xff; return (0); } static int sdio_rw_direct(device_t dev, uint8_t fn, uint32_t addr, bool wr, uint8_t *val) { struct sdiob_softc *sc; int error; sc = device_get_softc(dev); cam_periph_lock(sc->periph); error = sdiob_rw_direct_sc(sc, fn, addr, wr, val); cam_periph_unlock(sc->periph); return (error); } static int sdiob_read_direct(device_t dev, uint8_t fn, uint32_t addr, uint8_t *val) { int error; uint8_t v; error = sdio_rw_direct(dev, fn, addr, false, &v); /* Be polite and do not touch the value on read error. */ if (error == 0 && val != NULL) *val = v; return (error); } static int sdiob_write_direct(device_t dev, uint8_t fn, uint32_t addr, uint8_t val) { return (sdio_rw_direct(dev, fn, addr, true, &val)); } /* * CMD53: IO_RW_EXTENDED, read and write multiple I/O registers. * Increment false gets FIFO mode (single register address). */ /* * A b_count of 0 means byte mode; b_count > 0 gets block mode. * A b_count of >= 512 would mean an infinite block transfer (which would * become b_count = 0); that is not yet supported. * For b_count == 0, blksz is the length in bytes, otherwise it is the number * of full sized blocks (you must not round the blocks up and leave the last * one partial!) * For byte mode, the maximum blksz is the function's cur_blksize. * This function should only ever be called by sdiob_rw_extended_sc()! */ static int sdiob_rw_extended_cam(struct sdiob_softc *sc, uint8_t fn, uint32_t addr, bool wr, uint8_t *buffer, bool incaddr, uint32_t b_count, uint16_t blksz) { struct mmc_data mmcd; uint32_t arg, cam_flags, flags, len; int error; if (sc->ccb == NULL) sc->ccb = xpt_alloc_ccb(); else memset(sc->ccb, 0, sizeof(*sc->ccb)); xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NONE); CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_TRACE, ("%s(fn=%d addr=%#0x wr=%d b_count=%u blksz=%u buf=%p incr=%d)\n", __func__, fn, addr, wr, b_count, blksz, buffer, incaddr)); KASSERT((b_count <= 511), ("%s: infinite block transfer not yet " "supported: b_count %u blksz %u, sc %p, fn %u, addr %#10x, %s, " "buffer %p, %s\n", __func__, b_count, blksz, sc, fn, addr, wr ? "wr" : "rd", buffer, incaddr ? "incaddr" : "fifo")); /* Blksz needs to be within bounds for both byte and block mode! */ KASSERT((blksz <= sc->cardinfo.f[fn].cur_blksize), ("%s: blksz " "%u > cur_blksize %u, sc %p, fn %u, addr %#10x, %s, " "buffer %p, %s, b_count %u\n", __func__, blksz, sc->cardinfo.f[fn].cur_blksize, sc, fn, addr, wr ? "wr" : "rd", buffer, incaddr ? "incaddr" : "fifo", b_count)); if (b_count == 0) { /* Byte mode */ len = blksz; if (blksz == 512) blksz = 0; arg = SD_IOE_RW_LEN(blksz); } else { /* Block mode. */ #ifdef __notyet__ if (b_count > 511) { /* Infinite block transfer. */ b_count = 0; } #endif len = b_count * blksz; arg = SD_IOE_RW_BLK | SD_IOE_RW_LEN(b_count); } flags = MMC_RSP_R5 | MMC_CMD_ADTC; arg |= SD_IOE_RW_FUNC(fn) | SD_IOE_RW_ADR(addr); if (incaddr) arg |= SD_IOE_RW_INCR; memset(&mmcd, 0, sizeof(mmcd)); mmcd.data = buffer; mmcd.len = len; if (arg & SD_IOE_RW_BLK) { /* XXX both should be known from elsewhere, aren't they? */ mmcd.block_size = blksz; mmcd.block_count = b_count; } if (wr) { arg |= SD_IOE_RW_WR; cam_flags = CAM_DIR_OUT; mmcd.flags = MMC_DATA_WRITE; } else { cam_flags = CAM_DIR_IN; mmcd.flags = MMC_DATA_READ; } #ifdef __notyet__ if (b_count == 0) { /* XXX-BZ TODO FIXME. Cancel I/O: CCCR -> ASx */ /* Stop cmd.
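 * (Terminating such an infinite transfer would require writing the
 * function's number to the I/O Abort (ASx) bits in the CCCR; this is an
 * assumption from the SDIO Simplified Specification, noted here for the
 * unimplemented path.)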
*/ } #endif cam_fill_mmcio(&sc->ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ cam_flags, /*mmc_opcode*/ SD_IO_RW_EXTENDED, /*mmc_arg*/ arg, /*mmc_flags*/ flags, /*mmc_data*/ &mmcd, /*timeout*/ sc->cardinfo.f[fn].timeout); if (arg & SD_IOE_RW_BLK) { mmcd.flags |= MMC_DATA_BLOCK_SIZE; if (b_count != 1) sc->ccb->mmcio.cmd.data->flags |= MMC_DATA_MULTI; } /* Execute. */ error = cam_periph_runccb(sc->ccb, sdioerror, CAM_FLAG_NONE, 0, NULL); if (error != 0) { if (sc->dev != NULL) device_printf(sc->dev, "%s: Failed to %s address %#10x buffer %p size %u " "%s b_count %u blksz %u error=%d\n", __func__, (wr) ? "write to" : "read from", addr, buffer, len, (incaddr) ? "incr" : "fifo", b_count, blksz, error); else CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_INFO, ("%s: Failed to %s address %#10x buffer %p size %u " "%s b_count %u blksz %u error=%d\n", __func__, (wr) ? "write to" : "read from", addr, buffer, len, (incaddr) ? "incr" : "fifo", b_count, blksz, error)); return (error); } /* TODO: Add handling of MMC errors */ /* ccb->mmcio.cmd.error ? */ error = sc->ccb->mmcio.cmd.resp[0] & 0xff; if (error != 0) { if (sc->dev != NULL) device_printf(sc->dev, "%s: Failed to %s address %#10x buffer %p size %u " "%s b_count %u blksz %u mmcio resp error=%d\n", __func__, (wr) ? "write to" : "read from", addr, buffer, len, (incaddr) ? "incr" : "fifo", b_count, blksz, error); else CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_INFO, ("%s: Failed to %s address %#10x buffer %p size %u " "%s b_count %u blksz %u mmcio resp error=%d\n", __func__, (wr) ? "write to" : "read from", addr, buffer, len, (incaddr) ? "incr" : "fifo", b_count, blksz, error)); } return (error); } static int sdiob_rw_extended_sc(struct sdiob_softc *sc, uint8_t fn, uint32_t addr, bool wr, uint32_t size, uint8_t *buffer, bool incaddr) { int error; uint32_t len; uint32_t b_count; /* * If block mode is supported, we have more than 4 bytes to transfer, * and the size is at least one block, then start doing blk transfers. */ while (sc->cardinfo.support_multiblk && size > 4 && size >= sc->cardinfo.f[fn].cur_blksize) { b_count = size / sc->cardinfo.f[fn].cur_blksize; KASSERT(b_count >= 1, ("%s: block count too small %u size %u " "cur_blksize %u\n", __func__, b_count, size, sc->cardinfo.f[fn].cur_blksize)); #ifdef __notyet__ /* XXX support infinite transfer with b_count = 0. */ #else if (b_count > 511) b_count = 511; #endif len = b_count * sc->cardinfo.f[fn].cur_blksize; error = sdiob_rw_extended_cam(sc, fn, addr, wr, buffer, incaddr, b_count, sc->cardinfo.f[fn].cur_blksize); if (error != 0) return (error); size -= len; buffer += len; if (incaddr) addr += len; } while (size > 0) { len = MIN(size, sc->cardinfo.f[fn].cur_blksize); error = sdiob_rw_extended_cam(sc, fn, addr, wr, buffer, incaddr, 0, len); if (error != 0) return (error); /* Prepare for next iteration.
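 * (Worked example, for illustration only: assuming multi-block support,
 * cur_blksize = 512, and an initial size of 1200 bytes, the block-mode
 * loop above issues a single CMD53 with b_count = 2 covering 1024 bytes;
 * the remaining 176 bytes are then transferred here in one byte-mode
 * CMD53 with len = 176.)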
*/ size -= len; buffer += len; if (incaddr) addr += len; } return (0); } static int sdiob_rw_extended(device_t dev, uint8_t fn, uint32_t addr, bool wr, uint32_t size, uint8_t *buffer, bool incaddr) { struct sdiob_softc *sc; int error; sc = device_get_softc(dev); cam_periph_lock(sc->periph); error = sdiob_rw_extended_sc(sc, fn, addr, wr, size, buffer, incaddr); cam_periph_unlock(sc->periph); return (error); } static int sdiob_read_extended(device_t dev, uint8_t fn, uint32_t addr, uint32_t size, uint8_t *buffer, bool incaddr) { return (sdiob_rw_extended(dev, fn, addr, false, size, buffer, incaddr)); } static int sdiob_write_extended(device_t dev, uint8_t fn, uint32_t addr, uint32_t size, uint8_t *buffer, bool incaddr) { return (sdiob_rw_extended(dev, fn, addr, true, size, buffer, incaddr)); } /* -------------------------------------------------------------------------- */ /* Bus interface, ivars handling. */ static int sdiob_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct sdiob_softc *sc; struct sdio_func *f; f = device_get_ivars(child); KASSERT(f != NULL, ("%s: dev %p child %p which %d, child ivars NULL\n", __func__, dev, child, which)); switch (which) { case SDIOB_IVAR_SUPPORT_MULTIBLK: sc = device_get_softc(dev); KASSERT(sc != NULL, ("%s: dev %p child %p which %d, sc NULL\n", __func__, dev, child, which)); *result = sc->cardinfo.support_multiblk; break; case SDIOB_IVAR_FUNCTION: *result = (uintptr_t)f; break; case SDIOB_IVAR_FUNCNUM: *result = f->fn; break; case SDIOB_IVAR_CLASS: *result = f->class; break; case SDIOB_IVAR_VENDOR: *result = f->vendor; break; case SDIOB_IVAR_DEVICE: *result = f->device; break; case SDIOB_IVAR_DRVDATA: *result = f->drvdata; break; default: return (ENOENT); } return (0); } static int sdiob_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct sdio_func *f; f = device_get_ivars(child); KASSERT(f != NULL, ("%s: dev %p child %p which %d, child ivars NULL\n", __func__, dev, child, which)); switch (which) { case SDIOB_IVAR_SUPPORT_MULTIBLK: case SDIOB_IVAR_FUNCTION: case SDIOB_IVAR_FUNCNUM: case SDIOB_IVAR_CLASS: case SDIOB_IVAR_VENDOR: case SDIOB_IVAR_DEVICE: return (EINVAL); /* Disallowed. */ case SDIOB_IVAR_DRVDATA: f->drvdata = value; break; default: return (ENOENT); } return (0); } /* -------------------------------------------------------------------------- */ /* * Newbus functions for ourselves to probe/attach/detach and become a proper * device(9). Attach will also probe for child devices (another driver * implementing SDIO). */ static int sdiob_probe(device_t dev) { device_set_desc(dev, "SDIO CAM-Newbus bridge"); return (BUS_PROBE_DEFAULT); } static int sdiob_attach(device_t dev) { struct sdiob_softc *sc; int error, i; sc = device_get_softc(dev); if (sc == NULL) return (ENXIO); /* * Now that we are a dev, create one child device per function, * initialize the backpointer, so we can pass them around and * call CAM operations on the parent, and also set the function * itself as ivars, so that we can query/update them. * Do this before any child gets a chance to attach. */ for (i = 0; i < sc->cardinfo.num_funcs; i++) { sc->child[i] = device_add_child(dev, NULL, -1); if (sc->child[i] == NULL) { device_printf(dev, "%s: failed to add child\n", __func__); return (ENXIO); } sc->cardinfo.f[i].dev = sc->child[i]; /* Set the function as ivar to the child device. 
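 * A child function driver can later query these through the standard
 * newbus accessors; a minimal sketch (hypothetical consumer code, not
 * part of this file):
 *
 *	uintptr_t v;
 *
 *	if (BUS_READ_IVAR(device_get_parent(dev), dev,
 *	    SDIOB_IVAR_FUNCNUM, &v) == 0)
 *		printf("SDIO function number: %u\n", (unsigned int)v);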
*/ device_set_ivars(sc->child[i], &sc->cardinfo.f[i]); } /* * No one will ever attach to F0; we do the above to have a "device" * to talk to in a general way in the code. * Also do the probe/attach in a second loop, so that all devices are * present, as some drivers consume more than one device/function and * may play "tricks" that assume devices and ivars are available for * all of them. */ for (i = 1; i < sc->cardinfo.num_funcs; i++) { error = device_probe_and_attach(sc->child[i]); if (error != 0 && bootverbose) device_printf(dev, "%s: device_probe_and_attach(%p %s) " "failed %d for function %d, no child yet\n", __func__, sc->child, device_get_nameunit(sc->child[i]), error, i); } sc->nb_state = NB_STATE_READY; cam_periph_lock(sc->periph); xpt_announce_periph(sc->periph, NULL); cam_periph_unlock(sc->periph); return (0); } static int sdiob_detach(device_t dev) { /* XXX TODO? */ return (EOPNOTSUPP); } /* -------------------------------------------------------------------------- */ /* * driver(9) and device(9) "control plane". * This is what we use when we are making ourselves a device(9) in order to * provide a newbus interface again, as well as the implementation of the * SDIO interface. */ static device_method_t sdiob_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, sdiob_probe), DEVMETHOD(device_attach, sdiob_attach), DEVMETHOD(device_detach, sdiob_detach), /* Bus interface. */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), DEVMETHOD(bus_read_ivar, sdiob_read_ivar), DEVMETHOD(bus_write_ivar, sdiob_write_ivar), /* SDIO interface. */ DEVMETHOD(sdio_read_direct, sdiob_read_direct), DEVMETHOD(sdio_write_direct, sdiob_write_direct), DEVMETHOD(sdio_read_extended, sdiob_read_extended), DEVMETHOD(sdio_write_extended, sdiob_write_extended), DEVMETHOD_END }; static devclass_t sdiob_devclass; static driver_t sdiob_driver = { SDIOB_NAME_S, sdiob_methods, 0 }; /* -------------------------------------------------------------------------- */ /* * CIS related. * Read card and function information and populate the cardinfo structure. */ static int sdio_read_direct_sc(struct sdiob_softc *sc, uint8_t fn, uint32_t addr, uint8_t *val) { int error; uint8_t v; error = sdiob_rw_direct_sc(sc, fn, addr, false, &v); if (error == 0 && val != NULL) *val = v; return (error); } static int sdio_func_read_cis(struct sdiob_softc *sc, uint8_t fn, uint32_t cis_addr) { char cis1_info_buf[256]; char *cis1_info[4]; int start, i, count, ret; uint32_t addr; uint8_t ch, tuple_id, tuple_len, tuple_count, v; /* If we encounter any read errors, abort and return. */ #define ERR_OUT(ret) \ if (ret != 0) \ goto err; ret = 0; /* Used to prevent an infinite loop in case of parse errors.
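 * Each CIS tuple is laid out as { id, link/len, body[len] }. As an
 * example (based on the SDIO/PC Card tuple definitions; given for
 * illustration): a MANFID tuple carries four body bytes, a 16-bit
 * little-endian manufacturer ID followed by a 16-bit little-endian card
 * ID, which is how the SD_IO_CISTPL_MANFID case below assembles the
 * vendor and device values.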
*/ tuple_count = 0; memset(cis1_info_buf, 0, 256); do { addr = cis_addr; ret = sdio_read_direct_sc(sc, 0, addr++, &tuple_id); ERR_OUT(ret); if (tuple_id == SD_IO_CISTPL_END) break; if (tuple_id == 0) { cis_addr++; continue; } ret = sdio_read_direct_sc(sc, 0, addr++, &tuple_len); ERR_OUT(ret); if (tuple_len == 0) { CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: parse error: 0-length tuple %#02x\n", __func__, tuple_id)); return (EIO); } switch (tuple_id) { case SD_IO_CISTPL_VERS_1: addr += 2; for (count = 0, start = 0, i = 0; (count < 4) && ((i + 4) < 256); i++) { ret = sdio_read_direct_sc(sc, 0, addr + i, &ch); ERR_OUT(ret); DPRINTF("%s: count=%d, start=%d, i=%d, got " "(%#02x)\n", __func__, count, start, i, ch); if (ch == 0xff) break; cis1_info_buf[i] = ch; if (ch == 0) { cis1_info[count] = cis1_info_buf + start; start = i + 1; count++; } } DPRINTF("Card info: "); for (i = 0; i < 4; i++) if (cis1_info[i]) DPRINTF(" %s", cis1_info[i]); DPRINTF("\n"); break; case SD_IO_CISTPL_MANFID: /* TPLMID_MANF */ ret = sdio_read_direct_sc(sc, 0, addr++, &v); ERR_OUT(ret); sc->cardinfo.f[fn].vendor = v; ret = sdio_read_direct_sc(sc, 0, addr++, &v); ERR_OUT(ret); sc->cardinfo.f[fn].vendor |= (v << 8); /* TPLMID_CARD */ ret = sdio_read_direct_sc(sc, 0, addr++, &v); ERR_OUT(ret); sc->cardinfo.f[fn].device = v; ret = sdio_read_direct_sc(sc, 0, addr, &v); ERR_OUT(ret); sc->cardinfo.f[fn].device |= (v << 8); break; case SD_IO_CISTPL_FUNCID: /* Not sure if we need to parse it? */ break; case SD_IO_CISTPL_FUNCE: if (tuple_len < 4) { printf("%s: FUNCE is too short: %d\n", __func__, tuple_len); break; } /* TPLFE_TYPE (Extended Data) */ ret = sdio_read_direct_sc(sc, 0, addr++, &v); ERR_OUT(ret); if (fn == 0) { if (v != 0x00) break; } else { if (v != 0x01) break; addr += 0x0b; } ret = sdio_read_direct_sc(sc, 0, addr, &v); ERR_OUT(ret); sc->cardinfo.f[fn].max_blksize = v; ret = sdio_read_direct_sc(sc, 0, addr+1, &v); ERR_OUT(ret); sc->cardinfo.f[fn].max_blksize |= (v << 8); break; default: CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: Skipping fn %d tuple %d ID %#02x " "len %#02x\n", __func__, fn, tuple_count, tuple_id, tuple_len)); } if (tuple_len == 0xff) { /* Also marks the end of a tuple chain (E1 16.2) */ /* The tuple is valid, hence this check goes at the end. */ break; } cis_addr += 2 + tuple_len; tuple_count++; } while (tuple_count < 20); err: #undef ERR_OUT return (ret); } static int sdio_get_common_cis_addr(struct sdiob_softc *sc, uint32_t *addr) { int error; uint32_t a; uint8_t val; error = sdio_read_direct_sc(sc, 0, SD_IO_CCCR_CISPTR + 0, &val); if (error != 0) goto err; a = val; error = sdio_read_direct_sc(sc, 0, SD_IO_CCCR_CISPTR + 1, &val); if (error != 0) goto err; a |= (val << 8); error = sdio_read_direct_sc(sc, 0, SD_IO_CCCR_CISPTR + 2, &val); if (error != 0) goto err; a |= (val << 16); if (a < SD_IO_CIS_START || a > SD_IO_CIS_START + SD_IO_CIS_SIZE) { err: CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: bad CIS address: %#04x, error %d\n", __func__, a, error)); } else if (error == 0 && addr != NULL) *addr = a; return (error); } static int sdiob_get_card_info(struct sdiob_softc *sc) { struct mmc_params *mmcp; uint32_t cis_addr, fbr_addr; int fn, error; uint8_t fn_max, val; error = sdio_get_common_cis_addr(sc, &cis_addr); if (error != 0) return (-1); memset(&sc->cardinfo, 0, sizeof(sc->cardinfo)); /* F0 must always be present. */ fn = 0; error = sdio_func_read_cis(sc, fn, cis_addr); if (error != 0) return (error); sc->cardinfo.num_funcs++; /* Read CCCR Card Capability.
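 * CCCR registers live in function 0's register space; the per-function
 * FBRs follow at SD_IO_FBR_START * fn, which is why the loop below
 * computes, for example, the CIS pointer of F1 at
 * SD_IO_FBR_START * 1 + SD_IO_FBR_CIS_OFFSET.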
*/ error = sdio_read_direct_sc(sc, 0, SD_IO_CCCR_CARDCAP, &val); if (error != 0) return (error); sc->cardinfo.support_multiblk = (val & CCCR_CC_SMB) ? true : false; DPRINTF("%s: F%d: Vendor %#04x product %#04x max block size %d bytes " "support_multiblk %s\n", __func__, fn, sc->cardinfo.f[fn].vendor, sc->cardinfo.f[fn].device, sc->cardinfo.f[fn].max_blksize, sc->cardinfo.support_multiblk ? "yes" : "no"); /* mmcp->sdio_func_count contains the number of functions w/o F0. */ mmcp = &sc->ccb->ccb_h.path->device->mmc_ident_data; fn_max = MIN(mmcp->sdio_func_count + 1, nitems(sc->cardinfo.f)); for (fn = 1; fn < fn_max; fn++) { fbr_addr = SD_IO_FBR_START * fn + SD_IO_FBR_CIS_OFFSET; error = sdio_read_direct_sc(sc, 0, fbr_addr++, &val); if (error != 0) break; cis_addr = val; error = sdio_read_direct_sc(sc, 0, fbr_addr++, &val); if (error != 0) break; cis_addr |= (val << 8); error = sdio_read_direct_sc(sc, 0, fbr_addr++, &val); if (error != 0) break; cis_addr |= (val << 16); error = sdio_func_read_cis(sc, fn, cis_addr); if (error != 0) break; /* Read the Standard SDIO Function Interface Code. */ fbr_addr = SD_IO_FBR_START * fn; error = sdio_read_direct_sc(sc, 0, fbr_addr++, &val); if (error != 0) break; sc->cardinfo.f[fn].class = (val & 0x0f); if (sc->cardinfo.f[fn].class == 0x0f) { error = sdio_read_direct_sc(sc, 0, fbr_addr, &val); if (error != 0) break; sc->cardinfo.f[fn].class = val; } sc->cardinfo.f[fn].fn = fn; sc->cardinfo.f[fn].cur_blksize = sc->cardinfo.f[fn].max_blksize; sc->cardinfo.f[fn].retries = 0; sc->cardinfo.f[fn].timeout = 5000; DPRINTF("%s: F%d: Class %d Vendor %#04x product %#04x " "max_blksize %d bytes\n", __func__, fn, sc->cardinfo.f[fn].class, sc->cardinfo.f[fn].vendor, sc->cardinfo.f[fn].device, sc->cardinfo.f[fn].max_blksize); if (sc->cardinfo.f[fn].vendor == 0) { DPRINTF("%s: F%d doesn't exist\n", __func__, fn); break; } sc->cardinfo.num_funcs++; } return (error); } /* -------------------------------------------------------------------------- */ /* * CAM periph registration and allocation, and, detached from that, a * discovery task which goes off, reads the card info, and then adds * ourselves to our SIM's device, adding the devclass and registering the * driver. This keeps the newbus chain connected even though we will talk * CAM in the middle (until one day CAM might be newbusyfied). */ static int sdio_newbus_sim_add(struct sdiob_softc *sc) { device_t pdev; devclass_t bus_devclass; int error; /* * Add ourselves to our parent (SIM) device. That way we can become * a parent ourselves. */ KASSERT(sc->periph->sim->sim_dev != NULL, ("%s: sim_dev is NULL, sc %p " "periph %p sim %p\n", __func__, sc, sc->periph, sc->periph->sim)); if (sc->dev == NULL) sc->dev = BUS_ADD_CHILD(sc->periph->sim->sim_dev, 0, SDIOB_NAME_S, -1); if (sc->dev == NULL) return (ENXIO); device_set_softc(sc->dev, sc); /* * Don't set description here; devclass_add_driver() -> * device_probe_child() -> device_set_driver() will nuke it again.
*/ pdev = device_get_parent(sc->dev); KASSERT(pdev != NULL, ("%s: sc %p dev %p (%s) parent is NULL\n", __func__, sc, sc->dev, device_get_nameunit(sc->dev))); bus_devclass = device_get_devclass(pdev); if (bus_devclass == NULL) { printf("%s: Failed to get devclass from %s.\n", __func__, device_get_nameunit(pdev)); return (ENXIO); } - mtx_lock(&Giant); + bus_topo_lock(); error = devclass_add_driver(bus_devclass, &sdiob_driver, BUS_PASS_DEFAULT, &sdiob_devclass); - mtx_unlock(&Giant); + bus_topo_unlock(); if (error != 0) { printf("%s: Failed to add driver to devclass: %d.\n", __func__, error); return (error); } /* Done. */ sc->nb_state = NB_STATE_SIM_ADDED; return (0); } static void sdiobdiscover(void *context, int pending) { struct cam_periph *periph; struct sdiob_softc *sc; int error; KASSERT(context != NULL, ("%s: context is NULL\n", __func__)); periph = (struct cam_periph *)context; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s\n", __func__)); /* Periph was held for us when this task was enqueued. */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } sc = periph->softc; sc->sdio_state = SDIO_STATE_INITIALIZING; if (sc->ccb == NULL) sc->ccb = xpt_alloc_ccb(); else memset(sc->ccb, 0, sizeof(*sc->ccb)); xpt_setup_ccb(&sc->ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); /* * Read CCCR and FBR of each function, get manufacturer and device IDs, * max block size, and whatever else we deem necessary. */ cam_periph_lock(periph); error = sdiob_get_card_info(sc); if (error == 0) sc->sdio_state = SDIO_STATE_READY; else sc->sdio_state = SDIO_STATE_DEAD; cam_periph_unlock(periph); if (error) return; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s: num_func %d\n", __func__, sc->cardinfo.num_funcs)); /* * Now the CAM portion of the driver has been initialized and * we know the VID/PID of all the functions on the card. * Time to hook into the newbus. */ error = sdio_newbus_sim_add(sc); if (error != 0) sc->nb_state = NB_STATE_DEAD; return; } /* Called at the end of cam_periph_alloc() for us to finish allocation. */ static cam_status sdiobregister(struct cam_periph *periph, void *arg) { struct sdiob_softc *sc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s: arg %p\n", __func__, arg)); if (arg == NULL) { printf("%s: no getdev CCB, can't register device periph %p\n", __func__, periph); return(CAM_REQ_CMP_ERR); } if (periph->sim == NULL || periph->sim->sim_dev == NULL) { printf("%s: no sim %p or sim_dev %p\n", __func__, periph->sim, (periph->sim != NULL) ? periph->sim->sim_dev : NULL); return(CAM_REQ_CMP_ERR); } sc = (struct sdiob_softc *) malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT|M_ZERO); if (sc == NULL) { printf("%s: unable to allocate sc\n", __func__); return (CAM_REQ_CMP_ERR); } sc->sdio_state = SDIO_STATE_DEAD; sc->nb_state = NB_STATE_DEAD; TASK_INIT(&sc->discover_task, 0, sdiobdiscover, periph); /* Refcount until we are setup. Can't block. */ error = cam_periph_hold(periph, PRIBIO); if (error != 0) { printf("%s: lost periph during registration!\n", __func__); free(sc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } periph->softc = sc; sc->periph = periph; cam_periph_unlock(periph); error = taskqueue_enqueue(taskqueue_thread, &sc->discover_task); cam_periph_lock(periph); /* We will continue to hold a refcount for discover_task.
*/ /* cam_periph_unhold(periph); */ xpt_schedule(periph, CAM_PRIORITY_XPT); return (CAM_REQ_CMP); } static void sdioboninvalidate(struct cam_periph *periph) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s:\n", __func__)); return; } static void sdiobcleanup(struct cam_periph *periph) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s:\n", __func__)); return; } static void sdiobstart(struct cam_periph *periph, union ccb *ccb) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("%s: ccb %p\n", __func__, ccb)); return; } static void sdiobasync(void *softc, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct ccb_getdev *cgd; cam_status status; periph = (struct cam_periph *)softc; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("%s(code=%d)\n", __func__, code)); switch (code) { case AC_FOUND_DEVICE: if (arg == NULL) break; cgd = (struct ccb_getdev *)arg; if (cgd->protocol != PROTO_MMCSD) break; /* We do not support SD memory (Combo) Cards. */ if ((path->device->mmc_ident_data.card_features & CARD_FEATURE_MEMORY)) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Memory card, not interested\n")); break; } /* * Allocate a peripheral instance for this device which starts * the probe process. */ status = cam_periph_alloc(sdiobregister, sdioboninvalidate, sdiobcleanup, sdiobstart, SDIOB_NAME_S, CAM_PERIPH_BIO, path, sdiobasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) CAM_DEBUG(path, CAM_DEBUG_PERIPH, ("%s: Unable to attach to new device due to " "status %#02x\n", __func__, status)); break; default: CAM_DEBUG(path, CAM_DEBUG_PERIPH, ("%s: cannot handle async code %#02x\n", __func__, code)); cam_periph_async(periph, code, path, arg); break; } } static void sdiobinit(void) { cam_status status; /* * Register for new device notification. We will be notified for all * already existing ones. */ status = xpt_register_async(AC_FOUND_DEVICE, sdiobasync, NULL, NULL); if (status != CAM_REQ_CMP) printf("%s: Failed to attach async callback, status %#02x\n", __func__, status); } /* This function would allow unloading the KLD; that is currently not supported. */ static int sdiobdeinit(void) { return (EOPNOTSUPP); } static struct periph_driver sdiobdriver = { .init = sdiobinit, .driver_name = SDIOB_NAME_S, .units = TAILQ_HEAD_INITIALIZER(sdiobdriver.units), .generation = 0, .flags = 0, .deinit = sdiobdeinit, }; PERIPHDRIVER_DECLARE(SDIOB_NAME, sdiobdriver); MODULE_VERSION(SDIOB_NAME, 1); diff --git a/sys/dev/twe/twe_freebsd.c b/sys/dev/twe/twe_freebsd.c index c28444228149..75549f5f8f3d 100644 --- a/sys/dev/twe/twe_freebsd.c +++ b/sys/dev/twe/twe_freebsd.c @@ -1,1173 +1,1173 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Michael Smith * Copyright (c) 2003 Paul Saab * Copyright (c) 2003 Vinod Kashyap * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * FreeBSD-specific code. */ #include #include #include #include #include #include static devclass_t twe_devclass; #ifdef TWE_DEBUG static u_int32_t twed_bio_in; #define TWED_BIO_IN twed_bio_in++ static u_int32_t twed_bio_out; #define TWED_BIO_OUT twed_bio_out++ #else #define TWED_BIO_IN #define TWED_BIO_OUT #endif static void twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error); /******************************************************************************** ******************************************************************************** Control device interface ******************************************************************************** ********************************************************************************/ static d_open_t twe_open; static d_close_t twe_close; static d_ioctl_t twe_ioctl_wrapper; static struct cdevsw twe_cdevsw = { .d_version = D_VERSION, .d_open = twe_open, .d_close = twe_close, .d_ioctl = twe_ioctl_wrapper, .d_name = "twe", }; /******************************************************************************** * Accept an open operation on the control device. */ static int twe_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct twe_softc *sc = (struct twe_softc *)dev->si_drv1; TWE_IO_LOCK(sc); if (sc->twe_state & TWE_STATE_DETACHING) { TWE_IO_UNLOCK(sc); return (ENXIO); } sc->twe_state |= TWE_STATE_OPEN; TWE_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Accept the last close on the control device. */ static int twe_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct twe_softc *sc = (struct twe_softc *)dev->si_drv1; TWE_IO_LOCK(sc); sc->twe_state &= ~TWE_STATE_OPEN; TWE_IO_UNLOCK(sc); return (0); } /******************************************************************************** * Handle controller-specific control operations. 
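 * The wrapper only recovers the softc from si_drv1 and forwards to the
 * core twe_ioctl(); all command decoding happens there.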
*/ static int twe_ioctl_wrapper(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct twe_softc *sc = (struct twe_softc *)dev->si_drv1; return(twe_ioctl(sc, cmd, addr)); } /******************************************************************************** ******************************************************************************** PCI device interface ******************************************************************************** ********************************************************************************/ static int twe_probe(device_t dev); static int twe_attach(device_t dev); static void twe_free(struct twe_softc *sc); static int twe_detach(device_t dev); static int twe_shutdown(device_t dev); static int twe_suspend(device_t dev); static int twe_resume(device_t dev); static void twe_pci_intr(void *arg); static void twe_intrhook(void *arg); static device_method_t twe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, twe_probe), DEVMETHOD(device_attach, twe_attach), DEVMETHOD(device_detach, twe_detach), DEVMETHOD(device_shutdown, twe_shutdown), DEVMETHOD(device_suspend, twe_suspend), DEVMETHOD(device_resume, twe_resume), DEVMETHOD_END }; static driver_t twe_pci_driver = { "twe", twe_methods, sizeof(struct twe_softc) }; DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, 0, 0); /******************************************************************************** * Match a 3ware Escalade ATA RAID controller. */ static int twe_probe(device_t dev) { debug_called(4); if ((pci_get_vendor(dev) == TWE_VENDOR_ID) && ((pci_get_device(dev) == TWE_DEVICE_ID) || (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) { device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING); return(BUS_PROBE_DEFAULT); } return(ENXIO); } /******************************************************************************** * Allocate resources, initialise the controller. */ static int twe_attach(device_t dev) { struct twe_softc *sc; struct sysctl_oid *sysctl_tree; int rid, error; debug_called(4); /* * Initialise the softc structure. */ sc = device_get_softc(dev); sc->twe_dev = dev; mtx_init(&sc->twe_io_lock, "twe I/O", NULL, MTX_DEF); sx_init(&sc->twe_config_lock, "twe config"); /* * XXX: This sysctl tree must stay at hw.tweX rather than using * the device_get_sysctl_tree() created by new-bus because * existing 3rd party binary tools such as tw_cli and 3dm2 use the * existence of this sysctl node to discover controllers. */ sysctl_tree = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev), SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); if (sysctl_tree == NULL) { twe_printf(sc, "cannot add sysctl tree node\n"); return (ENXIO); } SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0, "TWE driver version"); /* * Force the busmaster enable bit on, in case the BIOS forgot. */ pci_enable_busmaster(dev); /* * Allocate the PCI register window. */ rid = TWE_IO_CONFIG_REG; if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE)) == NULL) { twe_printf(sc, "can't allocate register window\n"); twe_free(sc); return(ENXIO); } /* * Allocate the parent bus DMA tag appropriate for PCI. 
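 * All other DMA tags below (command, data buffer, and immediate) are
 * created with this tag as their parent and therefore inherit its
 * 32-bit lowaddr restriction.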
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->twe_parent_dmat)) { twe_printf(sc, "can't allocate parent DMA tag\n"); twe_free(sc); return(ENOMEM); } /* * Allocate and connect our interrupt. */ rid = 0; if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { twe_printf(sc, "can't allocate interrupt\n"); twe_free(sc); return(ENXIO); } if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, twe_pci_intr, sc, &sc->twe_intr)) { twe_printf(sc, "can't set up interrupt\n"); twe_free(sc); return(ENXIO); } /* * Create DMA tag for mapping commands into controller-addressable space. */ if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(TWE_Command) * TWE_Q_LENGTH, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->twe_cmd_dmat)) { twe_printf(sc, "can't allocate command DMA tag\n"); twe_free(sc); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd, BUS_DMA_NOWAIT, &sc->twe_cmdmap)) { twe_printf(sc, "can't allocate command memory\n"); return(ENOMEM); } bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH, twe_setup_request_dmamap, sc, 0); bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH); /* * Create DMA tag for mapping objects into controller-addressable space. */ if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE,/* maxsize */ TWE_MAX_SGL_LENGTH, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->twe_io_lock, /* lockarg */ &sc->twe_buffer_dmat)) { twe_printf(sc, "can't allocate data buffer DMA tag\n"); twe_free(sc); return(ENOMEM); } /* * Create DMA tag for mapping immediate requests into controller-addressable space. */ if (bus_dma_tag_create(sc->twe_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DFLTPHYS, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->twe_immediate_dmat)) { twe_printf(sc, "can't allocate immediate request DMA tag\n"); twe_free(sc); return(ENOMEM); } /* * Allocate memory for requests which cannot sleep or support continuation. */ if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate, BUS_DMA_NOWAIT, &sc->twe_immediate_map)) { twe_printf(sc, "can't allocate memory for immediate requests\n"); return(ENOMEM); } /* * Initialise the controller and driver core. */ if ((error = twe_setup(sc))) { twe_free(sc); return(error); } /* * Print some information about the controller and configuration.
*/ twe_describe_controller(sc); /* * Create the control device. */ sc->twe_dev_t = make_dev(&twe_cdevsw, device_get_unit(sc->twe_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "twe%d", device_get_unit(sc->twe_dev)); sc->twe_dev_t->si_drv1 = sc; /* * Schedule ourselves to bring the controller up once interrupts are available. * This isn't strictly necessary, since we disable interrupts while probing the * controller, but it is more in keeping with common practice for other disk * devices. */ sc->twe_ich.ich_func = twe_intrhook; sc->twe_ich.ich_arg = sc; if (config_intrhook_establish(&sc->twe_ich) != 0) { twe_printf(sc, "can't establish configuration hook\n"); twe_free(sc); return(ENXIO); } return(0); } /******************************************************************************** * Free all of the resources associated with (sc). * * Should not be called if the controller is active. */ static void twe_free(struct twe_softc *sc) { struct twe_request *tr; debug_called(4); /* throw away any command buffers */ while ((tr = twe_dequeue_free(sc)) != NULL) twe_free_request(tr); if (sc->twe_cmd != NULL) { bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap); bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap); } if (sc->twe_immediate != NULL) { bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map); bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate, sc->twe_immediate_map); } if (sc->twe_immediate_dmat) bus_dma_tag_destroy(sc->twe_immediate_dmat); /* destroy the data-transfer DMA tag */ if (sc->twe_buffer_dmat) bus_dma_tag_destroy(sc->twe_buffer_dmat); /* disconnect the interrupt handler */ if (sc->twe_intr) bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr); if (sc->twe_irq != NULL) bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq); /* destroy the parent DMA tag */ if (sc->twe_parent_dmat) bus_dma_tag_destroy(sc->twe_parent_dmat); /* release the register window mapping */ if (sc->twe_io != NULL) bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io); /* destroy control device */ if (sc->twe_dev_t != (struct cdev *)NULL) destroy_dev(sc->twe_dev_t); sx_destroy(&sc->twe_config_lock); mtx_destroy(&sc->twe_io_lock); } /******************************************************************************** * Disconnect from the controller completely, in preparation for unload. */ static int twe_detach(device_t dev) { struct twe_softc *sc = device_get_softc(dev); debug_called(4); TWE_IO_LOCK(sc); if (sc->twe_state & TWE_STATE_OPEN) { TWE_IO_UNLOCK(sc); return (EBUSY); } sc->twe_state |= TWE_STATE_DETACHING; TWE_IO_UNLOCK(sc); /* * Shut the controller down. */ if (twe_shutdown(dev)) { TWE_IO_LOCK(sc); sc->twe_state &= ~TWE_STATE_DETACHING; TWE_IO_UNLOCK(sc); return (EBUSY); } twe_free(sc); return(0); } /******************************************************************************** * Bring the controller down to a dormant state and detach all child devices. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ static int twe_shutdown(device_t dev) { struct twe_softc *sc = device_get_softc(dev); int i, error = 0; debug_called(4); /* * Delete all our child devices. */ TWE_CONFIG_LOCK(sc); for (i = 0; i < TWE_MAX_UNITS; i++) { if (sc->twe_drive[i].td_disk != 0) { if ((error = twe_detach_drive(sc, i)) != 0) { TWE_CONFIG_UNLOCK(sc); return (error); } } } TWE_CONFIG_UNLOCK(sc); /* * Bring the controller down. 
*/ TWE_IO_LOCK(sc); twe_deinit(sc); TWE_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Bring the controller to a quiescent state, ready for system suspend. */ static int twe_suspend(device_t dev) { struct twe_softc *sc = device_get_softc(dev); debug_called(4); TWE_IO_LOCK(sc); sc->twe_state |= TWE_STATE_SUSPEND; twe_disable_interrupts(sc); TWE_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Bring the controller back to a state ready for operation. */ static int twe_resume(device_t dev) { struct twe_softc *sc = device_get_softc(dev); debug_called(4); TWE_IO_LOCK(sc); sc->twe_state &= ~TWE_STATE_SUSPEND; twe_enable_interrupts(sc); TWE_IO_UNLOCK(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ static void twe_pci_intr(void *arg) { struct twe_softc *sc = arg; TWE_IO_LOCK(sc); twe_intr(sc); TWE_IO_UNLOCK(sc); } /******************************************************************************** * Delayed-startup hook */ static void twe_intrhook(void *arg) { struct twe_softc *sc = (struct twe_softc *)arg; /* pull ourselves off the intrhook chain */ config_intrhook_disestablish(&sc->twe_ich); /* call core startup routine */ twe_init(sc); } /******************************************************************************** * Given a detected drive, attach it to the bio interface. * * This is called from twe_add_unit. */ int twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr) { char buf[80]; int error; - mtx_lock(&Giant); + bus_topo_lock(); dr->td_disk = device_add_child(sc->twe_dev, NULL, -1); if (dr->td_disk == NULL) { - mtx_unlock(&Giant); + bus_topo_unlock(); twe_printf(sc, "Cannot add unit\n"); return (EIO); } device_set_ivars(dr->td_disk, dr); /* * XXX It would make sense to test the online/initialising bits, but they seem to be * always set... */ sprintf(buf, "Unit %d, %s, %s", dr->td_twe_unit, twe_describe_code(twe_table_unittype, dr->td_type), twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK)); device_set_desc_copy(dr->td_disk, buf); error = device_probe_and_attach(dr->td_disk); - mtx_unlock(&Giant); + bus_topo_unlock(); if (error != 0) { twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error); return (EIO); } return (0); } /******************************************************************************** * Detach the specified unit if it exists * * This is called from twe_del_unit. */ int twe_detach_drive(struct twe_softc *sc, int unit) { int error = 0; TWE_CONFIG_ASSERT_LOCKED(sc); - mtx_lock(&Giant); + bus_topo_lock(); error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk); - mtx_unlock(&Giant); + bus_topo_unlock(); if (error != 0) { twe_printf(sc, "failed to delete unit %d\n", unit); return(error); } bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit])); return(error); } /******************************************************************************** * Clear a PCI parity error. */ void twe_clear_pci_parity_error(struct twe_softc *sc) { TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR); pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2); } /******************************************************************************** * Clear a PCI abort.
*/ void twe_clear_pci_abort(struct twe_softc *sc) { TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT); pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2); } /******************************************************************************** ******************************************************************************** Disk device ******************************************************************************** ********************************************************************************/ /* * Disk device softc */ struct twed_softc { device_t twed_dev; struct twe_softc *twed_controller; /* parent device softc */ struct twe_drive *twed_drive; /* drive data in parent softc */ struct disk *twed_disk; /* generic disk handle */ }; /* * Disk device bus interface */ static int twed_probe(device_t dev); static int twed_attach(device_t dev); static int twed_detach(device_t dev); static device_method_t twed_methods[] = { DEVMETHOD(device_probe, twed_probe), DEVMETHOD(device_attach, twed_attach), DEVMETHOD(device_detach, twed_detach), { 0, 0 } }; static driver_t twed_driver = { "twed", twed_methods, sizeof(struct twed_softc) }; static devclass_t twed_devclass; DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, 0, 0); /* * Disk device control interface. */ /******************************************************************************** * Handle open from generic layer. * * Note that this is typically only called by the diskslice code, and not * for opens on subdevices (eg. slices, partitions). */ static int twed_open(struct disk *dp) { struct twed_softc *sc = (struct twed_softc *)dp->d_drv1; debug_called(4); if (sc == NULL) return (ENXIO); /* check that the controller is up and running */ if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN) return(ENXIO); return (0); } /******************************************************************************** * Handle an I/O request. */ static void twed_strategy(struct bio *bp) { struct twed_softc *sc = bp->bio_disk->d_drv1; debug_called(4); bp->bio_driver1 = &sc->twed_drive->td_twe_unit; TWED_BIO_IN; /* bogus disk? */ if (sc == NULL || sc->twed_drive->td_disk == NULL) { bp->bio_error = EINVAL; bp->bio_flags |= BIO_ERROR; printf("twe: bio for invalid disk!\n"); biodone(bp); TWED_BIO_OUT; return; } /* queue the bio on the controller */ TWE_IO_LOCK(sc->twed_controller); twe_enqueue_bio(sc->twed_controller, bp); /* poke the controller to start I/O */ twe_startio(sc->twed_controller); TWE_IO_UNLOCK(sc->twed_controller); return; } /******************************************************************************** * System crashdump support */ static int twed_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct twed_softc *twed_sc; struct twe_softc *twe_sc; int error; struct disk *dp; dp = arg; twed_sc = (struct twed_softc *)dp->d_drv1; if (twed_sc == NULL) return(ENXIO); twe_sc = (struct twe_softc *)twed_sc->twed_controller; if (length > 0) { if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0) return(error); } return(0); } /******************************************************************************** * Handle completion of an I/O request. */ void twed_intr(struct bio *bp) { debug_called(4); /* if no error, transfer completed */ if (!(bp->bio_flags & BIO_ERROR)) bp->bio_resid = 0; biodone(bp); TWED_BIO_OUT; } /******************************************************************************** * Default probe stub. 
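 * The child device was already created and fully described by
 * twe_attach_drive(), so the probe can unconditionally succeed.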
*/ static int twed_probe(device_t dev) { return (0); } /******************************************************************************** * Attach a unit to the controller. */ static int twed_attach(device_t dev) { struct twed_softc *sc; device_t parent; debug_called(4); /* initialise our softc */ sc = device_get_softc(dev); parent = device_get_parent(dev); sc->twed_controller = (struct twe_softc *)device_get_softc(parent); sc->twed_drive = device_get_ivars(dev); sc->twed_dev = dev; /* report the drive */ twed_printf(sc, "%uMB (%u sectors)\n", sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE), sc->twed_drive->td_size); /* attach a generic disk device to ourselves */ sc->twed_drive->td_sys_unit = device_get_unit(dev); sc->twed_disk = disk_alloc(); sc->twed_disk->d_open = twed_open; sc->twed_disk->d_strategy = twed_strategy; sc->twed_disk->d_dump = (dumper_t *)twed_dump; sc->twed_disk->d_name = "twed"; sc->twed_disk->d_drv1 = sc; sc->twed_disk->d_maxsize = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE; sc->twed_disk->d_sectorsize = TWE_BLOCK_SIZE; sc->twed_disk->d_mediasize = TWE_BLOCK_SIZE * (off_t)sc->twed_drive->td_size; if (sc->twed_drive->td_type == TWE_UD_CONFIG_RAID0 || sc->twed_drive->td_type == TWE_UD_CONFIG_RAID5 || sc->twed_drive->td_type == TWE_UD_CONFIG_RAID10) { sc->twed_disk->d_stripesize = TWE_BLOCK_SIZE << sc->twed_drive->td_stripe; sc->twed_disk->d_stripeoffset = 0; } sc->twed_disk->d_fwsectors = sc->twed_drive->td_sectors; sc->twed_disk->d_fwheads = sc->twed_drive->td_heads; sc->twed_disk->d_unit = sc->twed_drive->td_sys_unit; disk_create(sc->twed_disk, DISK_VERSION); /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */ return (0); } /******************************************************************************** * Disconnect ourselves from the system. */ static int twed_detach(device_t dev) { struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev); debug_called(4); if (sc->twed_disk->d_flags & DISKFLAG_OPEN) return(EBUSY); disk_destroy(sc->twed_disk); return(0); } /******************************************************************************** ******************************************************************************** Misc ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Allocate a command buffer */ static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands"); struct twe_request * twe_allocate_request(struct twe_softc *sc, int tag) { struct twe_request *tr; tr = malloc(sizeof(struct twe_request), TWE_MALLOC_CLASS, M_WAITOK | M_ZERO); tr->tr_sc = sc; tr->tr_tag = tag; if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) { twe_free_request(tr); twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag); return(NULL); } return(tr); } /******************************************************************************** * Permanently discard a command buffer. */ void twe_free_request(struct twe_request *tr) { struct twe_softc *sc = tr->tr_sc; debug_called(4); bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap); free(tr, TWE_MALLOC_CLASS); } /******************************************************************************** * Map/unmap (tr)'s command and data in the controller's addressable space. 
* * These routines ensure that the data which the controller is going to try to * access is actually visible to the controller, in a machine-independent * fashion. Due to a hardware limitation, I/O buffers must be 512-byte aligned * and we take care of that here as well. */ static void twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl) { int i; for (i = 0; i < nsegments; i++) { sgl[i].address = segs[i].ds_addr; sgl[i].length = segs[i].ds_len; } for (; i < max_sgl; i++) { /* XXX necessary? */ sgl[i].address = 0; sgl[i].length = 0; } } static void twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct twe_request *tr = (struct twe_request *)arg; struct twe_softc *sc = tr->tr_sc; TWE_Command *cmd = TWE_FIND_COMMAND(tr); debug_called(4); if (tr->tr_flags & TWE_CMD_MAPPED) panic("already mapped command"); tr->tr_flags |= TWE_CMD_MAPPED; if (tr->tr_flags & TWE_CMD_IN_PROGRESS) sc->twe_state &= ~TWE_STATE_FRZN; /* save base of first segment in command (applicable if there is only one segment) */ tr->tr_dataphys = segs[0].ds_addr; /* correct command size for s/g list size */ cmd->generic.size += 2 * nsegments; /* * Because parameter and I/O commands keep the scatter/gather list in * different places, we need to determine which sort of command this actually is * before we can populate it correctly. */ switch(cmd->generic.opcode) { case TWE_OP_GET_PARAM: case TWE_OP_SET_PARAM: cmd->generic.sgl_offset = 2; twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH); break; case TWE_OP_READ: case TWE_OP_WRITE: cmd->generic.sgl_offset = 3; twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH); break; case TWE_OP_ATA_PASSTHROUGH: cmd->generic.sgl_offset = 5; twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH); break; default: /* * Fall back to what the linux driver does. * Do this because the API may send an opcode * the driver knows nothing about and this will * at least stop PCIABRT's from hosing us.
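 * In other words: trust the sgl_offset already present in the command
 * header and fill in the matching scatter/gather layout.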
*/ switch (cmd->generic.sgl_offset) { case 2: twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH); break; case 3: twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH); break; case 5: twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH); break; } } if (tr->tr_flags & TWE_CMD_DATAIN) { if (tr->tr_flags & TWE_CMD_IMMEDIATE) { bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map, BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap, BUS_DMASYNC_PREREAD); } } if (tr->tr_flags & TWE_CMD_DATAOUT) { /* * if we're using an alignment buffer and we're writing data, * copy the real data out */ if (tr->tr_flags & TWE_CMD_ALIGNBUF) bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length); if (tr->tr_flags & TWE_CMD_IMMEDIATE) { bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map, BUS_DMASYNC_PREWRITE); } else { bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap, BUS_DMASYNC_PREWRITE); } } if (twe_start(tr) == EBUSY) { tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY; twe_requeue_ready(tr); } } static void twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct twe_softc *sc = (struct twe_softc *)arg; debug_called(4); /* command can't cross a page boundary */ sc->twe_cmdphys = segs[0].ds_addr; } int twe_map_request(struct twe_request *tr) { struct twe_softc *sc = tr->tr_sc; int error = 0; debug_called(4); if (!dumping) TWE_IO_ASSERT_LOCKED(sc); if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) { twe_requeue_ready(tr); return (EBUSY); } bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE); /* * If the command involves data, map that too. */ if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) { /* * Data must be aligned to TWE_ALIGNMENT; allocate a fixup buffer if it's not. */ if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) { tr->tr_realdata = tr->tr_data; /* save pointer to 'real' data */ tr->tr_flags |= TWE_CMD_ALIGNBUF; tr->tr_data = malloc(tr->tr_length, TWE_MALLOC_CLASS, M_NOWAIT); if (tr->tr_data == NULL) { twe_printf(sc, "%s: malloc failed\n", __func__); tr->tr_data = tr->tr_realdata; /* restore original data pointer */ return(ENOMEM); } } /* * Map the data buffer into bus space and build the s/g list. */ if (tr->tr_flags & TWE_CMD_IMMEDIATE) { error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate, tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT); } else { error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length, twe_setup_data_dmamap, tr, 0); } if (error == EINPROGRESS) { tr->tr_flags |= TWE_CMD_IN_PROGRESS; sc->twe_state |= TWE_STATE_FRZN; error = 0; } } else if ((error = twe_start(tr)) == EBUSY) { sc->twe_state |= TWE_STATE_CTLR_BUSY; twe_requeue_ready(tr); } return(error); } void twe_unmap_request(struct twe_request *tr) { struct twe_softc *sc = tr->tr_sc; debug_called(4); if (!dumping) TWE_IO_ASSERT_LOCKED(sc); bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE); /* * If the command involved data, unmap that too.
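 * The syncs below mirror the PRE* operations done at map time:
 * POSTREAD for data-in and POSTWRITE for data-out; for reads that went
 * through the alignment bounce buffer, the data is copied back to the
 * caller's buffer afterwards.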
*/ if (tr->tr_data != NULL) { if (tr->tr_flags & TWE_CMD_DATAIN) { if (tr->tr_flags & TWE_CMD_IMMEDIATE) { bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map, BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap, BUS_DMASYNC_POSTREAD); } /* if we're using an alignment buffer, and we're reading data, copy the real data in */ if (tr->tr_flags & TWE_CMD_ALIGNBUF) bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length); } if (tr->tr_flags & TWE_CMD_DATAOUT) { if (tr->tr_flags & TWE_CMD_IMMEDIATE) { bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map, BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap, BUS_DMASYNC_POSTWRITE); } } if (tr->tr_flags & TWE_CMD_IMMEDIATE) { bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map); } else { bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap); } } /* free alignment buffer if it was used */ if (tr->tr_flags & TWE_CMD_ALIGNBUF) { free(tr->tr_data, TWE_MALLOC_CLASS); tr->tr_data = tr->tr_realdata; /* restore 'real' data pointer */ } } #ifdef TWE_DEBUG void twe_report(void); /******************************************************************************** * Print current controller status, call from DDB. */ void twe_report(void) { struct twe_softc *sc; int i; for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++) twe_print_controller(sc); printf("twed: total bio count in %u out %u\n", twed_bio_in, twed_bio_out); } #endif diff --git a/sys/dev/usb/controller/usb_controller.c b/sys/dev/usb/controller/usb_controller.c index fe8e48efa01c..fd9bcd5ee524 100644 --- a/sys/dev/usb/controller/usb_controller.c +++ b/sys/dev/usb/controller/usb_controller.c @@ -1,1036 +1,1036 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usb_ctrl_debug #include #include #include #include #include #include #include #include #include #include #include #include "usb_if.h" #endif /* USB_GLOBAL_INCLUDE_FILE */ /* function prototypes */ static device_probe_t usb_probe; static device_attach_t usb_attach; static device_detach_t usb_detach; static device_suspend_t usb_suspend; static device_resume_t usb_resume; static device_shutdown_t usb_shutdown; static void usb_attach_sub(device_t, struct usb_bus *); /* static variables */ #ifdef USB_DEBUG static int usb_ctrl_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ctrl, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB controller"); SYSCTL_INT(_hw_usb_ctrl, OID_AUTO, debug, CTLFLAG_RWTUN, &usb_ctrl_debug, 0, "Debug level"); #endif #if USB_HAVE_ROOT_MOUNT_HOLD static int usb_no_boot_wait = 0; SYSCTL_INT(_hw_usb, OID_AUTO, no_boot_wait, CTLFLAG_RDTUN, &usb_no_boot_wait, 0, "No USB device enumerate waiting at boot."); #endif static int usb_no_suspend_wait = 0; SYSCTL_INT(_hw_usb, OID_AUTO, no_suspend_wait, CTLFLAG_RWTUN, &usb_no_suspend_wait, 0, "No USB device waiting at system suspend."); static int usb_no_shutdown_wait = 0; SYSCTL_INT(_hw_usb, OID_AUTO, no_shutdown_wait, CTLFLAG_RWTUN, &usb_no_shutdown_wait, 0, "No USB device waiting at system shutdown."); static devclass_t usb_devclass; static device_method_t usb_methods[] = { DEVMETHOD(device_probe, usb_probe), DEVMETHOD(device_attach, usb_attach), DEVMETHOD(device_detach, usb_detach), DEVMETHOD(device_suspend, usb_suspend), DEVMETHOD(device_resume, usb_resume), DEVMETHOD(device_shutdown, usb_shutdown), DEVMETHOD_END }; static driver_t usb_driver = { .name = "usbus", .methods = usb_methods, .size = 0, }; /* Host Only Drivers */ DRIVER_MODULE(usbus, ohci, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, uhci, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, ehci, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, xhci, usb_driver, usb_devclass, 0, 0); /* Device Only Drivers */ DRIVER_MODULE(usbus, musbotg, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, uss820dci, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, octusb, usb_driver, usb_devclass, 0, 0); /* Dual Mode Drivers */ DRIVER_MODULE(usbus, dwcotg, usb_driver, usb_devclass, 0, 0); DRIVER_MODULE(usbus, saf1761otg, usb_driver, usb_devclass, 0, 0); /*------------------------------------------------------------------------* * usb_probe * * This function is called from "{ehci,ohci,uhci}_pci_attach()". 
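 * It unconditionally returns 0 (BUS_PROBE_SPECIFIC): the "usbus" child
 * is added explicitly by the host controller driver, so no other driver
 * competes for it.
 *
 * Editorial usage note: the CTLFLAG_RDTUN knobs above are loader
 * tunables; for example, the line
 *
 *	hw.usb.no_boot_wait="1"
 *
 * in /boot/loader.conf skips the root-mount hold during boot-time
 * enumeration.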
*------------------------------------------------------------------------*/ static int usb_probe(device_t dev) { DPRINTF("\n"); return (0); } #if USB_HAVE_ROOT_MOUNT_HOLD static void usb_root_mount_rel(struct usb_bus *bus) { if (bus->bus_roothold != NULL) { DPRINTF("Releasing root mount hold %p\n", bus->bus_roothold); root_mount_rel(bus->bus_roothold); bus->bus_roothold = NULL; } } #endif /*------------------------------------------------------------------------* * usb_attach *------------------------------------------------------------------------*/ static int usb_attach(device_t dev) { struct usb_bus *bus = device_get_ivars(dev); DPRINTF("\n"); if (bus == NULL) { device_printf(dev, "USB device has no ivars\n"); return (ENXIO); } #if USB_HAVE_ROOT_MOUNT_HOLD if (usb_no_boot_wait == 0) { /* delay vfs_mountroot until the bus is explored */ bus->bus_roothold = root_mount_hold(device_get_nameunit(dev)); } #endif usb_attach_sub(dev, bus); return (0); /* return success */ } /*------------------------------------------------------------------------* * usb_detach *------------------------------------------------------------------------*/ static int usb_detach(device_t dev) { struct usb_bus *bus = device_get_softc(dev); DPRINTF("\n"); if (bus == NULL) { /* was never setup properly */ return (0); } /* Stop power watchdog */ usb_callout_drain(&bus->power_wdog); #if USB_HAVE_ROOT_MOUNT_HOLD /* Let the USB explore process detach all devices. */ usb_root_mount_rel(bus); #endif USB_BUS_LOCK(bus); /* Queue detach job */ usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->detach_msg[0], &bus->detach_msg[1]); /* Wait for detach to complete */ usb_proc_mwait(USB_BUS_EXPLORE_PROC(bus), &bus->detach_msg[0], &bus->detach_msg[1]); #if USB_HAVE_UGEN /* Wait for cleanup to complete */ usb_proc_mwait(USB_BUS_EXPLORE_PROC(bus), &bus->cleanup_msg[0], &bus->cleanup_msg[1]); #endif USB_BUS_UNLOCK(bus); #if USB_HAVE_PER_BUS_PROCESS /* Get rid of USB callback processes */ usb_proc_free(USB_BUS_GIANT_PROC(bus)); usb_proc_free(USB_BUS_NON_GIANT_ISOC_PROC(bus)); usb_proc_free(USB_BUS_NON_GIANT_BULK_PROC(bus)); /* Get rid of USB explore process */ usb_proc_free(USB_BUS_EXPLORE_PROC(bus)); /* Get rid of control transfer process */ usb_proc_free(USB_BUS_CONTROL_XFER_PROC(bus)); #endif #if USB_HAVE_PF usbpf_detach(bus); #endif return (0); } /*------------------------------------------------------------------------* * usb_suspend *------------------------------------------------------------------------*/ static int usb_suspend(device_t dev) { struct usb_bus *bus = device_get_softc(dev); DPRINTF("\n"); if (bus == NULL) { /* was never setup properly */ return (0); } USB_BUS_LOCK(bus); usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->suspend_msg[0], &bus->suspend_msg[1]); if (usb_no_suspend_wait == 0) { /* wait for suspend callback to be executed */ usb_proc_mwait(USB_BUS_EXPLORE_PROC(bus), &bus->suspend_msg[0], &bus->suspend_msg[1]); } USB_BUS_UNLOCK(bus); return (0); } /*------------------------------------------------------------------------* * usb_resume *------------------------------------------------------------------------*/ static int usb_resume(device_t dev) { struct usb_bus *bus = device_get_softc(dev); DPRINTF("\n"); if (bus == NULL) { /* was never setup properly */ return (0); } USB_BUS_LOCK(bus); usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->resume_msg[0], &bus->resume_msg[1]); USB_BUS_UNLOCK(bus); return (0); } /*------------------------------------------------------------------------* * 
usb_bus_reset_async_locked *------------------------------------------------------------------------*/ void usb_bus_reset_async_locked(struct usb_bus *bus) { USB_BUS_LOCK_ASSERT(bus, MA_OWNED); DPRINTF("\n"); if (bus->reset_msg[0].hdr.pm_qentry.tqe_prev != NULL || bus->reset_msg[1].hdr.pm_qentry.tqe_prev != NULL) { DPRINTF("Reset already pending\n"); return; } device_printf(bus->parent, "Resetting controller\n"); usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->reset_msg[0], &bus->reset_msg[1]); } /*------------------------------------------------------------------------* * usb_shutdown *------------------------------------------------------------------------*/ static int usb_shutdown(device_t dev) { struct usb_bus *bus = device_get_softc(dev); DPRINTF("\n"); if (bus == NULL) { /* was never setup properly */ return (0); } DPRINTF("%s: Controller shutdown\n", device_get_nameunit(bus->bdev)); USB_BUS_LOCK(bus); usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->shutdown_msg[0], &bus->shutdown_msg[1]); if (usb_no_shutdown_wait == 0) { /* wait for shutdown callback to be executed */ usb_proc_mwait(USB_BUS_EXPLORE_PROC(bus), &bus->shutdown_msg[0], &bus->shutdown_msg[1]); } USB_BUS_UNLOCK(bus); DPRINTF("%s: Controller shutdown complete\n", device_get_nameunit(bus->bdev)); return (0); } /*------------------------------------------------------------------------* * usb_bus_explore * * This function is used to explore the device tree from the root. *------------------------------------------------------------------------*/ static void usb_bus_explore(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *udev; bus = ((struct usb_bus_msg *)pm)->bus; udev = bus->devices[USB_ROOT_HUB_ADDR]; if (bus->no_explore != 0) return; if (udev != NULL) { USB_BUS_UNLOCK(bus); uhub_explore_handle_re_enumerate(udev); USB_BUS_LOCK(bus); } if (udev != NULL && udev->hub != NULL) { if (bus->do_probe) { bus->do_probe = 0; bus->driver_added_refcount++; } if (bus->driver_added_refcount == 0) { /* avoid zero, hence that is memory default */ bus->driver_added_refcount = 1; } #ifdef DDB /* * The following three lines of code are only here to * recover from DDB: */ usb_proc_rewakeup(USB_BUS_CONTROL_XFER_PROC(bus)); usb_proc_rewakeup(USB_BUS_GIANT_PROC(bus)); usb_proc_rewakeup(USB_BUS_NON_GIANT_ISOC_PROC(bus)); usb_proc_rewakeup(USB_BUS_NON_GIANT_BULK_PROC(bus)); #endif USB_BUS_UNLOCK(bus); #if USB_HAVE_POWERD /* * First update the USB power state! */ usb_bus_powerd(bus); #endif /* Explore the Root USB HUB. */ (udev->hub->explore) (udev); USB_BUS_LOCK(bus); } #if USB_HAVE_ROOT_MOUNT_HOLD usb_root_mount_rel(bus); #endif } /*------------------------------------------------------------------------* * usb_bus_detach * * This function is used to detach the device tree from the root. *------------------------------------------------------------------------*/ static void usb_bus_detach(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *udev; device_t dev; bus = ((struct usb_bus_msg *)pm)->bus; udev = bus->devices[USB_ROOT_HUB_ADDR]; dev = bus->bdev; /* clear the softc */ device_set_softc(dev, NULL); USB_BUS_UNLOCK(bus); /* detach children first */ - mtx_lock(&Giant); + bus_topo_lock(); bus_generic_detach(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); /* * Free USB device and all subdevices, if any. 
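 *
 * Editorial note: bus_generic_detach() above walks and detaches the
 * newbus children of the controller, a device-tree operation that was
 * historically serialized by Giant.  This change takes the dedicated
 * bus topology lock instead, via bus_topo_lock()/bus_topo_unlock().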
*/ usb_free_device(udev, 0); USB_BUS_LOCK(bus); /* clear bdev variable last */ bus->bdev = NULL; } /*------------------------------------------------------------------------* * usb_bus_suspend * * This function is used to suspend the USB controller. *------------------------------------------------------------------------*/ static void usb_bus_suspend(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *udev; usb_error_t err; uint8_t do_unlock; DPRINTF("\n"); bus = ((struct usb_bus_msg *)pm)->bus; udev = bus->devices[USB_ROOT_HUB_ADDR]; if (udev == NULL || bus->bdev == NULL) return; USB_BUS_UNLOCK(bus); /* * We use the shutdown event here because the suspend and * resume events are reserved for the USB port suspend and * resume. The USB system suspend is implemented like full * shutdown and all connected USB devices will be disconnected * subsequently. At resume all USB devices will be * re-connected again. */ bus_generic_shutdown(bus->bdev); do_unlock = usbd_enum_lock(udev); err = usbd_set_config_index(udev, USB_UNCONFIG_INDEX); if (err) device_printf(bus->bdev, "Could not unconfigure root HUB\n"); USB_BUS_LOCK(bus); bus->hw_power_state = 0; bus->no_explore = 1; USB_BUS_UNLOCK(bus); if (bus->methods->set_hw_power != NULL) (bus->methods->set_hw_power) (bus); if (bus->methods->set_hw_power_sleep != NULL) (bus->methods->set_hw_power_sleep) (bus, USB_HW_POWER_SUSPEND); if (do_unlock) usbd_enum_unlock(udev); USB_BUS_LOCK(bus); } /*------------------------------------------------------------------------* * usb_bus_resume * * This function is used to resume the USB controller. *------------------------------------------------------------------------*/ static void usb_bus_resume(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *udev; usb_error_t err; uint8_t do_unlock; DPRINTF("\n"); bus = ((struct usb_bus_msg *)pm)->bus; udev = bus->devices[USB_ROOT_HUB_ADDR]; if (udev == NULL || bus->bdev == NULL) return; USB_BUS_UNLOCK(bus); do_unlock = usbd_enum_lock(udev); #if 0 DEVMETHOD(usb_take_controller, NULL); /* dummy */ #endif USB_TAKE_CONTROLLER(device_get_parent(bus->bdev)); USB_BUS_LOCK(bus); bus->hw_power_state = USB_HW_POWER_CONTROL | USB_HW_POWER_BULK | USB_HW_POWER_INTERRUPT | USB_HW_POWER_ISOC | USB_HW_POWER_NON_ROOT_HUB; bus->no_explore = 0; USB_BUS_UNLOCK(bus); if (bus->methods->set_hw_power_sleep != NULL) (bus->methods->set_hw_power_sleep) (bus, USB_HW_POWER_RESUME); if (bus->methods->set_hw_power != NULL) (bus->methods->set_hw_power) (bus); /* restore USB configuration to index 0 */ err = usbd_set_config_index(udev, 0); if (err) device_printf(bus->bdev, "Could not configure root HUB\n"); /* probe and attach */ err = usb_probe_and_attach(udev, USB_IFACE_INDEX_ANY); if (err) { device_printf(bus->bdev, "Could not probe and " "attach root HUB\n"); } if (do_unlock) usbd_enum_unlock(udev); USB_BUS_LOCK(bus); } /*------------------------------------------------------------------------* * usb_bus_reset * * This function is used to reset the USB controller. *------------------------------------------------------------------------*/ static void usb_bus_reset(struct usb_proc_msg *pm) { struct usb_bus *bus; DPRINTF("\n"); bus = ((struct usb_bus_msg *)pm)->bus; if (bus->bdev == NULL || bus->no_explore != 0) return; /* a suspend and resume will reset the USB controller */ usb_bus_suspend(pm); usb_bus_resume(pm); } /*------------------------------------------------------------------------* * usb_bus_shutdown * * This function is used to shutdown the USB controller. 
*------------------------------------------------------------------------*/ static void usb_bus_shutdown(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *udev; usb_error_t err; uint8_t do_unlock; bus = ((struct usb_bus_msg *)pm)->bus; udev = bus->devices[USB_ROOT_HUB_ADDR]; if (udev == NULL || bus->bdev == NULL) return; USB_BUS_UNLOCK(bus); bus_generic_shutdown(bus->bdev); do_unlock = usbd_enum_lock(udev); err = usbd_set_config_index(udev, USB_UNCONFIG_INDEX); if (err) device_printf(bus->bdev, "Could not unconfigure root HUB\n"); USB_BUS_LOCK(bus); bus->hw_power_state = 0; bus->no_explore = 1; USB_BUS_UNLOCK(bus); if (bus->methods->set_hw_power != NULL) (bus->methods->set_hw_power) (bus); if (bus->methods->set_hw_power_sleep != NULL) (bus->methods->set_hw_power_sleep) (bus, USB_HW_POWER_SHUTDOWN); if (do_unlock) usbd_enum_unlock(udev); USB_BUS_LOCK(bus); } /*------------------------------------------------------------------------* * usb_bus_cleanup * * This function is used to cleanup leftover USB character devices. *------------------------------------------------------------------------*/ #if USB_HAVE_UGEN static void usb_bus_cleanup(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_fs_privdata *pd; bus = ((struct usb_bus_msg *)pm)->bus; while ((pd = LIST_FIRST(&bus->pd_cleanup_list)) != NULL) { LIST_REMOVE(pd, pd_next); USB_BUS_UNLOCK(bus); usb_destroy_dev_sync(pd); USB_BUS_LOCK(bus); } } #endif static void usb_power_wdog(void *arg) { struct usb_bus *bus = arg; USB_BUS_LOCK_ASSERT(bus, MA_OWNED); usb_callout_reset(&bus->power_wdog, 4 * hz, usb_power_wdog, arg); #ifdef DDB /* * The following line of code is only here to recover from * DDB: */ usb_proc_rewakeup(USB_BUS_EXPLORE_PROC(bus)); /* recover from DDB */ #endif #if USB_HAVE_POWERD USB_BUS_UNLOCK(bus); usb_bus_power_update(bus); USB_BUS_LOCK(bus); #endif } /*------------------------------------------------------------------------* * usb_bus_attach * * This function attaches USB in context of the explore thread. 
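 *
 * Editorial aside: usb_power_wdog() above is a self-re-arming callout.
 * Every invocation schedules the next one via usb_callout_reset() with
 * a 4 * hz delay, so bus power state is re-evaluated every four seconds
 * until the callout is drained in usb_detach().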
*------------------------------------------------------------------------*/ static void usb_bus_attach(struct usb_proc_msg *pm) { struct usb_bus *bus; struct usb_device *child; device_t dev; usb_error_t err; enum usb_dev_speed speed; bus = ((struct usb_bus_msg *)pm)->bus; dev = bus->bdev; DPRINTF("\n"); switch (bus->usbrev) { case USB_REV_1_0: speed = USB_SPEED_FULL; device_printf(bus->bdev, "12Mbps Full Speed USB v1.0\n"); break; case USB_REV_1_1: speed = USB_SPEED_FULL; device_printf(bus->bdev, "12Mbps Full Speed USB v1.1\n"); break; case USB_REV_2_0: speed = USB_SPEED_HIGH; device_printf(bus->bdev, "480Mbps High Speed USB v2.0\n"); break; case USB_REV_2_5: speed = USB_SPEED_VARIABLE; device_printf(bus->bdev, "480Mbps Wireless USB v2.5\n"); break; case USB_REV_3_0: speed = USB_SPEED_SUPER; device_printf(bus->bdev, "5.0Gbps Super Speed USB v3.0\n"); break; default: device_printf(bus->bdev, "Unsupported USB revision\n"); #if USB_HAVE_ROOT_MOUNT_HOLD usb_root_mount_rel(bus); #endif return; } /* default power_mask value */ bus->hw_power_state = USB_HW_POWER_CONTROL | USB_HW_POWER_BULK | USB_HW_POWER_INTERRUPT | USB_HW_POWER_ISOC | USB_HW_POWER_NON_ROOT_HUB; USB_BUS_UNLOCK(bus); /* make sure power is set at least once */ if (bus->methods->set_hw_power != NULL) { (bus->methods->set_hw_power) (bus); } /* allocate the Root USB device */ child = usb_alloc_device(bus->bdev, bus, NULL, 0, 0, 1, speed, USB_MODE_HOST); if (child) { err = usb_probe_and_attach(child, USB_IFACE_INDEX_ANY); if (!err) { if ((bus->devices[USB_ROOT_HUB_ADDR] == NULL) || (bus->devices[USB_ROOT_HUB_ADDR]->hub == NULL)) { err = USB_ERR_NO_ROOT_HUB; } } } else { err = USB_ERR_NOMEM; } USB_BUS_LOCK(bus); if (err) { device_printf(bus->bdev, "Root HUB problem, error=%s\n", usbd_errstr(err)); #if USB_HAVE_ROOT_MOUNT_HOLD usb_root_mount_rel(bus); #endif } /* set softc - we are ready */ device_set_softc(dev, bus); /* start watchdog */ usb_power_wdog(bus); } /*------------------------------------------------------------------------* * usb_attach_sub * * This function creates a thread which runs the USB attach code. 
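 * (More precisely, it creates the per-bus worker processes and queues
 * attach_msg[] to the explore process, so that usb_bus_attach() above
 * runs in that process's context.)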
*------------------------------------------------------------------------*/ static void usb_attach_sub(device_t dev, struct usb_bus *bus) { - mtx_lock(&Giant); + bus_topo_lock(); if (usb_devclass_ptr == NULL) usb_devclass_ptr = devclass_find("usbus"); - mtx_unlock(&Giant); + bus_topo_unlock(); #if USB_HAVE_PF usbpf_attach(bus); #endif /* Initialise USB process messages */ bus->explore_msg[0].hdr.pm_callback = &usb_bus_explore; bus->explore_msg[0].bus = bus; bus->explore_msg[1].hdr.pm_callback = &usb_bus_explore; bus->explore_msg[1].bus = bus; bus->detach_msg[0].hdr.pm_callback = &usb_bus_detach; bus->detach_msg[0].bus = bus; bus->detach_msg[1].hdr.pm_callback = &usb_bus_detach; bus->detach_msg[1].bus = bus; bus->attach_msg[0].hdr.pm_callback = &usb_bus_attach; bus->attach_msg[0].bus = bus; bus->attach_msg[1].hdr.pm_callback = &usb_bus_attach; bus->attach_msg[1].bus = bus; bus->suspend_msg[0].hdr.pm_callback = &usb_bus_suspend; bus->suspend_msg[0].bus = bus; bus->suspend_msg[1].hdr.pm_callback = &usb_bus_suspend; bus->suspend_msg[1].bus = bus; bus->resume_msg[0].hdr.pm_callback = &usb_bus_resume; bus->resume_msg[0].bus = bus; bus->resume_msg[1].hdr.pm_callback = &usb_bus_resume; bus->resume_msg[1].bus = bus; bus->reset_msg[0].hdr.pm_callback = &usb_bus_reset; bus->reset_msg[0].bus = bus; bus->reset_msg[1].hdr.pm_callback = &usb_bus_reset; bus->reset_msg[1].bus = bus; bus->shutdown_msg[0].hdr.pm_callback = &usb_bus_shutdown; bus->shutdown_msg[0].bus = bus; bus->shutdown_msg[1].hdr.pm_callback = &usb_bus_shutdown; bus->shutdown_msg[1].bus = bus; #if USB_HAVE_UGEN LIST_INIT(&bus->pd_cleanup_list); bus->cleanup_msg[0].hdr.pm_callback = &usb_bus_cleanup; bus->cleanup_msg[0].bus = bus; bus->cleanup_msg[1].hdr.pm_callback = &usb_bus_cleanup; bus->cleanup_msg[1].bus = bus; #endif #if USB_HAVE_PER_BUS_PROCESS /* Create USB explore and callback processes */ if (usb_proc_create(USB_BUS_GIANT_PROC(bus), &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) { device_printf(dev, "WARNING: Creation of USB Giant " "callback process failed.\n"); } else if (usb_proc_create(USB_BUS_NON_GIANT_ISOC_PROC(bus), &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGHEST)) { device_printf(dev, "WARNING: Creation of USB non-Giant ISOC " "callback process failed.\n"); } else if (usb_proc_create(USB_BUS_NON_GIANT_BULK_PROC(bus), &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGH)) { device_printf(dev, "WARNING: Creation of USB non-Giant BULK " "callback process failed.\n"); } else if (usb_proc_create(USB_BUS_EXPLORE_PROC(bus), &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) { device_printf(dev, "WARNING: Creation of USB explore " "process failed.\n"); } else if (usb_proc_create(USB_BUS_CONTROL_XFER_PROC(bus), &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) { device_printf(dev, "WARNING: Creation of USB control transfer " "process failed.\n"); } else #endif { /* Get final attach going */ USB_BUS_LOCK(bus); usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->attach_msg[0], &bus->attach_msg[1]); USB_BUS_UNLOCK(bus); /* Do initial explore */ usb_needs_explore(bus, 1); } } SYSUNINIT(usb_bus_unload, SI_SUB_KLD, SI_ORDER_ANY, usb_bus_unload, NULL); /*------------------------------------------------------------------------* * usb_bus_mem_flush_all_cb *------------------------------------------------------------------------*/ #if USB_HAVE_BUSDMA static void usb_bus_mem_flush_all_cb(struct usb_bus *bus, struct usb_page_cache *pc, struct usb_page *pg, usb_size_t size, usb_size_t align) { usb_pc_cpu_flush(pc); } 
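
/*
 * Editorial sketch (added in review): every usb_proc message in
 * usb_attach_sub() above comes as a pair, msg[0]/msg[1], because
 * usb_proc_msignal() queues whichever slot is not currently pending.
 * A signal arriving while one copy is still being processed can then
 * be queued on the other slot without any allocation, guaranteeing the
 * handler runs at least once after the last signal.  Queue membership
 * is tracked by NULLing the TAILQ back pointer on dequeue, roughly
 * (assuming the usb_process.c internals):
 *
 *	TAILQ_REMOVE(&up->up_qhead, pm, pm_qentry);
 *	pm->pm_qentry.tqe_prev = NULL;		-- now "not queued"
 *	...
 *	if (pm->pm_qentry.tqe_prev != NULL)
 *		return (pm);			-- already pending
 *
 * This is also the test usb_bus_reset_async_locked() above uses to
 * detect an already-pending reset.
 */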
#endif /*------------------------------------------------------------------------* * usb_bus_mem_flush_all - factored out code *------------------------------------------------------------------------*/ #if USB_HAVE_BUSDMA void usb_bus_mem_flush_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb) { if (cb) { cb(bus, &usb_bus_mem_flush_all_cb); } } #endif /*------------------------------------------------------------------------* * usb_bus_mem_alloc_all_cb *------------------------------------------------------------------------*/ #if USB_HAVE_BUSDMA static void usb_bus_mem_alloc_all_cb(struct usb_bus *bus, struct usb_page_cache *pc, struct usb_page *pg, usb_size_t size, usb_size_t align) { /* need to initialize the page cache */ pc->tag_parent = bus->dma_parent_tag; if (usb_pc_alloc_mem(pc, pg, size, align)) { bus->alloc_failed = 1; } } #endif /*------------------------------------------------------------------------* * usb_bus_mem_alloc_all - factored out code * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ uint8_t usb_bus_mem_alloc_all(struct usb_bus *bus, bus_dma_tag_t dmat, usb_bus_mem_cb_t *cb) { bus->alloc_failed = 0; mtx_init(&bus->bus_mtx, device_get_nameunit(bus->parent), "usb_def_mtx", MTX_DEF | MTX_RECURSE); mtx_init(&bus->bus_spin_lock, device_get_nameunit(bus->parent), "usb_spin_mtx", MTX_SPIN | MTX_RECURSE); usb_callout_init_mtx(&bus->power_wdog, &bus->bus_mtx, 0); TAILQ_INIT(&bus->intr_q.head); #if USB_HAVE_BUSDMA usb_dma_tag_setup(bus->dma_parent_tag, bus->dma_tags, dmat, &bus->bus_mtx, NULL, bus->dma_bits, USB_BUS_DMA_TAG_MAX); #endif if ((bus->devices_max > USB_MAX_DEVICES) || (bus->devices_max < USB_MIN_DEVICES) || (bus->devices == NULL)) { DPRINTFN(0, "Devices field has not been " "initialised properly\n"); bus->alloc_failed = 1; /* failure */ } #if USB_HAVE_BUSDMA if (cb) { cb(bus, &usb_bus_mem_alloc_all_cb); } #endif if (bus->alloc_failed) { usb_bus_mem_free_all(bus, cb); } return (bus->alloc_failed); } /*------------------------------------------------------------------------* * usb_bus_mem_free_all_cb *------------------------------------------------------------------------*/ #if USB_HAVE_BUSDMA static void usb_bus_mem_free_all_cb(struct usb_bus *bus, struct usb_page_cache *pc, struct usb_page *pg, usb_size_t size, usb_size_t align) { usb_pc_free_mem(pc); } #endif /*------------------------------------------------------------------------* * usb_bus_mem_free_all - factored out code *------------------------------------------------------------------------*/ void usb_bus_mem_free_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb) { #if USB_HAVE_BUSDMA if (cb) { cb(bus, &usb_bus_mem_free_all_cb); } usb_dma_tag_unsetup(bus->dma_parent_tag); #endif mtx_destroy(&bus->bus_mtx); mtx_destroy(&bus->bus_spin_lock); } /* convenience wrappers */ void usb_proc_explore_mwait(struct usb_device *udev, void *pm1, void *pm2) { usb_proc_mwait(USB_BUS_EXPLORE_PROC(udev->bus), pm1, pm2); } void * usb_proc_explore_msignal(struct usb_device *udev, void *pm1, void *pm2) { return (usb_proc_msignal(USB_BUS_EXPLORE_PROC(udev->bus), pm1, pm2)); } void usb_proc_explore_lock(struct usb_device *udev) { USB_BUS_LOCK(udev->bus); } void usb_proc_explore_unlock(struct usb_device *udev) { USB_BUS_UNLOCK(udev->bus); } diff --git a/sys/dev/usb/net/if_axe.c b/sys/dev/usb/net/if_axe.c index faef9cd3cb90..eaab2798c242 100644 --- a/sys/dev/usb/net/if_axe.c +++ b/sys/dev/usb/net/if_axe.c @@ -1,1505 +1,1505 @@ /*- * SPDX-License-Identifier: 
BSD-4-Clause * * Copyright (c) 1997, 1998, 1999, 2000-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ASIX Electronics AX88172/AX88178/AX88778 USB 2.0 ethernet driver. * Used in the LinkSys USB200M and various other adapters. * * Manuals available from: * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF * Note: you need the manual for the AX88170 chip (USB 1.x ethernet * controller) to find the definitions for the RX control register. * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF * * Written by Bill Paul * Senior Engineer * Wind River Systems */ /* * The AX88172 provides USB ethernet supports at 10 and 100Mbps. * It uses an external PHY (reference designs use a RealTek chip), * and has a 64-bit multicast hash filter. There is some information * missing from the manual which one needs to know in order to make * the chip function: * * - You must set bit 7 in the RX control register, otherwise the * chip won't receive any packets. * - You must initialize all 3 IPG registers, or you won't be able * to send any packets. * * Note that this device appears to only support loading the station * address via autload from the EEPROM (i.e. there's no way to manually * set it). * * (Adam Weinberger wanted me to name this driver if_gir.c.) */ /* * Ax88178 and Ax88772 support backported from the OpenBSD driver. * 2007/02/12, J.R. 
Oldroyd, fbsd@opal.com * * Manual here: * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR axe_debug #include #include #include #include #include "miibus_if.h" /* * AXE_178_MAX_FRAME_BURST * max frame burst size for Ax88178 and Ax88772 * 0 2048 bytes * 1 4096 bytes * 2 8192 bytes * 3 16384 bytes * use the largest your system can handle without USB stalling. * * NB: 88772 parts appear to generate lots of input errors with * a 2K rx buffer and 8K is only slightly faster than 4K on an * EHCI port on a T42 so change at your own risk. */ #define AXE_178_MAX_FRAME_BURST 1 #define AXE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #ifdef USB_DEBUG static int axe_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB axe"); SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RWTUN, &axe_debug, 0, "Debug level"); #endif /* * Various supported device vendors/products. */ static const STRUCT_USB_HOST_ID axe_devs[] = { #define AXE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) } AXE_DEV(ABOCOM, UF200, 0), AXE_DEV(ACERCM, EP1427X2, 0), AXE_DEV(APPLE, ETHERNET, AXE_FLAG_772), AXE_DEV(ASIX, AX88172, 0), AXE_DEV(ASIX, AX88178, AXE_FLAG_178), AXE_DEV(ASIX, AX88772, AXE_FLAG_772), AXE_DEV(ASIX, AX88772A, AXE_FLAG_772A), AXE_DEV(ASIX, AX88772B, AXE_FLAG_772B), AXE_DEV(ASIX, AX88772B_1, AXE_FLAG_772B), AXE_DEV(ATEN, UC210T, 0), AXE_DEV(BELKIN, F5D5055, AXE_FLAG_178), AXE_DEV(BILLIONTON, USB2AR, 0), AXE_DEV(CISCOLINKSYS, USB200MV2, AXE_FLAG_772A), AXE_DEV(COREGA, FETHER_USB2_TX, 0), AXE_DEV(DLINK, DUBE100, 0), AXE_DEV(DLINK, DUBE100B1, AXE_FLAG_772), AXE_DEV(DLINK, DUBE100C1, AXE_FLAG_772B), AXE_DEV(GOODWAY, GWUSB2E, 0), AXE_DEV(IODATA, ETGUS2, AXE_FLAG_178), AXE_DEV(JVC, MP_PRX1, 0), AXE_DEV(LENOVO, ETHERNET, AXE_FLAG_772B), AXE_DEV(LINKSYS2, USB200M, 0), AXE_DEV(LINKSYS4, USB1000, AXE_FLAG_178), AXE_DEV(LOGITEC, LAN_GTJU2A, AXE_FLAG_178), AXE_DEV(MELCO, LUAU2KTX, 0), AXE_DEV(MELCO, LUA3U2AGT, AXE_FLAG_178), AXE_DEV(NETGEAR, FA120, 0), AXE_DEV(OQO, ETHER01PLUS, AXE_FLAG_772), AXE_DEV(PLANEX3, GU1000T, AXE_FLAG_178), AXE_DEV(SITECOM, LN029, 0), AXE_DEV(SITECOMEU, LN028, AXE_FLAG_178), AXE_DEV(SITECOMEU, LN031, AXE_FLAG_178), AXE_DEV(SYSTEMTALKS, SGCX2UL, 0), #undef AXE_DEV }; static device_probe_t axe_probe; static device_attach_t axe_attach; static device_detach_t axe_detach; static usb_callback_t axe_bulk_read_callback; static usb_callback_t axe_bulk_write_callback; static miibus_readreg_t axe_miibus_readreg; static miibus_writereg_t axe_miibus_writereg; static miibus_statchg_t axe_miibus_statchg; static uether_fn_t axe_attach_post; static uether_fn_t axe_init; static uether_fn_t axe_stop; static uether_fn_t axe_start; static uether_fn_t axe_tick; static uether_fn_t axe_setmulti; static uether_fn_t axe_setpromisc; static int axe_attach_post_sub(struct usb_ether *); static int axe_ifmedia_upd(struct ifnet *); static void axe_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int axe_cmd(struct axe_softc *, int, int, int, void *); static void axe_ax88178_init(struct axe_softc *); static void axe_ax88772_init(struct axe_softc *); static void axe_ax88772_phywake(struct axe_softc *); 
static void axe_ax88772a_init(struct axe_softc *); static void axe_ax88772b_init(struct axe_softc *); static int axe_get_phyno(struct axe_softc *, int); static int axe_ioctl(struct ifnet *, u_long, caddr_t); static int axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int); static int axe_rxeof(struct usb_ether *, struct usb_page_cache *, unsigned int offset, unsigned int, struct axe_csum_hdr *); static void axe_csum_cfg(struct usb_ether *); static const struct usb_config axe_config[AXE_N_TRANSFER] = { [AXE_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .frames = 16, .bufsize = 16 * MCLBYTES, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = axe_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [AXE_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 16384, /* bytes */ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = axe_bulk_read_callback, .timeout = 0, /* no timeout */ }, }; static const struct ax88772b_mfb ax88772b_mfb_table[] = { { 0x8000, 0x8001, 2048 }, { 0x8100, 0x8147, 4096}, { 0x8200, 0x81EB, 6144}, { 0x8300, 0x83D7, 8192}, { 0x8400, 0x851E, 16384}, { 0x8500, 0x8666, 20480}, { 0x8600, 0x87AE, 24576}, { 0x8700, 0x8A3D, 32768} }; static device_method_t axe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, axe_probe), DEVMETHOD(device_attach, axe_attach), DEVMETHOD(device_detach, axe_detach), /* MII interface */ DEVMETHOD(miibus_readreg, axe_miibus_readreg), DEVMETHOD(miibus_writereg, axe_miibus_writereg), DEVMETHOD(miibus_statchg, axe_miibus_statchg), DEVMETHOD_END }; static driver_t axe_driver = { .name = "axe", .methods = axe_methods, .size = sizeof(struct axe_softc), }; static devclass_t axe_devclass; DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, NULL, 0); DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(axe, uether, 1, 1, 1); MODULE_DEPEND(axe, usb, 1, 1, 1); MODULE_DEPEND(axe, ether, 1, 1, 1); MODULE_DEPEND(axe, miibus, 1, 1, 1); MODULE_VERSION(axe, 1); USB_PNP_HOST_INFO(axe_devs); static const struct usb_ether_methods axe_ue_methods = { .ue_attach_post = axe_attach_post, .ue_attach_post_sub = axe_attach_post_sub, .ue_start = axe_start, .ue_init = axe_init, .ue_stop = axe_stop, .ue_tick = axe_tick, .ue_setmulti = axe_setmulti, .ue_setpromisc = axe_setpromisc, .ue_mii_upd = axe_ifmedia_upd, .ue_mii_sts = axe_ifmedia_sts, }; static int axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf) { struct usb_device_request req; usb_error_t err; AXE_LOCK_ASSERT(sc, MA_OWNED); req.bmRequestType = (AXE_CMD_IS_WRITE(cmd) ? UT_WRITE_VENDOR_DEVICE : UT_READ_VENDOR_DEVICE); req.bRequest = AXE_CMD_CMD(cmd); USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, AXE_CMD_LEN(cmd)); err = uether_do_request(&sc->sc_ue, &req, buf, 1000); return (err); } static int axe_miibus_readreg(device_t dev, int phy, int reg) { struct axe_softc *sc = device_get_softc(dev); uint16_t val; int locked; locked = mtx_owned(&sc->sc_mtx); if (!locked) AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL); axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &val); axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL); val = le16toh(val); if (AXE_IS_772(sc) && reg == MII_BMSR) { /* * BMSR of AX88772 indicates that it supports extended * capability but the extended status register is * revered for embedded ethernet PHY. So clear the * extended capability bit of BMSR. 
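 * (That is, the extended status register is reserved on the embedded
 * PHY, so the capability must not be advertised upward.)
 *
 * Editorial aside: axe_cmd() above maps every register access onto a
 * single USB vendor control request.  A rough libusb-1.0 userland
 * analogue, for illustration only (h, cmd, val, index and buf are
 * assumed to exist):
 *
 *	uint8_t rt = AXE_CMD_IS_WRITE(cmd) ?
 *	    (LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR |
 *	    LIBUSB_RECIPIENT_DEVICE) :
 *	    (LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
 *	    LIBUSB_RECIPIENT_DEVICE);
 *	libusb_control_transfer(h, rt, AXE_CMD_CMD(cmd), val, index,
 *	    buf, AXE_CMD_LEN(cmd), 1000);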
*/ val &= ~BMSR_EXTCAP; } if (!locked) AXE_UNLOCK(sc); return (val); } static int axe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct axe_softc *sc = device_get_softc(dev); int locked; val = htole32(val); locked = mtx_owned(&sc->sc_mtx); if (!locked) AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL); axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val); axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL); if (!locked) AXE_UNLOCK(sc); return (0); } static void axe_miibus_statchg(device_t dev) { struct axe_softc *sc = device_get_softc(dev); struct mii_data *mii = GET_MII(sc); struct ifnet *ifp; uint16_t val; int err, locked; locked = mtx_owned(&sc->sc_mtx); if (!locked) AXE_LOCK(sc); ifp = uether_getifp(&sc->sc_ue); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; sc->sc_flags &= ~AXE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->sc_flags |= AXE_FLAG_LINK; break; case IFM_1000_T: if ((sc->sc_flags & AXE_FLAG_178) == 0) break; sc->sc_flags |= AXE_FLAG_LINK; break; default: break; } } /* Lost link, do nothing. */ if ((sc->sc_flags & AXE_FLAG_LINK) == 0) goto done; val = 0; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { val |= AXE_MEDIA_FULL_DUPLEX; if (AXE_IS_178_FAMILY(sc)) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN; } } if (AXE_IS_178_FAMILY(sc)) { val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC; if ((sc->sc_flags & AXE_FLAG_178) != 0) val |= AXE_178_MEDIA_ENCK; switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK; break; case IFM_100_TX: val |= AXE_178_MEDIA_100TX; break; case IFM_10_T: /* doesn't need to be handled */ break; } } err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL); if (err) device_printf(dev, "media change failed, error %d\n", err); done: if (!locked) AXE_UNLOCK(sc); } /* * Set media options. */ static int axe_ifmedia_upd(struct ifnet *ifp) { struct axe_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); struct mii_softc *miisc; int error; AXE_LOCK_ASSERT(sc, MA_OWNED); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); return (error); } /* * Report current media status. 
*/ static void axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct axe_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); AXE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; AXE_UNLOCK(sc); } static u_int axe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint8_t *hashtbl = arg; uint32_t h; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; hashtbl[h / 8] |= 1 << (h % 8); return (1); } static void axe_setmulti(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint16_t rxmode; uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; AXE_LOCK_ASSERT(sc, MA_OWNED); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode); rxmode = le16toh(rxmode); if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { rxmode |= AXE_RXCMD_ALLMULTI; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); return; } rxmode &= ~AXE_RXCMD_ALLMULTI; if_foreach_llmaddr(ifp, axe_hash_maddr, &hashtbl); axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl); axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); } static int axe_get_phyno(struct axe_softc *sc, int sel) { int phyno; switch (AXE_PHY_TYPE(sc->sc_phyaddrs[sel])) { case PHY_TYPE_100_HOME: case PHY_TYPE_GIG: phyno = AXE_PHY_NO(sc->sc_phyaddrs[sel]); break; case PHY_TYPE_SPECIAL: /* FALLTHROUGH */ case PHY_TYPE_RSVD: /* FALLTHROUGH */ case PHY_TYPE_NON_SUP: /* FALLTHROUGH */ default: phyno = -1; break; } return (phyno); } #define AXE_GPIO_WRITE(x, y) do { \ axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL); \ uether_pause(ue, (y)); \ } while (0) static void axe_ax88178_init(struct axe_softc *sc) { struct usb_ether *ue; int gpio0, ledmode, phymode; uint16_t eeprom, val; ue = &sc->sc_ue; axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL); /* XXX magic */ axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom); eeprom = le16toh(eeprom); axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL); /* if EEPROM is invalid we have to use to GPIO0 */ if (eeprom == 0xffff) { phymode = AXE_PHY_MODE_MARVELL; gpio0 = 1; ledmode = 0; } else { phymode = eeprom & 0x7f; gpio0 = (eeprom & 0x80) ? 0 : 1; ledmode = eeprom >> 8; } if (bootverbose) device_printf(sc->sc_ue.ue_dev, "EEPROM data : 0x%04x, phymode : 0x%02x\n", eeprom, phymode); /* Program GPIOs depending on PHY hardware. 
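 *
 * Editorial sketch: axe_hash_maddr() above is the classic 64-bin
 * multicast filter; the top 6 bits of the big-endian Ethernet CRC32
 * (polynomial 0x04c11db7) of the station address select one bit in the
 * 8-byte table, roughly:
 *
 *	crc = 0xffffffff;
 *	for each address byte b, for 8 bits, LSB first:
 *		carry = ((crc >> 31) & 1) ^ (b & 1); b >>= 1;
 *		crc <<= 1;
 *		if (carry)
 *			crc = (crc ^ 0x04c11db6) | 1;
 *	h = crc >> 26;				-- 0..63
 *	hashtbl[h / 8] |= 1 << (h % 8);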
*/ switch (phymode) { case AXE_PHY_MODE_MARVELL: if (gpio0 == 1) { AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN, hz / 32); AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4); AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); } else { AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | AXE_GPIO1_EN, hz / 3); if (ledmode == 1) { AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN, hz / 3); } else { AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); } } break; case AXE_PHY_MODE_CICADA: case AXE_PHY_MODE_CICADA_V2: case AXE_PHY_MODE_CICADA_V2_ASIX: if (gpio0 == 1) AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 | AXE_GPIO0_EN, hz / 32); else AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | AXE_GPIO1_EN, hz / 32); break; case AXE_PHY_MODE_AGERE: AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | AXE_GPIO1_EN, hz / 32); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4); AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); break; case AXE_PHY_MODE_REALTEK_8211CL: case AXE_PHY_MODE_REALTEK_8211BN: case AXE_PHY_MODE_REALTEK_8251CL: val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN : AXE_GPIO1 | AXE_GPIO1_EN; AXE_GPIO_WRITE(val, hz / 32); AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4); AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); if (phymode == AXE_PHY_MODE_REALTEK_8211CL) { axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 0x1F, 0x0005); axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 0x0C, 0x0000); val = axe_miibus_readreg(ue->ue_dev, sc->sc_phyno, 0x0001); axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 0x01, val | 0x0080); axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 0x1F, 0x0000); } break; default: /* Unknown PHY model or no need to program GPIOs. */ break; } /* soft reset */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL); uether_pause(ue, hz / 4); axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL); uether_pause(ue, hz / 4); /* Enable MII/GMII/RGMII interface to work with external PHY. 
*/ axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL); uether_pause(ue, hz / 4); axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); } static void axe_ax88772_init(struct axe_softc *sc) { axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL); uether_pause(&sc->sc_ue, hz / 16); if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) { /* ask for the embedded PHY */ axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x01, NULL); uether_pause(&sc->sc_ue, hz / 64); /* power down and reset state, pin reset state */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL); uether_pause(&sc->sc_ue, hz / 16); /* power down/reset state, pin operating state */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL); uether_pause(&sc->sc_ue, hz / 4); /* power up, reset */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL); /* power up, operating */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL); } else { /* ask for external PHY */ axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x00, NULL); uether_pause(&sc->sc_ue, hz / 64); /* power down internal PHY */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL); } uether_pause(&sc->sc_ue, hz / 4); axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); } static void axe_ax88772_phywake(struct axe_softc *sc) { if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) { /* Manually select internal(embedded) PHY - MAC mode. */ axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB | AXE_SW_PHY_SELECT_EMBEDDED | AXE_SW_PHY_SELECT_SS_MII, NULL); uether_pause(&sc->sc_ue, hz / 32); } else { /* * Manually select external PHY - MAC mode. * Reverse MII/RMII is for AX88772A PHY mode. */ axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB | AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL); uether_pause(&sc->sc_ue, hz / 32); } /* Take PHY out of power down. */ axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD | AXE_SW_RESET_IPRL, NULL); uether_pause(&sc->sc_ue, hz / 4); axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL); uether_pause(&sc->sc_ue, hz); axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL); uether_pause(&sc->sc_ue, hz / 32); axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL); uether_pause(&sc->sc_ue, hz / 32); } static void axe_ax88772a_init(struct axe_softc *sc) { struct usb_ether *ue; ue = &sc->sc_ue; /* Reload EEPROM. */ AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32); axe_ax88772_phywake(sc); /* Stop MAC. */ axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); } static void axe_ax88772b_init(struct axe_softc *sc) { struct usb_ether *ue; uint16_t eeprom; uint8_t *eaddr; int i; ue = &sc->sc_ue; /* Reload EEPROM. */ AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32); /* * Save PHY power saving configuration(high byte) and * clear EEPROM checksum value(low byte). */ axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom); sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00; /* * Auto-loaded default station address from internal ROM is * 00:00:00:00:00:00 such that an explicit access to EEPROM * is required to get real station address. */ eaddr = ue->ue_eaddr; for (i = 0; i < ETHER_ADDR_LEN / 2; i++) { axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i, &eeprom); eeprom = le16toh(eeprom); *eaddr++ = (uint8_t)(eeprom & 0xFF); *eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF); } /* Wakeup PHY. */ axe_ax88772_phywake(sc); /* Stop MAC. 
*/ axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); } #undef AXE_GPIO_WRITE static void axe_reset(struct axe_softc *sc) { struct usb_config_descriptor *cd; usb_error_t err; cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev); err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx, cd->bConfigurationValue); if (err) DPRINTF("reset failed (ignored)\n"); /* Wait a little while for the chip to get its brains in order. */ uether_pause(&sc->sc_ue, hz / 100); /* Reinitialize controller to achieve full reset. */ if (sc->sc_flags & AXE_FLAG_178) axe_ax88178_init(sc); else if (sc->sc_flags & AXE_FLAG_772) axe_ax88772_init(sc); else if (sc->sc_flags & AXE_FLAG_772A) axe_ax88772a_init(sc); else if (sc->sc_flags & AXE_FLAG_772B) axe_ax88772b_init(sc); } static void axe_attach_post(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); /* * Load PHY indexes first. Needed by axe_xxx_init(). */ axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, sc->sc_phyaddrs); if (bootverbose) device_printf(sc->sc_ue.ue_dev, "PHYADDR 0x%02x:0x%02x\n", sc->sc_phyaddrs[0], sc->sc_phyaddrs[1]); sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI); if (sc->sc_phyno == -1) sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC); if (sc->sc_phyno == -1) { device_printf(sc->sc_ue.ue_dev, "no valid PHY address found, assuming PHY address 0\n"); sc->sc_phyno = 0; } /* Initialize controller and get station address. */ if (sc->sc_flags & AXE_FLAG_178) { axe_ax88178_init(sc); axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); } else if (sc->sc_flags & AXE_FLAG_772) { axe_ax88772_init(sc); axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); } else if (sc->sc_flags & AXE_FLAG_772A) { axe_ax88772a_init(sc); axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); } else if (sc->sc_flags & AXE_FLAG_772B) { axe_ax88772b_init(sc); } else axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); /* * Fetch IPG values. */ if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B)) { /* Set IPG values. */ sc->sc_ipgs[0] = 0x15; sc->sc_ipgs[1] = 0x16; sc->sc_ipgs[2] = 0x1A; } else axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs); } static int axe_attach_post_sub(struct usb_ether *ue) { struct axe_softc *sc; struct ifnet *ifp; u_int adv_pause; int error; sc = uether_getsc(ue); ifp = ue->ue_ifp; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = uether_start; ifp->if_ioctl = axe_ioctl; ifp->if_init = uether_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); if (AXE_IS_178_FAMILY(sc)) ifp->if_capabilities |= IFCAP_VLAN_MTU; if (sc->sc_flags & AXE_FLAG_772B) { ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_RXCSUM; ifp->if_hwassist = AXE_CSUM_FEATURES; /* * Checksum offloading of AX88772B also works with VLAN * tagged frames but there is no way to take advantage * of the feature because vlan(4) assumes * IFCAP_VLAN_HWTAGGING is prerequisite condition to * support checksum offloading with VLAN. VLAN hardware * tagging support of AX88772B is very limited so it's * not possible to announce IFCAP_VLAN_HWTAGGING. */ } ifp->if_capenable = ifp->if_capabilities; if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178)) adv_pause = MIIF_DOPAUSE; else adv_pause = 0; - mtx_lock(&Giant); + bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, uether_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause); - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } /* * Probe for a AX88172 chip. 
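 *
 * Editorial note on axe_attach_post_sub() above: mii_attach() creates
 * and probes the miibus/PHY child devices, i.e. it modifies the newbus
 * device tree, which is why this change wraps it in bus_topo_lock()/
 * bus_topo_unlock() instead of Giant, the same conversion made in
 * usb_bus_detach() and usb_attach_sub() earlier in this diff.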
*/ static int axe_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != AXE_CONFIG_IDX) return (ENXIO); if (uaa->info.bIfaceIndex != AXE_IFACE_IDX) return (ENXIO); return (usbd_lookup_id_by_uaa(axe_devs, sizeof(axe_devs), uaa)); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int axe_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct axe_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; uint8_t iface_index; int error; sc->sc_flags = USB_GET_DRIVER_INFO(uaa); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); iface_index = AXE_IFACE_IDX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, axe_config, AXE_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(dev, "allocating USB transfers failed\n"); goto detach; } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &axe_ue_methods; error = uether_ifattach(ue); if (error) { device_printf(dev, "could not attach interface\n"); goto detach; } return (0); /* success */ detach: axe_detach(dev); return (ENXIO); /* failure */ } static int axe_detach(device_t dev) { struct axe_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; usbd_transfer_unsetup(sc->sc_xfer, AXE_N_TRANSFER); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } #if (AXE_BULK_BUF_SIZE >= 0x10000) #error "Please update axe_bulk_read_callback()!" #endif static void axe_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct axe_softc *sc = usbd_xfer_softc(xfer); struct usb_ether *ue = &sc->sc_ue; struct usb_page_cache *pc; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: pc = usbd_xfer_get_frame(xfer, 0); axe_rx_frame(ue, pc, actlen); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); uether_rxflush(ue); return; default: /* Error */ DPRINTF("bulk read error, %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static int axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen) { struct axe_softc *sc; struct axe_sframe_hdr hdr; struct axe_csum_hdr csum_hdr; int error, len, pos; sc = uether_getsc(ue); pos = 0; len = 0; error = 0; if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) { while (pos < actlen) { if ((int)(pos + sizeof(hdr)) > actlen) { /* too little data */ error = EINVAL; break; } usbd_copy_out(pc, pos, &hdr, sizeof(hdr)); if ((hdr.len ^ hdr.ilen) != sc->sc_lenmask) { /* we lost sync */ error = EINVAL; break; } pos += sizeof(hdr); len = le16toh(hdr.len); if (pos + len > actlen) { /* invalid length */ error = EINVAL; break; } axe_rxeof(ue, pc, pos, len, NULL); pos += len + (len % 2); } } else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) { while (pos < actlen) { if ((int)(pos + sizeof(csum_hdr)) > actlen) { /* too little data */ error = EINVAL; break; } usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr)); csum_hdr.len = le16toh(csum_hdr.len); csum_hdr.ilen = le16toh(csum_hdr.ilen); csum_hdr.cstatus = le16toh(csum_hdr.cstatus); if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^ AXE_CSUM_RXBYTES(csum_hdr.ilen)) != sc->sc_lenmask) { /* we lost sync */ error = EINVAL; break; } 
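/*
 * Editorial note: each received sub-frame is prefixed by a 16-bit
 * length and its bitwise complement; XORing the two over the length
 * bits must reproduce sc_lenmask (e.g. 0x1234 ^ 0xedcb == 0xffff), so
 * a mismatch means the header stream lost sync and the remainder of
 * the transfer is dropped.
 */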
/* * Get total transferred frame length including * checksum header. The length should be multiple * of 4. */ len = sizeof(csum_hdr) + AXE_CSUM_RXBYTES(csum_hdr.len); len = (len + 3) & ~3; if (pos + len > actlen) { /* invalid length */ error = EINVAL; break; } axe_rxeof(ue, pc, pos + sizeof(csum_hdr), AXE_CSUM_RXBYTES(csum_hdr.len), &csum_hdr); pos += len; } } else axe_rxeof(ue, pc, 0, actlen, NULL); if (error != 0) if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1); return (error); } static int axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset, unsigned int len, struct axe_csum_hdr *csum_hdr) { struct ifnet *ifp = ue->ue_ifp; struct mbuf *m; if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return (EINVAL); } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (ENOMEM); } m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); usbd_copy_out(pc, offset, mtod(m, uint8_t *), len); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) { if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR | AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) == AXE_CSUM_HDR_L4_TYPE_TCP || (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) == AXE_CSUM_HDR_L4_TYPE_UDP) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } (void)mbufq_enqueue(&ue->ue_rxq, m); return (0); } #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4))) #error "Please update axe_bulk_write_callback()!" #endif static void axe_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct axe_softc *sc = usbd_xfer_softc(xfer); struct axe_sframe_hdr hdr; struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc; struct mbuf *m; int nframes, pos; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete\n"); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if ((sc->sc_flags & AXE_FLAG_LINK) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) { /* * Don't send anything if there is no link or * controller is busy. */ return; } for (nframes = 0; nframes < 16 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, nframes); pos = 0; pc = usbd_xfer_get_frame(xfer, nframes); if (AXE_IS_178_FAMILY(sc)) { hdr.len = htole16(m->m_pkthdr.len); hdr.ilen = ~hdr.len; /* * If upper stack computed checksum, driver * should tell controller not to insert * computed checksum for checksum offloading * enabled controller. */ if (ifp->if_capabilities & IFCAP_TXCSUM) { if ((m->m_pkthdr.csum_flags & AXE_CSUM_FEATURES) != 0) hdr.len |= htole16( AXE_TX_CSUM_PSEUDO_HDR); else hdr.len |= htole16( AXE_TX_CSUM_DIS); } usbd_copy_in(pc, pos, &hdr, sizeof(hdr)); pos += sizeof(hdr); usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); pos += m->m_pkthdr.len; if ((pos % 512) == 0) { hdr.len = 0; hdr.ilen = 0xffff; usbd_copy_in(pc, pos, &hdr, sizeof(hdr)); pos += sizeof(hdr); } } else { usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); pos += m->m_pkthdr.len; } /* * XXX * Update TX packet counter here. 
This is not * correct way but it seems that there is no way * to know how many packets are sent at the end * of transfer because controller combines * multiple writes into single one if there is * room in TX buffer of controller. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* * if there's a BPF listener, bounce a copy * of this frame to him: */ BPF_MTAP(ifp, m); m_freem(m); /* Set frame length. */ usbd_xfer_set_frame_len(xfer, nframes, pos); } if (nframes != 0) { usbd_xfer_set_frames(xfer, nframes); usbd_transfer_submit(xfer); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return; /* NOTREACHED */ default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static void axe_tick(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); struct mii_data *mii = GET_MII(sc); AXE_LOCK_ASSERT(sc, MA_OWNED); mii_tick(mii); if ((sc->sc_flags & AXE_FLAG_LINK) == 0) { axe_miibus_statchg(ue->ue_dev); if ((sc->sc_flags & AXE_FLAG_LINK) != 0) axe_start(ue); } } static void axe_start(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); /* * start the USB transfers, if not already started: */ usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_RD]); usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_WR]); } static void axe_csum_cfg(struct usb_ether *ue) { struct axe_softc *sc; struct ifnet *ifp; uint16_t csum1, csum2; sc = uether_getsc(ue); AXE_LOCK_ASSERT(sc, MA_OWNED); if ((sc->sc_flags & AXE_FLAG_772B) != 0) { ifp = uether_getifp(ue); csum1 = 0; csum2 = 0; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP | AXE_TXCSUM_UDP; axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL); csum1 = 0; csum2 = 0; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE | AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP | AXE_RXCSUM_IGMP; axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL); } } static void axe_init(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint16_t rxmode; AXE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O */ axe_stop(ue); axe_reset(sc); /* Set MAC address and transmitter IPG values. */ if (AXE_IS_178_FAMILY(sc)) { axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp)); axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2], (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL); } else { axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp)); axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL); axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL); axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL); } if (AXE_IS_178_FAMILY(sc)) { sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME); if ((sc->sc_flags & AXE_FLAG_772B) != 0 && (ifp->if_capenable & IFCAP_RXCSUM) != 0) { sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK; sc->sc_flags |= AXE_FLAG_CSUM_FRAME; } else { sc->sc_lenmask = AXE_HDR_LEN_MASK; sc->sc_flags |= AXE_FLAG_STD_FRAME; } } /* Configure TX/RX checksum offloading. */ axe_csum_cfg(ue); if (sc->sc_flags & AXE_FLAG_772B) { /* AX88772B uses different maximum frame burst configuration. 
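 * (Editorial cross-reference: AX88772B_MFB_16K selects the 16384-byte
 * row of ax88772b_mfb_table[] above, threshold/byte-count values
 * 0x8400/0x851E, matching the 16384-byte RX transfer buffer configured
 * in axe_config[].)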
*/ axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG, ax88772b_mfb_table[AX88772B_MFB_16K].threshold, ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL); } /* Enable receiver, set RX mode. */ rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE); if (AXE_IS_178_FAMILY(sc)) { if (sc->sc_flags & AXE_FLAG_772B) { /* * Select RX header format type 1. Aligning IP * header on 4 byte boundary is not needed when * checksum offloading feature is not used * because we always copy the received frame in * RX handler. When RX checksum offloading is * active, aligning IP header is required to * reflect actual frame length including RX * header size. */ rxmode |= AXE_772B_RXCMD_HDR_TYPE_1; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN; } else { /* * Default Rx buffer size is too small to get * maximum performance. */ rxmode |= AXE_178_RXCMD_MFB_16384; } } else { rxmode |= AXE_172_RXCMD_UNICAST; } /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) rxmode |= AXE_RXCMD_PROMISC; if (ifp->if_flags & IFF_BROADCAST) rxmode |= AXE_RXCMD_BROADCAST; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); /* Load the multicast filter. */ axe_setmulti(ue); usbd_xfer_set_stall(sc->sc_xfer[AXE_BULK_DT_WR]); ifp->if_drv_flags |= IFF_DRV_RUNNING; /* Switch to selected media. */ axe_ifmedia_upd(ifp); } static void axe_setpromisc(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint16_t rxmode; axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode); rxmode = le16toh(rxmode); if (ifp->if_flags & IFF_PROMISC) { rxmode |= AXE_RXCMD_PROMISC; } else { rxmode &= ~AXE_RXCMD_PROMISC; } axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); axe_setmulti(ue); } static void axe_stop(struct usb_ether *ue) { struct axe_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); AXE_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~AXE_FLAG_LINK; /* * stop all the transfers, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]); usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]); } static int axe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct usb_ether *ue = ifp->if_softc; struct axe_softc *sc; struct ifreq *ifr; int error, mask, reinit; sc = uether_getsc(ue); ifr = (struct ifreq *)data; error = 0; reinit = 0; if (cmd == SIOCSIFCAP) { AXE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= AXE_CSUM_FEATURES; else ifp->if_hwassist &= ~AXE_CSUM_FEATURES; reinit++; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit++; } if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; else reinit = 0; AXE_UNLOCK(sc); if (reinit > 0) uether_init(ue); } else error = uether_ioctl(ifp, cmd, data); return (error); } diff --git a/sys/dev/usb/net/if_axge.c b/sys/dev/usb/net/if_axge.c index e2f12e209303..e777a8298cd9 100644 --- a/sys/dev/usb/net/if_axge.c +++ b/sys/dev/usb/net/if_axge.c @@ -1,1066 +1,1066 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2014 Kevin Lo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR axge_debug #include #include #include #include #include "miibus_if.h" /* * Various supported device vendors/products. */ static const STRUCT_USB_HOST_ID axge_devs[] = { #define AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } AXGE_DEV(ASIX, AX88178A), AXGE_DEV(ASIX, AX88179), AXGE_DEV(BELKIN, B2B128), AXGE_DEV(DLINK, DUB1312), AXGE_DEV(LENOVO, GIGALAN), AXGE_DEV(SITECOMEU, LN032), #undef AXGE_DEV }; static const struct { uint8_t ctrl; uint8_t timer_l; uint8_t timer_h; uint8_t size; uint8_t ifg; } __packed axge_bulk_size[] = { { 7, 0x4f, 0x00, 0x12, 0xff }, { 7, 0x20, 0x03, 0x16, 0xff }, { 7, 0xae, 0x07, 0x18, 0xff }, { 7, 0xcc, 0x4c, 0x18, 0x08 } }; /* prototypes */ static device_probe_t axge_probe; static device_attach_t axge_attach; static device_detach_t axge_detach; static usb_callback_t axge_bulk_read_callback; static usb_callback_t axge_bulk_write_callback; static miibus_readreg_t axge_miibus_readreg; static miibus_writereg_t axge_miibus_writereg; static miibus_statchg_t axge_miibus_statchg; static uether_fn_t axge_attach_post; static uether_fn_t axge_init; static uether_fn_t axge_stop; static uether_fn_t axge_start; static uether_fn_t axge_tick; static uether_fn_t axge_rxfilter; static int axge_read_mem(struct axge_softc *, uint8_t, uint16_t, uint16_t, void *, int); static void axge_write_mem(struct axge_softc *, uint8_t, uint16_t, uint16_t, void *, int); static uint8_t axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t); static uint16_t axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t, uint16_t); static void axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t, uint8_t); static void axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t, uint16_t, uint16_t); static void axge_chip_init(struct axge_softc *); static void axge_reset(struct axge_softc *); static int axge_attach_post_sub(struct usb_ether *); static int axge_ifmedia_upd(struct ifnet *); static void axge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int axge_ioctl(struct ifnet *, u_long, caddr_t); static void axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int); 
static void axge_rxeof(struct usb_ether *, struct usb_page_cache *, unsigned int, unsigned int, uint32_t); static void axge_csum_cfg(struct usb_ether *); #define AXGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #ifdef USB_DEBUG static int axge_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB axge"); SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0, "Debug level"); #endif static const struct usb_config axge_config[AXGE_N_TRANSFER] = { [AXGE_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .frames = AXGE_N_FRAMES, .bufsize = AXGE_N_FRAMES * MCLBYTES, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = axge_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [AXGE_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 65536, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = axge_bulk_read_callback, .timeout = 0, /* no timeout */ }, }; static device_method_t axge_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, axge_probe), DEVMETHOD(device_attach, axge_attach), DEVMETHOD(device_detach, axge_detach), /* MII interface. */ DEVMETHOD(miibus_readreg, axge_miibus_readreg), DEVMETHOD(miibus_writereg, axge_miibus_writereg), DEVMETHOD(miibus_statchg, axge_miibus_statchg), DEVMETHOD_END }; static driver_t axge_driver = { .name = "axge", .methods = axge_methods, .size = sizeof(struct axge_softc), }; static devclass_t axge_devclass; DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL); DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL); MODULE_DEPEND(axge, uether, 1, 1, 1); MODULE_DEPEND(axge, usb, 1, 1, 1); MODULE_DEPEND(axge, ether, 1, 1, 1); MODULE_DEPEND(axge, miibus, 1, 1, 1); MODULE_VERSION(axge, 1); USB_PNP_HOST_INFO(axge_devs); static const struct usb_ether_methods axge_ue_methods = { .ue_attach_post = axge_attach_post, .ue_attach_post_sub = axge_attach_post_sub, .ue_start = axge_start, .ue_init = axge_init, .ue_stop = axge_stop, .ue_tick = axge_tick, .ue_setmulti = axge_rxfilter, .ue_setpromisc = axge_rxfilter, .ue_mii_upd = axge_ifmedia_upd, .ue_mii_sts = axge_ifmedia_sts, }; static int axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index, uint16_t val, void *buf, int len) { struct usb_device_request req; AXGE_LOCK_ASSERT(sc, MA_OWNED); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = cmd; USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, len); return (uether_do_request(&sc->sc_ue, &req, buf, 1000)); } static void axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index, uint16_t val, void *buf, int len) { struct usb_device_request req; AXGE_LOCK_ASSERT(sc, MA_OWNED); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = cmd; USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, len); if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) { /* Error ignored. 
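 *
 * (Editor's note, a sketch: register writes in this driver are
 * deliberately best-effort; the status of uether_do_request() is
 * discarded here. A checking variant would look like:
 *
 *	if (uether_do_request(&sc->sc_ue, &req, buf, 1000) != 0)
 *		device_printf(sc->sc_ue.ue_dev,
 *		    "vendor write failed\n");
 *
 * but when such a write fails the device is usually detaching, so
 * there is little useful recovery to do.)
 *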
*/ } } static uint8_t axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg) { uint8_t val; axge_read_mem(sc, cmd, 1, reg, &val, 1); return (val); } static uint16_t axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index, uint16_t reg) { uint8_t val[2]; axge_read_mem(sc, cmd, index, reg, &val, 2); return (UGETW(val)); } static void axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val) { axge_write_mem(sc, cmd, 1, reg, &val, 1); } static void axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index, uint16_t reg, uint16_t val) { uint8_t temp[2]; USETW(temp, val); axge_write_mem(sc, cmd, index, reg, &temp, 2); } static int axge_miibus_readreg(device_t dev, int phy, int reg) { struct axge_softc *sc; uint16_t val; int locked; sc = device_get_softc(dev); locked = mtx_owned(&sc->sc_mtx); if (!locked) AXGE_LOCK(sc); val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy); if (!locked) AXGE_UNLOCK(sc); return (val); } static int axge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct axge_softc *sc; int locked; sc = device_get_softc(dev); locked = mtx_owned(&sc->sc_mtx); if (!locked) AXGE_LOCK(sc); axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val); if (!locked) AXGE_UNLOCK(sc); return (0); } static void axge_miibus_statchg(device_t dev) { struct axge_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint8_t link_status, tmp[5]; uint16_t val; int locked; sc = device_get_softc(dev); mii = GET_MII(sc); locked = mtx_owned(&sc->sc_mtx); if (!locked) AXGE_LOCK(sc); ifp = uether_getifp(&sc->sc_ue); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; sc->sc_flags &= ~AXGE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: case IFM_1000_T: sc->sc_flags |= AXGE_FLAG_LINK; break; default: break; } } /* Lost link, do nothing. */ if ((sc->sc_flags & AXGE_FLAG_LINK) == 0) goto done; link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR); val = 0; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { val |= MSR_FD; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= MSR_TFC; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= MSR_RFC; } val |= MSR_RE; switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: val |= MSR_GM | MSR_EN_125MHZ; if (link_status & PLSR_USB_SS) memcpy(tmp, &axge_bulk_size[0], 5); else if (link_status & PLSR_USB_HS) memcpy(tmp, &axge_bulk_size[1], 5); else memcpy(tmp, &axge_bulk_size[3], 5); break; case IFM_100_TX: val |= MSR_PS; if (link_status & (PLSR_USB_SS | PLSR_USB_HS)) memcpy(tmp, &axge_bulk_size[2], 5); else memcpy(tmp, &axge_bulk_size[3], 5); break; case IFM_10_T: memcpy(tmp, &axge_bulk_size[3], 5); break; } /* Rx bulk configuration. */ axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5); axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val); done: if (!locked) AXGE_UNLOCK(sc); } static void axge_chip_init(struct axge_softc *sc) { /* Power up ethernet PHY. 
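 *
 * (Editor's note on axge_miibus_statchg() above, hedged: the 5-byte
 * tuple written to AXGE_RX_BULKIN_QCTRL comes from the
 * axge_bulk_size[] table near the top of this file, laid out as
 * { ctrl, timer_l, timer_h, size, ifg }, and is selected by link
 * and USB speed; for the 1000baseT case:
 *
 *	if (link_status & PLSR_USB_SS)		// GigE on USB 3.x
 *		memcpy(tmp, &axge_bulk_size[0], 5);
 *	else if (link_status & PLSR_USB_HS)	// GigE on USB 2.0
 *		memcpy(tmp, &axge_bulk_size[1], 5);
 *	else
 *		memcpy(tmp, &axge_bulk_size[3], 5);
 * )
 *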
*/ axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0); axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL); uether_pause(&sc->sc_ue, hz / 4); axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS); uether_pause(&sc->sc_ue, hz / 10); } static void axge_reset(struct axge_softc *sc) { struct usb_config_descriptor *cd; usb_error_t err; cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev); err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx, cd->bConfigurationValue); if (err) DPRINTF("reset failed (ignored)\n"); /* Wait a little while for the chip to get its brains in order. */ uether_pause(&sc->sc_ue, hz / 100); /* Reinitialize controller to achieve full reset. */ axge_chip_init(sc); } static void axge_attach_post(struct usb_ether *ue) { struct axge_softc *sc; sc = uether_getsc(ue); /* Initialize controller and get station address. */ axge_chip_init(sc); axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR, ue->ue_eaddr, ETHER_ADDR_LEN); } static int axge_attach_post_sub(struct usb_ether *ue) { struct ifnet *ifp; int error; ifp = ue->ue_ifp; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = uether_start; ifp->if_ioctl = axge_ioctl; ifp->if_init = uether_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM; ifp->if_hwassist = AXGE_CSUM_FEATURES; ifp->if_capenable = ifp->if_capabilities; - mtx_lock(&Giant); + bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, uether_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE); - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } /* * Set media options. */ static int axge_ifmedia_upd(struct ifnet *ifp) { struct axge_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; mii = GET_MII(sc); AXGE_LOCK_ASSERT(sc, MA_OWNED); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); return (error); } /* * Report current media status. */ static void axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct axge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = GET_MII(sc); AXGE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; AXGE_UNLOCK(sc); } /* * Probe for a AX88179 chip. */ static int axge_probe(device_t dev) { struct usb_attach_arg *uaa; uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX) return (ENXIO); if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX) return (ENXIO); return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa)); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/ static int axge_attach(device_t dev) { struct usb_attach_arg *uaa; struct axge_softc *sc; struct usb_ether *ue; uint8_t iface_index; int error; uaa = device_get_ivars(dev); sc = device_get_softc(dev); ue = &sc->sc_ue; device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); iface_index = AXGE_IFACE_IDX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(dev, "allocating USB transfers failed\n"); mtx_destroy(&sc->sc_mtx); return (ENXIO); } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &axge_ue_methods; error = uether_ifattach(ue); if (error) { device_printf(dev, "could not attach interface\n"); goto detach; } return (0); /* success */ detach: axge_detach(dev); return (ENXIO); /* failure */ } static int axge_detach(device_t dev) { struct axge_softc *sc; struct usb_ether *ue; uint16_t val; sc = device_get_softc(dev); ue = &sc->sc_ue; if (device_is_attached(dev)) { /* wait for any post attach or other command to complete */ usb_proc_drain(&ue->ue_tq); AXGE_LOCK(sc); /* * XXX * ether_ifdetach(9) should be called first. */ axge_stop(ue); /* Force bulk-in to return a zero-length USB packet. */ val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR); val |= EPPRCR_BZ | EPPRCR_IPRL; axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val); /* Change clock. */ axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0); /* Disable MAC. */ axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0); AXGE_UNLOCK(sc); } usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } static void axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct axge_softc *sc; struct usb_ether *ue; struct usb_page_cache *pc; int actlen; sc = usbd_xfer_softc(xfer); ue = &sc->sc_ue; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: pc = usbd_xfer_get_frame(xfer, 0); axge_rx_frame(ue, pc, actlen); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); uether_rxflush(ue); break; default: if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct axge_softc *sc; struct ifnet *ifp; struct usb_page_cache *pc; struct mbuf *m; struct axge_frame_txhdr txhdr; int nframes, pos; sc = usbd_xfer_softc(xfer); ifp = uether_getifp(&sc->sc_ue); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) { /* * Don't send anything if there is no link or * controller is busy. 
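 *
 * (Editor's note, hedged: IFF_DRV_OACTIVE is used here as a
 * "one bulk-out transfer in flight" latch. In outline:
 *
 *	USB_ST_TRANSFERRED:			// previous write completed
 *		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 *	USB_ST_SETUP:
 *		if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
 *			return;			// still busy
 *	after queueing frames and submitting:
 *		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 * )
 *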
*/ return; } for (nframes = 0; nframes < AXGE_N_FRAMES && !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, nframes); pc = usbd_xfer_get_frame(xfer, nframes); txhdr.mss = 0; txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len)); if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 && (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0) txhdr.len |= htole32(AXGE_CSUM_DISABLE); pos = 0; usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr)); pos += sizeof(txhdr); usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); pos += m->m_pkthdr.len; /* * if there's a BPF listener, bounce a copy * of this frame to him: */ BPF_MTAP(ifp, m); m_freem(m); /* Set frame length. */ usbd_xfer_set_frame_len(xfer, nframes, pos); } if (nframes != 0) { /* * XXX * Update TX packet counter here. This is not the * correct way, but it seems that there is no way * to know how many packets are sent at the end * of a transfer, because the controller combines * multiple writes into a single one if there is * room in its TX buffer. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes); usbd_xfer_set_frames(xfer, nframes); usbd_transfer_submit(xfer); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return; /* NOTREACHED */ default: if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static void axge_tick(struct usb_ether *ue) { struct axge_softc *sc; struct mii_data *mii; sc = uether_getsc(ue); mii = GET_MII(sc); AXGE_LOCK_ASSERT(sc, MA_OWNED); mii_tick(mii); } static u_int axge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint8_t *hashtbl = arg; uint32_t h; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; hashtbl[h / 8] |= 1 << (h % 8); return (1); } static void axge_rxfilter(struct usb_ether *ue) { struct axge_softc *sc; struct ifnet *ifp; uint16_t rxmode; uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; sc = uether_getsc(ue); ifp = uether_getifp(ue); AXGE_LOCK_ASSERT(sc, MA_OWNED); /* * Configure RX settings. * Don't set RCR_IPE (IP header alignment on a 32-bit boundary), so that * no extra padding bytes are inserted. Padding wastes Ethernet-to-USB- * host bandwidth and complicates the RX handling logic. The current USB * framework requires copying RX frames to mbufs, so there is no need * to worry about alignment. */ rxmode = RCR_DROP_CRCERR | RCR_START; if (ifp->if_flags & IFF_BROADCAST) rxmode |= RCR_ACPT_BCAST; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxmode |= RCR_PROMISC; rxmode |= RCR_ACPT_ALL_MCAST; axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode); return; } rxmode |= RCR_ACPT_MCAST; if_foreach_llmaddr(ifp, axge_hash_maddr, &hashtbl); axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8); axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode); } static void axge_start(struct usb_ether *ue) { struct axge_softc *sc; sc = uether_getsc(ue); /* * Start the USB transfers, if not already started. */ usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]); usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]); } static void axge_init(struct usb_ether *ue) { struct axge_softc *sc; struct ifnet *ifp; sc = uether_getsc(ue); ifp = uether_getifp(ue); AXGE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ axge_stop(ue); axge_reset(sc); /* Set MAC address.
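 *
 * (Editor's worked example for axge_hash_maddr() above:
 * ether_crc32_be() >> 26 keeps the top six bits of the CRC, a value
 * in 0..63 selecting one bit of the 64-bit (8-byte) multicast
 * filter. For h = 26:
 *
 *	hashtbl[26 / 8] |= 1 << (26 % 8);	// bit 2 of hashtbl[3]
 * )
 *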
*/ axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR, IF_LLADDR(ifp), ETHER_ADDR_LEN); axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34); axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52); /* Configure TX/RX checksum offloading. */ axge_csum_cfg(ue); /* Configure RX filters. */ axge_rxfilter(ue); /* * XXX * Controller supports wakeup on link change detection, * magic packet and wakeup frame reception. But it seems * there is no framework for USB Ethernet suspend/wakeup. * Disable all wakeup functions. */ axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0); (void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR); /* Configure default medium type. */ axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD | MSR_RFC | MSR_TFC | MSR_RE); usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]); ifp->if_drv_flags |= IFF_DRV_RUNNING; /* Switch to selected media. */ axge_ifmedia_upd(ifp); } static void axge_stop(struct usb_ether *ue) { struct axge_softc *sc; struct ifnet *ifp; uint16_t val; sc = uether_getsc(ue); ifp = uether_getifp(ue); AXGE_LOCK_ASSERT(sc, MA_OWNED); val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR); val &= ~MSR_RE; axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val); if (ifp != NULL) ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~AXGE_FLAG_LINK; /* * Stop all the transfers, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]); usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]); } static int axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct usb_ether *ue; struct axge_softc *sc; struct ifreq *ifr; int error, mask, reinit; ue = ifp->if_softc; sc = uether_getsc(ue); ifr = (struct ifreq *)data; error = 0; reinit = 0; if (cmd == SIOCSIFCAP) { AXGE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= AXGE_CSUM_FEATURES; else ifp->if_hwassist &= ~AXGE_CSUM_FEATURES; reinit++; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit++; } if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; else reinit = 0; AXGE_UNLOCK(sc); if (reinit > 0) uether_init(ue); } else error = uether_ioctl(ifp, cmd, data); return (error); } static void axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen) { struct axge_frame_rxhdr pkt_hdr; uint32_t rxhdr; uint32_t pos; uint32_t pkt_cnt, pkt_end; uint32_t hdr_off; uint32_t pktlen; /* verify we have enough data */ if (actlen < (int)sizeof(rxhdr)) return; pos = 0; usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr)); rxhdr = le32toh(rxhdr); pkt_cnt = rxhdr & 0xFFFF; hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF; /* * <----------------------- actlen ------------------------> * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr] * Each RX frame is aligned on an 8-byte boundary. If the * RCR_IPE bit is set in the AXGE_RCR register, each RX frame * carries 2 padding bytes plus 6 dummy bytes (as the padding * itself must also be aligned on an 8-byte boundary) to align * the IP header on a 32-bit boundary. The driver doesn't set * the RCR_IPE bit of the AXGE_RCR register, so there are no * padding bytes, which simplifies the RX logic a lot.
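 *
 * (Editor's worked example: the trailing 32-bit word packs the
 * packet count in its low half and the offset of the packet header
 * block in its high half. For a hypothetical rxhdr of 0x01240003:
 *
 *	pkt_cnt = rxhdr & 0xFFFF;		// 3 packets
 *	hdr_off = (rxhdr >> 16) & 0xFFFF;	// headers at offset 0x0124
 * )
 *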
*/ while (pkt_cnt--) { /* verify the header offset */ if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) { DPRINTF("End of packet headers\n"); break; } usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr)); pkt_hdr.status = le32toh(pkt_hdr.status); pktlen = AXGE_RXBYTES(pkt_hdr.status); if (pos + pktlen > pkt_end) { DPRINTF("Data position reached end\n"); break; } if (AXGE_RX_ERR(pkt_hdr.status) != 0) { DPRINTF("Dropped a packet\n"); if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1); } else axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status); pos += (pktlen + 7) & ~7; hdr_off += sizeof(pkt_hdr); } } static void axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset, unsigned int len, uint32_t status) { struct ifnet *ifp; struct mbuf *m; ifp = ue->ue_ifp; if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if (len > MHLEN - ETHER_ALIGN) m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); else m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return; } m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len; m->m_data += ETHER_ALIGN; usbd_copy_out(pc, offset, mtod(m, uint8_t *), len); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { if ((status & AXGE_RX_L3_CSUM_ERR) == 0 && (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; if ((status & AXGE_RX_L4_CSUM_ERR) == 0 && ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP || (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (void)mbufq_enqueue(&ue->ue_rxq, m); } static void axge_csum_cfg(struct usb_ether *ue) { struct axge_softc *sc; struct ifnet *ifp; uint8_t csum; sc = uether_getsc(ue); AXGE_LOCK_ASSERT(sc, MA_OWNED); ifp = uether_getifp(ue); csum = 0; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) csum |= CTCR_IP | CTCR_TCP | CTCR_UDP; axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum); csum = 0; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) csum |= CRCR_IP | CRCR_TCP | CRCR_UDP; axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum); } diff --git a/sys/dev/usb/net/if_muge.c b/sys/dev/usb/net/if_muge.c index f907ff5fc072..439181906153 100644 --- a/sys/dev/usb/net/if_muge.c +++ b/sys/dev/usb/net/if_muge.c @@ -1,2280 +1,2280 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012 Ben Gray . * Copyright (C) 2018 The FreeBSD Foundation. * * This software was developed by Arshan Khanifar * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* * USB-To-Ethernet adapter driver for Microchip's LAN78XX and related families. * * USB 3.1 to 10/100/1000 Mbps Ethernet * LAN7800 http://www.microchip.com/wwwproducts/en/LAN7800 * * USB 2.0 to 10/100/1000 Mbps Ethernet * LAN7850 http://www.microchip.com/wwwproducts/en/LAN7850 * * USB 2 to 10/100/1000 Mbps Ethernet with built-in USB hub * LAN7515 (no datasheet available, but probes and functions as LAN7800) * * This driver is based on the if_smsc driver, with lan78xx-specific * functionality modelled on Microchip's Linux lan78xx driver. * * UNIMPLEMENTED FEATURES * ------------------ * A number of features supported by the lan78xx are not yet implemented in * this driver: * * - TX checksum offloading: Nothing has been implemented yet. * - Direct address translation filtering: Implemented but untested. * - VLAN tag removal. * - Support for USB interrupt endpoints. * - Latency Tolerance Messaging (LTM) support. * - TCP LSO support. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #ifdef FDT #include #include #include #include #endif #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR lan78xx_debug #include #include #include #include #include "miibus_if.h" #ifdef USB_DEBUG static int muge_debug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, muge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Microchip LAN78xx USB-GigE"); SYSCTL_INT(_hw_usb_muge, OID_AUTO, debug, CTLFLAG_RWTUN, &muge_debug, 0, "Debug level"); #endif #define MUGE_DEFAULT_TX_CSUM_ENABLE (false) #define MUGE_DEFAULT_TSO_ENABLE (false) /* Supported Vendor and Product IDs. */ static const struct usb_device_id lan78xx_devs[] = { #define MUGE_DEV(p,i) { USB_VPI(USB_VENDOR_SMC2, USB_PRODUCT_SMC2_##p, i) } MUGE_DEV(LAN7800_ETH, 0), MUGE_DEV(LAN7801_ETH, 0), MUGE_DEV(LAN7850_ETH, 0), #undef MUGE_DEV }; #ifdef USB_DEBUG #define muge_dbg_printf(sc, fmt, args...) \ do { \ if (muge_debug > 0) \ device_printf((sc)->sc_ue.ue_dev, "debug: " fmt, ##args); \ } while(0) #else #define muge_dbg_printf(sc, fmt, args...) do { } while (0) #endif #define muge_warn_printf(sc, fmt, args...) \ device_printf((sc)->sc_ue.ue_dev, "warning: " fmt, ##args) #define muge_err_printf(sc, fmt, args...) \ device_printf((sc)->sc_ue.ue_dev, "error: " fmt, ##args) #define ETHER_IS_VALID(addr) \ (!ETHER_IS_MULTICAST(addr) && !ETHER_IS_ZERO(addr)) /* USB endpoints. */ enum { MUGE_BULK_DT_RD, MUGE_BULK_DT_WR, #if 0 /* Ignore interrupt endpoints for now as we poll on MII status. */ MUGE_INTR_DT_WR, MUGE_INTR_DT_RD, #endif MUGE_N_TRANSFER, }; struct muge_softc { struct usb_ether sc_ue; struct mtx sc_mtx; struct usb_xfer *sc_xfer[MUGE_N_TRANSFER]; int sc_phyno; uint32_t sc_leds; uint16_t sc_led_modes; uint16_t sc_led_modes_mask; /* Settings for the mac control (MAC_CSR) register. 
*/ uint32_t sc_rfe_ctl; uint32_t sc_mdix_ctl; uint16_t chipid; uint16_t chiprev; uint32_t sc_mchash_table[ETH_DP_SEL_VHF_HASH_LEN]; uint32_t sc_pfilter_table[MUGE_NUM_PFILTER_ADDRS_][2]; uint32_t sc_flags; #define MUGE_FLAG_LINK 0x0001 #define MUGE_FLAG_INIT_DONE 0x0002 }; #define MUGE_IFACE_IDX 0 #define MUGE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define MUGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define MUGE_LOCK_ASSERT(_sc, t) mtx_assert(&(_sc)->sc_mtx, t) static device_probe_t muge_probe; static device_attach_t muge_attach; static device_detach_t muge_detach; static usb_callback_t muge_bulk_read_callback; static usb_callback_t muge_bulk_write_callback; static miibus_readreg_t lan78xx_miibus_readreg; static miibus_writereg_t lan78xx_miibus_writereg; static miibus_statchg_t lan78xx_miibus_statchg; static int muge_attach_post_sub(struct usb_ether *ue); static uether_fn_t muge_attach_post; static uether_fn_t muge_init; static uether_fn_t muge_stop; static uether_fn_t muge_start; static uether_fn_t muge_tick; static uether_fn_t muge_setmulti; static uether_fn_t muge_setpromisc; static int muge_ifmedia_upd(struct ifnet *); static void muge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int lan78xx_chip_init(struct muge_softc *sc); static int muge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static const struct usb_config muge_config[MUGE_N_TRANSFER] = { [MUGE_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .frames = 16, .bufsize = 16 * (MCLBYTES + 16), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = muge_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [MUGE_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 20480, /* bytes */ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = muge_bulk_read_callback, .timeout = 0, /* no timeout */ }, /* * The chip supports interrupt endpoints, however they aren't * needed as we poll on the MII status. */ }; static const struct usb_ether_methods muge_ue_methods = { .ue_attach_post = muge_attach_post, .ue_attach_post_sub = muge_attach_post_sub, .ue_start = muge_start, .ue_ioctl = muge_ioctl, .ue_init = muge_init, .ue_stop = muge_stop, .ue_tick = muge_tick, .ue_setmulti = muge_setmulti, .ue_setpromisc = muge_setpromisc, .ue_mii_upd = muge_ifmedia_upd, .ue_mii_sts = muge_ifmedia_sts, }; /** * lan78xx_read_reg - Read a 32-bit register on the device * @sc: driver soft context * @off: offset of the register * @data: pointer a value that will be populated with the register value * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, a USB_ERR_?? error code on failure. */ static int lan78xx_read_reg(struct muge_softc *sc, uint32_t off, uint32_t *data) { struct usb_device_request req; uint32_t buf; usb_error_t err; MUGE_LOCK_ASSERT(sc, MA_OWNED); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UVR_READ_REG; USETW(req.wValue, 0); USETW(req.wIndex, off); USETW(req.wLength, 4); err = uether_do_request(&sc->sc_ue, &req, &buf, 1000); if (err != 0) muge_warn_printf(sc, "Failed to read register 0x%0x\n", off); *data = le32toh(buf); return (err); } /** * lan78xx_write_reg - Write a 32-bit register on the device * @sc: driver soft context * @off: offset of the register * @data: the 32-bit value to write into the register * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, a USB_ERR_?? error code on failure. 
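 *
 * Example (editorial sketch of the read-modify-write pattern these
 * two accessors are used for throughout this driver):
 *
 *	uint32_t val;
 *
 *	if (lan78xx_read_reg(sc, ETH_HW_CFG, &val) == 0) {
 *		val |= ETH_HW_CFG_MEF_;
 *		lan78xx_write_reg(sc, ETH_HW_CFG, val);
 *	}
 *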
*/ static int lan78xx_write_reg(struct muge_softc *sc, uint32_t off, uint32_t data) { struct usb_device_request req; uint32_t buf; usb_error_t err; MUGE_LOCK_ASSERT(sc, MA_OWNED); buf = htole32(data); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UVR_WRITE_REG; USETW(req.wValue, 0); USETW(req.wIndex, off); USETW(req.wLength, 4); err = uether_do_request(&sc->sc_ue, &req, &buf, 1000); if (err != 0) muge_warn_printf(sc, "Failed to write register 0x%0x\n", off); return (err); } /** * lan78xx_wait_for_bits - Poll on a register value until bits are cleared * @sc: soft context * @reg: offset of the register * @bits: if the bits are clear the function returns * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. */ static int lan78xx_wait_for_bits(struct muge_softc *sc, uint32_t reg, uint32_t bits) { usb_ticks_t start_ticks; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); uint32_t val; int err; MUGE_LOCK_ASSERT(sc, MA_OWNED); start_ticks = (usb_ticks_t)ticks; do { if ((err = lan78xx_read_reg(sc, reg, &val)) != 0) return (err); if (!(val & bits)) return (0); uether_pause(&sc->sc_ue, hz / 100); } while (((usb_ticks_t)(ticks - start_ticks)) < max_ticks); return (USB_ERR_TIMEOUT); } /** * lan78xx_eeprom_read_raw - Read the attached EEPROM * @sc: soft context * @off: the eeprom address offset * @buf: stores the bytes * @buflen: the number of bytes to read * * Simply reads bytes from an attached eeprom. * * LOCKING: * The function takes and releases the device lock if not already held. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. */ static int lan78xx_eeprom_read_raw(struct muge_softc *sc, uint16_t off, uint8_t *buf, uint16_t buflen) { usb_ticks_t start_ticks; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); int err; uint32_t val, saved; uint16_t i; bool locked; locked = mtx_owned(&sc->sc_mtx); /* XXX */ if (!locked) MUGE_LOCK(sc); if (sc->chipid == ETH_ID_REV_CHIP_ID_7800_) { /* EEDO/EECLK muxed with LED0/LED1 on LAN7800. */ err = lan78xx_read_reg(sc, ETH_HW_CFG, &val); saved = val; val &= ~(ETH_HW_CFG_LEDO_EN_ | ETH_HW_CFG_LED1_EN_); err = lan78xx_write_reg(sc, ETH_HW_CFG, val); } err = lan78xx_wait_for_bits(sc, ETH_E2P_CMD, ETH_E2P_CMD_BUSY_); if (err != 0) { muge_warn_printf(sc, "eeprom busy, failed to read data\n"); goto done; } /* Start reading the bytes, one at a time. */ for (i = 0; i < buflen; i++) { val = ETH_E2P_CMD_BUSY_ | ETH_E2P_CMD_READ_; val |= (ETH_E2P_CMD_ADDR_MASK_ & (off + i)); if ((err = lan78xx_write_reg(sc, ETH_E2P_CMD, val)) != 0) goto done; start_ticks = (usb_ticks_t)ticks; do { if ((err = lan78xx_read_reg(sc, ETH_E2P_CMD, &val)) != 0) goto done; if (!(val & ETH_E2P_CMD_BUSY_) || (val & ETH_E2P_CMD_TIMEOUT_)) break; uether_pause(&sc->sc_ue, hz / 100); } while (((usb_ticks_t)(ticks - start_ticks)) < max_ticks); if (val & (ETH_E2P_CMD_BUSY_ | ETH_E2P_CMD_TIMEOUT_)) { muge_warn_printf(sc, "eeprom command failed\n"); err = USB_ERR_IOERROR; break; } if ((err = lan78xx_read_reg(sc, ETH_E2P_DATA, &val)) != 0) goto done; buf[i] = (val & 0xff); } done: if (!locked) MUGE_UNLOCK(sc); if (sc->chipid == ETH_ID_REV_CHIP_ID_7800_) { /* Restore saved LED configuration. 
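 *
 * (Editor's note on the pollers above: the tick-delta idiom is
 * wrap-safe because the subtraction is performed in unsigned
 * arithmetic before the comparison:
 *
 *	start_ticks = (usb_ticks_t)ticks;
 *	do {
 *		...poll...
 *	} while ((usb_ticks_t)(ticks - start_ticks) < max_ticks);
 * )
 *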
*/ lan78xx_write_reg(sc, ETH_HW_CFG, saved); } return (err); } static bool lan78xx_eeprom_present(struct muge_softc *sc) { int ret; uint8_t sig; ret = lan78xx_eeprom_read_raw(sc, ETH_E2P_INDICATOR_OFFSET, &sig, 1); return (ret == 0 && sig == ETH_E2P_INDICATOR); } /** * lan78xx_otp_read_raw * @sc: soft context * @off: the otp address offset * @buf: stores the bytes * @buflen: the number of bytes to read * * Simply reads bytes from the OTP. * * LOCKING: * The function takes and releases the device lock if not already held. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. * */ static int lan78xx_otp_read_raw(struct muge_softc *sc, uint16_t off, uint8_t *buf, uint16_t buflen) { int err; uint32_t val; uint16_t i; bool locked; locked = mtx_owned(&sc->sc_mtx); if (!locked) MUGE_LOCK(sc); err = lan78xx_read_reg(sc, OTP_PWR_DN, &val); /* Checking if bit is set. */ if (val & OTP_PWR_DN_PWRDN_N) { /* Clear it, then wait for it to be cleared. */ lan78xx_write_reg(sc, OTP_PWR_DN, 0); err = lan78xx_wait_for_bits(sc, OTP_PWR_DN, OTP_PWR_DN_PWRDN_N); if (err != 0) { muge_warn_printf(sc, "OTP off? failed to read data\n"); goto done; } } /* Start reading the bytes, one at a time. */ for (i = 0; i < buflen; i++) { err = lan78xx_write_reg(sc, OTP_ADDR1, ((off + i) >> 8) & OTP_ADDR1_15_11); err = lan78xx_write_reg(sc, OTP_ADDR2, ((off + i) & OTP_ADDR2_10_3)); err = lan78xx_write_reg(sc, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); err = lan78xx_write_reg(sc, OTP_CMD_GO, OTP_CMD_GO_GO_); err = lan78xx_wait_for_bits(sc, OTP_STATUS, OTP_STATUS_BUSY_); if (err != 0) { muge_warn_printf(sc, "OTP busy failed to read data\n"); goto done; } if ((err = lan78xx_read_reg(sc, OTP_RD_DATA, &val)) != 0) goto done; buf[i] = (uint8_t)(val & 0xff); } done: if (!locked) MUGE_UNLOCK(sc); return (err); } /** * lan78xx_otp_read * @sc: soft context * @off: the otp address offset * @buf: stores the bytes * @buflen: the number of bytes to read * * Simply reads bytes from the OTP. * * LOCKING: * The function takes and releases device lock if it is not already held. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. */ static int lan78xx_otp_read(struct muge_softc *sc, uint16_t off, uint8_t *buf, uint16_t buflen) { uint8_t sig; int err; err = lan78xx_otp_read_raw(sc, OTP_INDICATOR_OFFSET, &sig, 1); if (err == 0) { if (sig == OTP_INDICATOR_1) { } else if (sig == OTP_INDICATOR_2) { off += 0x100; /* XXX */ } else { err = -EINVAL; } if (!err) err = lan78xx_otp_read_raw(sc, off, buf, buflen); } return (err); } /** * lan78xx_setmacaddress - Set the mac address in the device * @sc: driver soft context * @addr: pointer to an array containing at least the 6 bytes of the MAC * address * * LOCKING: * Should be called with the MUGE lock held. * * RETURNS: * Returns 0 on success or a negative error code. */ static int lan78xx_setmacaddress(struct muge_softc *sc, const uint8_t *addr) { int err; uint32_t val; muge_dbg_printf(sc, "setting mac address to %02x:%02x:%02x:%02x:%02x:%02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); MUGE_LOCK_ASSERT(sc, MA_OWNED); val = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; if ((err = lan78xx_write_reg(sc, ETH_RX_ADDRL, val)) != 0) goto done; val = (addr[5] << 8) | addr[4]; err = lan78xx_write_reg(sc, ETH_RX_ADDRH, val); done: return (err); } /** * lan78xx_set_rx_max_frame_length * @sc: driver soft context * @size: the new maximum frame length, in bytes * * Sets the maximum frame length to be received. Frames bigger than * this size are aborted.
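 *
 * (Editor's note, hedged: the body below programs ((size + 4) <<
 * ETH_MAC_RX_MAX_FR_SIZE_SHIFT_); the extra 4 bytes presumably
 * leave headroom for an 802.1Q VLAN tag, which matches the Linux
 * driver sizing this limit from the MTU plus the VLAN header:
 *
 *	lan78xx_set_rx_max_frame_length(sc, ETHER_MAX_LEN);	// 1518 -> 1522
 * )
 *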
* * RETURNS: * Returns 0 on success or a negative error code. */ static int lan78xx_set_rx_max_frame_length(struct muge_softc *sc, int size) { int err = 0; uint32_t buf; bool rxenabled; /* First we have to disable rx before changing the length. */ err = lan78xx_read_reg(sc, ETH_MAC_RX, &buf); rxenabled = ((buf & ETH_MAC_RX_EN_) != 0); if (rxenabled) { buf &= ~ETH_MAC_RX_EN_; err = lan78xx_write_reg(sc, ETH_MAC_RX, buf); } /* Setting max frame length. */ buf &= ~ETH_MAC_RX_MAX_FR_SIZE_MASK_; buf |= (((size + 4) << ETH_MAC_RX_MAX_FR_SIZE_SHIFT_) & ETH_MAC_RX_MAX_FR_SIZE_MASK_); err = lan78xx_write_reg(sc, ETH_MAC_RX, buf); /* If it were enabled before, we enable it back. */ if (rxenabled) { buf |= ETH_MAC_RX_EN_; err = lan78xx_write_reg(sc, ETH_MAC_RX, buf); } return (0); } /** * lan78xx_miibus_readreg - Read a MII/MDIO register * @dev: usb ether device * @phy: the number of phy reading from * @reg: the register address * * LOCKING: * Takes and releases the device mutex lock if not already held. * * RETURNS: * Returns the 16-bits read from the MII register, if this function fails * 0 is returned. */ static int lan78xx_miibus_readreg(device_t dev, int phy, int reg) { struct muge_softc *sc = device_get_softc(dev); uint32_t addr, val; bool locked; val = 0; locked = mtx_owned(&sc->sc_mtx); if (!locked) MUGE_LOCK(sc); if (lan78xx_wait_for_bits(sc, ETH_MII_ACC, ETH_MII_ACC_MII_BUSY_) != 0) { muge_warn_printf(sc, "MII is busy\n"); goto done; } addr = (phy << 11) | (reg << 6) | ETH_MII_ACC_MII_READ_ | ETH_MII_ACC_MII_BUSY_; lan78xx_write_reg(sc, ETH_MII_ACC, addr); if (lan78xx_wait_for_bits(sc, ETH_MII_ACC, ETH_MII_ACC_MII_BUSY_) != 0) { muge_warn_printf(sc, "MII read timeout\n"); goto done; } lan78xx_read_reg(sc, ETH_MII_DATA, &val); val = le32toh(val); done: if (!locked) MUGE_UNLOCK(sc); return (val & 0xFFFF); } /** * lan78xx_miibus_writereg - Writes a MII/MDIO register * @dev: usb ether device * @phy: the number of phy writing to * @reg: the register address * @val: the value to write * * Attempts to write a PHY register through the usb controller registers. * * LOCKING: * Takes and releases the device mutex lock if not already held. * * RETURNS: * Always returns 0 regardless of success or failure. */ static int lan78xx_miibus_writereg(device_t dev, int phy, int reg, int val) { struct muge_softc *sc = device_get_softc(dev); uint32_t addr; bool locked; if (sc->sc_phyno != phy) return (0); locked = mtx_owned(&sc->sc_mtx); if (!locked) MUGE_LOCK(sc); if (lan78xx_wait_for_bits(sc, ETH_MII_ACC, ETH_MII_ACC_MII_BUSY_) != 0) { muge_warn_printf(sc, "MII is busy\n"); goto done; } val = htole32(val); lan78xx_write_reg(sc, ETH_MII_DATA, val); addr = (phy << 11) | (reg << 6) | ETH_MII_ACC_MII_WRITE_ | ETH_MII_ACC_MII_BUSY_; lan78xx_write_reg(sc, ETH_MII_ACC, addr); if (lan78xx_wait_for_bits(sc, ETH_MII_ACC, ETH_MII_ACC_MII_BUSY_) != 0) muge_warn_printf(sc, "MII write timeout\n"); done: if (!locked) MUGE_UNLOCK(sc); return (0); } /* * lan78xx_miibus_statchg - Called to detect phy status change * @dev: usb ether device * * This function is called periodically by the system to poll for status * changes of the link. * * LOCKING: * Takes and releases the device mutex lock if not already held. 
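 *
 * (Editor's worked example for the MII accessors above: ETH_MII_ACC
 * packs the PHY address and register index into one command word;
 * for phy = 1, reg = 0 (BMCR):
 *
 *	addr = (1 << 11) | (0 << 6) | ETH_MII_ACC_MII_READ_ |
 *	    ETH_MII_ACC_MII_BUSY_;	// 0x0800 plus the two flag bits
 * )
 *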
*/ static void lan78xx_miibus_statchg(device_t dev) { struct muge_softc *sc = device_get_softc(dev); struct mii_data *mii = uether_getmii(&sc->sc_ue); struct ifnet *ifp; int err; uint32_t flow = 0; uint32_t fct_flow = 0; bool locked; locked = mtx_owned(&sc->sc_mtx); if (!locked) MUGE_LOCK(sc); ifp = uether_getifp(&sc->sc_ue); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; /* Use the MII status to determine link status */ sc->sc_flags &= ~MUGE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { muge_dbg_printf(sc, "media is active\n"); switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->sc_flags |= MUGE_FLAG_LINK; muge_dbg_printf(sc, "10/100 ethernet\n"); break; case IFM_1000_T: sc->sc_flags |= MUGE_FLAG_LINK; muge_dbg_printf(sc, "Gigabit ethernet\n"); break; default: break; } } /* Lost link, do nothing. */ if ((sc->sc_flags & MUGE_FLAG_LINK) == 0) { muge_dbg_printf(sc, "link flag not set\n"); goto done; } err = lan78xx_read_reg(sc, ETH_FCT_FLOW, &fct_flow); if (err) { muge_warn_printf(sc, "failed to read initial flow control thresholds, error %d\n", err); goto done; } /* Enable/disable full duplex operation and TX/RX pause. */ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { muge_dbg_printf(sc, "full duplex operation\n"); /* Enable transmit MAC flow control function. */ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) flow |= ETH_FLOW_CR_TX_FCEN_ | 0xFFFF; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) flow |= ETH_FLOW_CR_RX_FCEN_; } /* XXX Flow control settings obtained from Microchip's driver. */ switch(usbd_get_speed(sc->sc_ue.ue_udev)) { case USB_SPEED_SUPER: fct_flow = 0x817; break; case USB_SPEED_HIGH: fct_flow = 0x211; break; default: break; } err += lan78xx_write_reg(sc, ETH_FLOW, flow); err += lan78xx_write_reg(sc, ETH_FCT_FLOW, fct_flow); if (err) muge_warn_printf(sc, "media change failed, error %d\n", err); done: if (!locked) MUGE_UNLOCK(sc); } /* * lan78xx_set_mdix_auto - Configure the device to enable automatic * crossover and polarity detection. LAN7800 provides HP Auto-MDIX * functionality for seamless crossover and polarity detection. * * @sc: driver soft context * * LOCKING: * Takes and releases the device mutex lock if not already held. */ static void lan78xx_set_mdix_auto(struct muge_softc *sc) { uint32_t buf, err; err = lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_EXT_PAGE_ACCESS, MUGE_EXT_PAGE_SPACE_1); buf = lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_EXT_MODE_CTRL); buf &= ~MUGE_EXT_MODE_CTRL_MDIX_MASK_; buf |= MUGE_EXT_MODE_CTRL_AUTO_MDIX_; lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR); err += lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_EXT_MODE_CTRL, buf); err += lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_EXT_PAGE_ACCESS, MUGE_EXT_PAGE_SPACE_0); if (err != 0) muge_warn_printf(sc, "error setting PHY's MDIX status\n"); sc->sc_mdix_ctl = buf; } /** * lan78xx_phy_init - Initialises the in-built MUGE phy * @sc: driver soft context * * Resets the PHY part of the chip and then initialises it to default * values. The 'link down' and 'auto-negotiation complete' interrupts * from the PHY are also enabled, however we don't monitor the interrupt * endpoints for the moment. * * RETURNS: * Returns 0 on success or EIO if failed to reset the PHY. 
*/ static int lan78xx_phy_init(struct muge_softc *sc) { muge_dbg_printf(sc, "Initializing PHY.\n"); uint16_t bmcr, lmsr; usb_ticks_t start_ticks; uint32_t hw_reg; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); MUGE_LOCK_ASSERT(sc, MA_OWNED); /* Reset phy and wait for reset to complete. */ lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR, BMCR_RESET); start_ticks = ticks; do { uether_pause(&sc->sc_ue, hz / 100); bmcr = lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR); } while ((bmcr & BMCR_RESET) && ((ticks - start_ticks) < max_ticks)); if (((usb_ticks_t)(ticks - start_ticks)) >= max_ticks) { muge_err_printf(sc, "PHY reset timed-out\n"); return (EIO); } /* Setup phy to interrupt upon link down or autoneg completion. */ lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_PHY_INTR_STAT); lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_PHY_INTR_MASK, (MUGE_PHY_INTR_ANEG_COMP | MUGE_PHY_INTR_LINK_CHANGE)); /* Enable Auto-MDIX for crossover and polarity detection. */ lan78xx_set_mdix_auto(sc); /* Enable all modes. */ lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_ANAR, ANAR_10 | ANAR_10_FD | ANAR_TX | ANAR_TX_FD | ANAR_CSMA | ANAR_FC | ANAR_PAUSE_ASYM); /* Restart auto-negotiation. */ bmcr |= BMCR_STARTNEG; bmcr |= BMCR_AUTOEN; lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR, bmcr); bmcr = lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR); /* Configure LED Modes. */ if (sc->sc_led_modes_mask != 0) { lmsr = lan78xx_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_PHY_LED_MODE); lmsr &= ~sc->sc_led_modes_mask; lmsr |= sc->sc_led_modes; lan78xx_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MUGE_PHY_LED_MODE, lmsr); } /* Enable appropriate LEDs. */ if (sc->sc_leds != 0 && lan78xx_read_reg(sc, ETH_HW_CFG, &hw_reg) == 0) { hw_reg &= ~(ETH_HW_CFG_LEDO_EN_ | ETH_HW_CFG_LED1_EN_ | ETH_HW_CFG_LED2_EN_ | ETH_HW_CFG_LED3_EN_ ); hw_reg |= sc->sc_leds; lan78xx_write_reg(sc, ETH_HW_CFG, hw_reg); } return (0); } /** * lan78xx_chip_init - Initialises the chip after power on * @sc: driver soft context * * This initialisation sequence is modelled on the procedure in the Linux * driver. * * RETURNS: * Returns 0 on success or an error code on failure. */ static int lan78xx_chip_init(struct muge_softc *sc) { int err; uint32_t buf; uint32_t burst_cap; MUGE_LOCK_ASSERT(sc, MA_OWNED); /* Enter H/W config mode. */ lan78xx_write_reg(sc, ETH_HW_CFG, ETH_HW_CFG_LRST_); if ((err = lan78xx_wait_for_bits(sc, ETH_HW_CFG, ETH_HW_CFG_LRST_)) != 0) { muge_warn_printf(sc, "timed-out waiting for lite reset to complete\n"); goto init_failed; } /* Set the mac address. */ if ((err = lan78xx_setmacaddress(sc, sc->sc_ue.ue_eaddr)) != 0) { muge_warn_printf(sc, "failed to set the MAC address\n"); goto init_failed; } /* Read and display the revision register. */ if ((err = lan78xx_read_reg(sc, ETH_ID_REV, &buf)) < 0) { muge_warn_printf(sc, "failed to read ETH_ID_REV (err = %d)\n", err); goto init_failed; } sc->chipid = (buf & ETH_ID_REV_CHIP_ID_MASK_) >> 16; sc->chiprev = buf & ETH_ID_REV_CHIP_REV_MASK_; switch (sc->chipid) { case ETH_ID_REV_CHIP_ID_7800_: case ETH_ID_REV_CHIP_ID_7850_: break; default: muge_warn_printf(sc, "Chip ID 0x%04x not yet supported\n", sc->chipid); goto init_failed; } device_printf(sc->sc_ue.ue_dev, "Chip ID 0x%04x rev %04x\n", sc->chipid, sc->chiprev); /* Respond to BULK-IN tokens with a NAK when RX FIFO is empty. 
*/ if ((err = lan78xx_read_reg(sc, ETH_USB_CFG0, &buf)) != 0) { muge_warn_printf(sc, "failed to read ETH_USB_CFG0 (err=%d)\n", err); goto init_failed; } buf |= ETH_USB_CFG_BIR_; lan78xx_write_reg(sc, ETH_USB_CFG0, buf); /* * XXX LTM support will go here. */ /* Configuring the burst cap. */ switch (usbd_get_speed(sc->sc_ue.ue_udev)) { case USB_SPEED_SUPER: burst_cap = MUGE_DEFAULT_BURST_CAP_SIZE/MUGE_SS_USB_PKT_SIZE; break; case USB_SPEED_HIGH: burst_cap = MUGE_DEFAULT_BURST_CAP_SIZE/MUGE_HS_USB_PKT_SIZE; break; default: burst_cap = MUGE_DEFAULT_BURST_CAP_SIZE/MUGE_FS_USB_PKT_SIZE; } lan78xx_write_reg(sc, ETH_BURST_CAP, burst_cap); /* Set the default bulk in delay (same value as the Linux driver). */ lan78xx_write_reg(sc, ETH_BULK_IN_DLY, MUGE_DEFAULT_BULK_IN_DELAY); /* Allow multiple Ethernet frames per USB packet. */ err = lan78xx_read_reg(sc, ETH_HW_CFG, &buf); buf |= ETH_HW_CFG_MEF_; err = lan78xx_write_reg(sc, ETH_HW_CFG, buf); /* Enable burst cap. */ if ((err = lan78xx_read_reg(sc, ETH_USB_CFG0, &buf)) < 0) { muge_warn_printf(sc, "failed to read ETH_USB_CFG0 (err=%d)\n", err); goto init_failed; } buf |= ETH_USB_CFG_BCE_; err = lan78xx_write_reg(sc, ETH_USB_CFG0, buf); /* * Set FCL's RX and TX FIFO sizes: according to the data sheet these are * already the default values. But we initialize them to the same values * anyway, as that's what the Linux driver does. * */ buf = (MUGE_MAX_RX_FIFO_SIZE - 512) / 512; err = lan78xx_write_reg(sc, ETH_FCT_RX_FIFO_END, buf); buf = (MUGE_MAX_TX_FIFO_SIZE - 512) / 512; err = lan78xx_write_reg(sc, ETH_FCT_TX_FIFO_END, buf); /* Enabling interrupts. (Not using them for now) */ err = lan78xx_write_reg(sc, ETH_INT_STS, ETH_INT_STS_CLEAR_ALL_); /* * Initialize the flow control registers to 0. Setting them properly is * handled by the link-reset logic in the Linux driver. */ err = lan78xx_write_reg(sc, ETH_FLOW, 0); err = lan78xx_write_reg(sc, ETH_FCT_FLOW, 0); /* * Settings for the RFE: we enable broadcast and destination address * perfect filtering. */ err = lan78xx_read_reg(sc, ETH_RFE_CTL, &buf); buf |= ETH_RFE_CTL_BCAST_EN_ | ETH_RFE_CTL_DA_PERFECT_; err = lan78xx_write_reg(sc, ETH_RFE_CTL, buf); /* * At this point the Linux driver writes multicast tables, and enables * checksum engines. But in FreeBSD that gets done in muge_init, * which gets called when the interface is brought up. */ /* Reset the PHY. */ lan78xx_write_reg(sc, ETH_PMT_CTL, ETH_PMT_CTL_PHY_RST_); if ((err = lan78xx_wait_for_bits(sc, ETH_PMT_CTL, ETH_PMT_CTL_PHY_RST_)) != 0) { muge_warn_printf(sc, "timed-out waiting for phy reset to complete\n"); goto init_failed; } err = lan78xx_read_reg(sc, ETH_MAC_CR, &buf); if (sc->chipid == ETH_ID_REV_CHIP_ID_7800_ && !lan78xx_eeprom_present(sc)) { /* Set automatic duplex and speed on LAN7800 without EEPROM. */ buf |= ETH_MAC_CR_AUTO_DUPLEX_ | ETH_MAC_CR_AUTO_SPEED_; } err = lan78xx_write_reg(sc, ETH_MAC_CR, buf); /* * Enable PHY interrupts (not really used for now). * ETH_INT_EP_CTL: interrupt endpoint control register; * PHY events cause interrupts to be issued. */ err = lan78xx_read_reg(sc, ETH_INT_EP_CTL, &buf); buf |= ETH_INT_ENP_PHY_INT; err = lan78xx_write_reg(sc, ETH_INT_EP_CTL, buf); /* * Enable the MAC's transmitter. It will transmit frames from the * buffer onto the cable. */ err = lan78xx_read_reg(sc, ETH_MAC_TX, &buf); buf |= ETH_MAC_TX_TXEN_; err = lan78xx_write_reg(sc, ETH_MAC_TX, buf); /* FIFO is capable of transmitting frames to MAC.
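 *
 * (Editor's note on the burst-cap setup above, hedged: ETH_BURST_CAP
 * appears to be expressed in units of the bulk endpoint's max-packet
 * size for the negotiated speed -- 1024, 512, and 64 bytes for
 * Super, High, and Full Speed respectively -- which would explain
 * the per-speed division, assuming the MUGE_*_USB_PKT_SIZE constants
 * carry exactly those values:
 *
 *	burst_cap = MUGE_DEFAULT_BURST_CAP_SIZE / 1024;	// SuperSpeed case
 * )
 *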
*/ err = lan78xx_read_reg(sc, ETH_FCT_TX_CTL, &buf); buf |= ETH_FCT_TX_CTL_EN_; err = lan78xx_write_reg(sc, ETH_FCT_TX_CTL, buf); /* * Set max frame length. In linux this is dev->mtu (which by default * is 1500) + VLAN_ETH_HLEN = 1518. */ err = lan78xx_set_rx_max_frame_length(sc, ETHER_MAX_LEN); /* Initialise the PHY. */ if ((err = lan78xx_phy_init(sc)) != 0) goto init_failed; /* Enable MAC RX. */ err = lan78xx_read_reg(sc, ETH_MAC_RX, &buf); buf |= ETH_MAC_RX_EN_; err = lan78xx_write_reg(sc, ETH_MAC_RX, buf); /* Enable FIFO controller RX. */ err = lan78xx_read_reg(sc, ETH_FCT_RX_CTL, &buf); buf |= ETH_FCT_TX_CTL_EN_; err = lan78xx_write_reg(sc, ETH_FCT_RX_CTL, buf); sc->sc_flags |= MUGE_FLAG_INIT_DONE; return (0); init_failed: muge_err_printf(sc, "lan78xx_chip_init failed (err=%d)\n", err); return (err); } static void muge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct muge_softc *sc = usbd_xfer_softc(xfer); struct usb_ether *ue = &sc->sc_ue; struct ifnet *ifp = uether_getifp(ue); struct mbuf *m; struct usb_page_cache *pc; uint32_t rx_cmd_a, rx_cmd_b; uint16_t rx_cmd_c; int pktlen; int off; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); muge_dbg_printf(sc, "rx : actlen %d\n", actlen); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* * There is always a zero length frame after bringing the * interface up. */ if (actlen < (sizeof(rx_cmd_a) + ETHER_CRC_LEN)) goto tr_setup; /* * There may be multiple packets in the USB frame. Each will * have a header and each needs to have its own mbuf allocated * and populated for it. */ pc = usbd_xfer_get_frame(xfer, 0); off = 0; while (off < actlen) { /* The frame header is aligned on a 4 byte boundary. */ off = ((off + 0x3) & ~0x3); /* Extract RX CMD A. */ if (off + sizeof(rx_cmd_a) > actlen) goto tr_setup; usbd_copy_out(pc, off, &rx_cmd_a, sizeof(rx_cmd_a)); off += (sizeof(rx_cmd_a)); rx_cmd_a = le32toh(rx_cmd_a); /* Extract RX CMD B. */ if (off + sizeof(rx_cmd_b) > actlen) goto tr_setup; usbd_copy_out(pc, off, &rx_cmd_b, sizeof(rx_cmd_b)); off += (sizeof(rx_cmd_b)); rx_cmd_b = le32toh(rx_cmd_b); /* Extract RX CMD C. */ if (off + sizeof(rx_cmd_c) > actlen) goto tr_setup; usbd_copy_out(pc, off, &rx_cmd_c, sizeof(rx_cmd_c)); off += (sizeof(rx_cmd_c)); rx_cmd_c = le16toh(rx_cmd_c); if (off > actlen) goto tr_setup; pktlen = (rx_cmd_a & RX_CMD_A_LEN_MASK_); muge_dbg_printf(sc, "rx_cmd_a 0x%08x rx_cmd_b 0x%08x rx_cmd_c 0x%04x " " pktlen %d actlen %d off %d\n", rx_cmd_a, rx_cmd_b, rx_cmd_c, pktlen, actlen, off); if (rx_cmd_a & RX_CMD_A_RED_) { muge_dbg_printf(sc, "rx error (hdr 0x%08x)\n", rx_cmd_a); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } else { /* Ethernet frame too big or too small? */ if ((pktlen < ETHER_HDR_LEN) || (pktlen > (actlen - off))) goto tr_setup; /* Create a new mbuf to store the packet. 
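 *
 * (Editor's note on the parse above: each packet inside a bulk-in
 * transfer is preceded by a 10-byte header -- 32-bit RX CMD A
 * carrying status and length, 32-bit RX CMD B, 16-bit RX CMD C --
 * and every header starts on a 4-byte boundary:
 *
 *	off = (off + 0x3) & ~0x3;		// align to 4
 *	pktlen = rx_cmd_a & RX_CMD_A_LEN_MASK_;
 * )
 */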
		while (off < actlen) {
			/* The frame header is aligned on a 4-byte boundary. */
			off = ((off + 0x3) & ~0x3);

			/* Extract RX CMD A. */
			if (off + sizeof(rx_cmd_a) > actlen)
				goto tr_setup;
			usbd_copy_out(pc, off, &rx_cmd_a, sizeof(rx_cmd_a));
			off += (sizeof(rx_cmd_a));
			rx_cmd_a = le32toh(rx_cmd_a);

			/* Extract RX CMD B. */
			if (off + sizeof(rx_cmd_b) > actlen)
				goto tr_setup;
			usbd_copy_out(pc, off, &rx_cmd_b, sizeof(rx_cmd_b));
			off += (sizeof(rx_cmd_b));
			rx_cmd_b = le32toh(rx_cmd_b);

			/* Extract RX CMD C. */
			if (off + sizeof(rx_cmd_c) > actlen)
				goto tr_setup;
			usbd_copy_out(pc, off, &rx_cmd_c, sizeof(rx_cmd_c));
			off += (sizeof(rx_cmd_c));
			rx_cmd_c = le16toh(rx_cmd_c);

			if (off > actlen)
				goto tr_setup;

			pktlen = (rx_cmd_a & RX_CMD_A_LEN_MASK_);

			muge_dbg_printf(sc,
			    "rx_cmd_a 0x%08x rx_cmd_b 0x%08x rx_cmd_c 0x%04x "
			    "pktlen %d actlen %d off %d\n",
			    rx_cmd_a, rx_cmd_b, rx_cmd_c, pktlen, actlen, off);

			if (rx_cmd_a & RX_CMD_A_RED_) {
				muge_dbg_printf(sc, "rx error (hdr 0x%08x)\n",
				    rx_cmd_a);
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			} else {
				/* Ethernet frame too big or too small? */
				if ((pktlen < ETHER_HDR_LEN) ||
				    (pktlen > (actlen - off)))
					goto tr_setup;

				/* Create a new mbuf to store the packet. */
				m = uether_newbuf();
				if (m == NULL) {
					muge_warn_printf(sc,
					    "failed to create new mbuf\n");
					if_inc_counter(ifp,
					    IFCOUNTER_IQDROPS, 1);
					goto tr_setup;
				}
				if (pktlen > m->m_len) {
					muge_dbg_printf(sc,
					    "buffer too small %d vs %d bytes",
					    pktlen, m->m_len);
					if_inc_counter(ifp,
					    IFCOUNTER_IQDROPS, 1);
					m_freem(m);
					goto tr_setup;
				}
				usbd_copy_out(pc, off, mtod(m, uint8_t *),
				    pktlen);

				/*
				 * Check whether RX checksums are computed,
				 * and offload them.
				 */
				if ((ifp->if_capenable & IFCAP_RXCSUM) &&
				    !(rx_cmd_a & RX_CMD_A_ICSM_)) {
					struct ether_header *eh;
					eh = mtod(m, struct ether_header *);
					/*
					 * Remove the extra 2 bytes of the
					 * csum.
					 *
					 * The checksum appears to be
					 * simplistically calculated over the
					 * protocol headers up to the end of
					 * the eth frame, which means that if
					 * the eth frame is padded the csum
					 * calculation is incorrectly
					 * performed over the padding bytes
					 * as well.  Therefore, to be safe,
					 * we ignore the H/W csum on frames
					 * less than or equal to 64 bytes.
					 *
					 * Protocols checksummed:
					 * TCP, UDP, ICMP, IGMP, IP.
					 */
					if (pktlen > ETHER_MIN_LEN) {
						m->m_pkthdr.csum_flags |=
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
						/*
						 * Copy the checksum from the
						 * last 2 bytes of the transfer
						 * and put it in the csum_data
						 * field.
						 */
						usbd_copy_out(pc,
						    (off + pktlen),
						    &m->m_pkthdr.csum_data, 2);
						/*
						 * The data is copied in
						 * network order, but the csum
						 * algorithm in the kernel
						 * expects it to be in host
						 * byte order.
						 */
						m->m_pkthdr.csum_data =
						    ntohs(m->m_pkthdr.csum_data);
						muge_dbg_printf(sc,
						    "RX checksum offloaded (0x%04x)\n",
						    m->m_pkthdr.csum_data);
					}
				}

				/* Enqueue the mbuf on the receive queue. */
				if (pktlen < (4 + ETHER_HDR_LEN)) {
					m_freem(m);
					goto tr_setup;
				}
				/* Remove 4 trailing bytes. */
				uether_rxmbuf(ue, m, pktlen - 4);
			}

			/*
			 * Update the offset to move to the next potential
			 * packet.
			 */
			off += pktlen;
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		return;
	default:
		if (error != USB_ERR_CANCELLED) {
			muge_warn_printf(sc, "bulk read error, %s\n",
			    usbd_errstr(error));
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}
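
/*
 * Editor's note on the RX checksum flags used above (standard FreeBSD
 * mbuf convention, not muge-specific): with CSUM_DATA_VALID alone the
 * stack folds the pseudo-header sum into csum_data and accepts the packet
 * if the result comes to 0xffff; when CSUM_PSEUDO_HDR is also set it
 * instead expects csum_data to already be 0xffff.  Roughly, in the TCP
 * input path:
 *
 *   if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
 *       sum = m->m_pkthdr.csum_data;
 *   else
 *       sum = in_pseudo(ip_src, ip_dst, htonl(csum_data + len + proto));
 *   valid = ((sum ^ 0xffff) == 0);
 */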

/**
 * muge_bulk_write_callback - Write callback used to send ethernet frame(s)
 * @xfer: the USB transfer
 * @error: error code if the transfer is in an errored state
 *
 * The main write function that pulls ethernet frames off the queue and
 * sends them out.
 */
static void
muge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct muge_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	int nframes;
	uint32_t frm_len = 0, tx_cmd_a = 0, tx_cmd_b = 0;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		muge_dbg_printf(sc,
		    "USB TRANSFER status: USB_ST_TRANSFERRED\n");
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		muge_dbg_printf(sc, "USB TRANSFER status: USB_ST_SETUP\n");
tr_setup:
		if ((sc->sc_flags & MUGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			muge_dbg_printf(sc,
			    "sc->sc_flags & MUGE_FLAG_LINK: %d\n",
			    (sc->sc_flags & MUGE_FLAG_LINK));
			muge_dbg_printf(sc,
			    "ifp->if_drv_flags & IFF_DRV_OACTIVE: %d\n",
			    (ifp->if_drv_flags & IFF_DRV_OACTIVE));
			muge_dbg_printf(sc,
			    "USB TRANSFER not sending: no link or controller is busy\n");
			/*
			 * Don't send anything if there is no link or the
			 * controller is busy.
			 */
			return;
		}
		for (nframes = 0;
		    nframes < 16 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd);
		    nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			frm_len = 0;
			pc = usbd_xfer_get_frame(xfer, nframes);

			/*
			 * Each frame is prefixed with two 32-bit values
			 * describing the length of the packet and buffer.
			 */
			tx_cmd_a = (m->m_pkthdr.len & TX_CMD_A_LEN_MASK_) |
			    TX_CMD_A_FCS_;
			tx_cmd_a = htole32(tx_cmd_a);
			usbd_copy_in(pc, 0, &tx_cmd_a, sizeof(tx_cmd_a));

			tx_cmd_b = 0;
			/* TCP LSO support will probably be implemented here. */
			tx_cmd_b = htole32(tx_cmd_b);
			usbd_copy_in(pc, 4, &tx_cmd_b, sizeof(tx_cmd_b));
			frm_len += 8;

			/* Next copy in the actual packet. */
			usbd_m_copy_in(pc, frm_len, m, 0, m->m_pkthdr.len);
			frm_len += m->m_pkthdr.len;

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

			/*
			 * If there's a BPF listener, bounce a copy of this
			 * frame to it.
			 */
			BPF_MTAP(ifp, m);
			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, frm_len);
		}

		muge_dbg_printf(sc, "USB TRANSFER nframes: %d\n", nframes);
		if (nframes != 0) {
			muge_dbg_printf(sc, "USB TRANSFER submit attempt\n");
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;

	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			muge_err_printf(sc, "usb error on tx: %s\n",
			    usbd_errstr(error));
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

/**
 * muge_set_mac_addr - Initializes the NIC MAC address
 * @ue: the USB ethernet device
 *
 * Tries to obtain the MAC address from a number of sources: registers,
 * EEPROM, and the DTB blob.  If all sources fail, generates a random MAC.
 */
static void
muge_set_mac_addr(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	uint32_t mac_h, mac_l, val;

	memset(ue->ue_eaddr, 0xff, ETHER_ADDR_LEN);

	lan78xx_read_reg(sc, 0, &val);

	/* Read current MAC address from RX_ADDRx registers. */
	if ((lan78xx_read_reg(sc, ETH_RX_ADDRL, &mac_l) == 0) &&
	    (lan78xx_read_reg(sc, ETH_RX_ADDRH, &mac_h) == 0)) {
		ue->ue_eaddr[5] = (uint8_t)((mac_h >> 8) & 0xff);
		ue->ue_eaddr[4] = (uint8_t)((mac_h) & 0xff);
		ue->ue_eaddr[3] = (uint8_t)((mac_l >> 24) & 0xff);
		ue->ue_eaddr[2] = (uint8_t)((mac_l >> 16) & 0xff);
		ue->ue_eaddr[1] = (uint8_t)((mac_l >> 8) & 0xff);
		ue->ue_eaddr[0] = (uint8_t)((mac_l) & 0xff);
	}
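
	/*
	 * Editor's example for the unpacking above: the chip holds the
	 * address little-endian, so for aa:bb:cc:dd:ee:ff the registers
	 * read back as
	 *
	 *   ETH_RX_ADDRL = 0xddccbbaa   (addr[3]..addr[0])
	 *   ETH_RX_ADDRH = 0x0000ffee   (addr[5], addr[4])
	 */
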
	/*
	 * If RX_ADDRx did not provide a valid MAC address, try EEPROM.  If
	 * that doesn't work, try OTP.  Whether any of these methods work or
	 * not, try FDT data, because it is allowed to override the
	 * EEPROM/OTP values.
	 */
	if (ETHER_IS_VALID(ue->ue_eaddr)) {
		muge_dbg_printf(sc, "MAC assigned from registers\n");
	} else if (lan78xx_eeprom_present(sc) &&
	    lan78xx_eeprom_read_raw(sc, ETH_E2P_MAC_OFFSET,
	    ue->ue_eaddr, ETHER_ADDR_LEN) == 0 &&
	    ETHER_IS_VALID(ue->ue_eaddr)) {
		muge_dbg_printf(sc, "MAC assigned from EEPROM\n");
	} else if (lan78xx_otp_read(sc, OTP_MAC_OFFSET,
	    ue->ue_eaddr, ETHER_ADDR_LEN) == 0 &&
	    ETHER_IS_VALID(ue->ue_eaddr)) {
		muge_dbg_printf(sc, "MAC assigned from OTP\n");
	}

#ifdef FDT
	/* ue->ue_eaddr is modified only if config exists for this device. */
	usb_fdt_get_mac_addr(ue->ue_dev, ue);
	if (ETHER_IS_VALID(ue->ue_eaddr)) {
		muge_dbg_printf(sc, "MAC assigned from FDT data\n");
	}
#endif

	if (!ETHER_IS_VALID(ue->ue_eaddr)) {
		muge_dbg_printf(sc, "MAC assigned randomly\n");
		arc4rand(ue->ue_eaddr, ETHER_ADDR_LEN, 0);
		ue->ue_eaddr[0] &= ~0x01;	/* unicast */
		ue->ue_eaddr[0] |= 0x02;	/* locally administered */
	}
}

/**
 * muge_set_leds - Initializes the NIC LED pattern
 * @ue: the USB ethernet device
 *
 * Tries to store the LED modes.
 * Supports only a DTB blob, like the Linux driver does.
 */
static void
muge_set_leds(struct usb_ether *ue)
{
#ifdef FDT
	struct muge_softc *sc = uether_getsc(ue);
	phandle_t node;
	pcell_t modes[4];	/* 4 LEDs are possible */
	ssize_t proplen;
	uint32_t count;

	if ((node = usb_fdt_get_node(ue->ue_dev, ue->ue_udev)) != -1 &&
	    (proplen = OF_getencprop(node, "microchip,led-modes", modes,
	    sizeof(modes))) > 0) {
		count = proplen / sizeof(uint32_t);
		sc->sc_leds = (count > 0) * ETH_HW_CFG_LED0_EN_ |
		    (count > 1) * ETH_HW_CFG_LED1_EN_ |
		    (count > 2) * ETH_HW_CFG_LED2_EN_ |
		    (count > 3) * ETH_HW_CFG_LED3_EN_;
		while (count-- > 0) {
			sc->sc_led_modes |= (modes[count] & 0xf) << (4 * count);
			sc->sc_led_modes_mask |= 0xf << (4 * count);
		}
		muge_dbg_printf(sc, "LED modes set from FDT data\n");
	}
#endif
}

/**
 * muge_attach_post - Called after the driver attached to the USB interface
 * @ue: the USB ethernet device
 *
 * This is where the chip is initialised for the first time.  This is
 * different from the muge_init() function, in that muge_init() is designed
 * to set up the H/W to match the UE settings and can be called after a
 * reset.
 */
static void
muge_attach_post(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);

	muge_dbg_printf(sc, "Calling muge_attach_post.\n");

	/* Setup some of the basics. */
	sc->sc_phyno = 1;

	muge_set_mac_addr(ue);
	muge_set_leds(ue);

	/* Initialise the chip for the first time. */
	lan78xx_chip_init(sc);
}

/**
 * muge_attach_post_sub - Called after attach to the USB interface
 * @ue: the USB ethernet device
 *
 * Most of this is boilerplate code copied from the base USB ethernet
 * driver.  It has been overridden so that we can indicate to the system
 * that the chip supports H/W checksumming.
 *
 * RETURNS:
 * Returns 0 on success or a negative error code.
 */
static int
muge_attach_post_sub(struct usb_ether *ue)
{
	struct muge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	muge_dbg_printf(sc, "Calling muge_attach_post_sub.\n");
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = muge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * The chip supports TCP/UDP checksum offloading on TX and RX paths,
	 * however currently only RX checksum is supported in the driver
	 * (see top of file).
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_hwassist = 0;
	ifp->if_capabilities |= IFCAP_RXCSUM;

	if (MUGE_DEFAULT_TX_CSUM_ENABLE)
		ifp->if_capabilities |= IFCAP_TXCSUM;

	/*
	 * In the Linux driver they also enable scatter/gather (NETIF_F_SG)
	 * here; that's something related to socket buffers used in Linux.
	 * FreeBSD doesn't have that as an interface feature.
	 */
	if (MUGE_DEFAULT_TSO_ENABLE)
		ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;

#if 0
	/* TX checksumming is disabled since not yet implemented. */
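	/*
	 * Editor's sketch of what a TX-checksum path would involve (the
	 * bit names are illustrative assumptions, mirroring the Linux
	 * lan78xx register layout): besides the capability bits below,
	 * the bulk-write callback would need to flag each packet, e.g.
	 *
	 *   if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
	 *       tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
	 *
	 * before the command words are copied into the transfer.
	 */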
	ifp->if_capabilities |= IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_TXCSUM;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
#endif

	ifp->if_capenable = ifp->if_capabilities;

-	mtx_lock(&Giant);
+	bus_topo_lock();
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0);
-	mtx_unlock(&Giant);
+	bus_topo_unlock();

	return (error);
}

/**
 * muge_start - Starts communication with the LAN78xx chip
 * @ue: USB ether interface
 */
static void
muge_start(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);

	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[MUGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[MUGE_BULK_DT_WR]);
}

/**
 * muge_ioctl - ioctl function for the device
 * @ifp: interface pointer
 * @cmd: the ioctl command
 * @data: data passed in the ioctl call, typically a pointer to struct
 * ifreq.
 *
 * The ioctl routine is overridden to detect change requests for the H/W
 * checksum capabilities.
 *
 * RETURNS:
 * 0 on success and an error code on failure.
 */
static int
muge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue = ifp->if_softc;
	struct muge_softc *sc;
	struct ifreq *ifr;
	int rc;
	int mask;
	int reinit;

	if (cmd == SIOCSIFCAP) {
		sc = uether_getsc(ue);
		ifr = (struct ifreq *)data;

		MUGE_LOCK(sc);

		rc = 0;
		reinit = 0;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* Modify the RX CSUM enable bits. */
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				reinit = 1;
			}
		}

		MUGE_UNLOCK(sc);
		if (reinit)
			uether_init(ue);
	} else {
		rc = uether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

/**
 * muge_reset - Reset the LAN78xx chip
 * @sc: device soft context
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 */
static void
muge_reset(struct muge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		muge_warn_printf(sc, "reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	lan78xx_chip_init(sc);
}

/**
 * muge_set_addr_filter
 * @sc: device soft context
 * @index: index of the entry to the perfect address table
 * @addr: address to be written
 */
static void
muge_set_addr_filter(struct muge_softc *sc, int index,
    uint8_t addr[ETHER_ADDR_LEN])
{
	uint32_t tmp;

	if ((sc) && (index > 0) && (index < MUGE_NUM_PFILTER_ADDRS_)) {
		tmp = addr[3];
		tmp |= addr[2] | (tmp << 8);
		tmp |= addr[1] | (tmp << 8);
		tmp |= addr[0] | (tmp << 8);
		sc->sc_pfilter_table[index][1] = tmp;
		tmp = addr[5];
		tmp |= addr[4] | (tmp << 8);
		tmp |= ETH_MAF_HI_VALID_ | ETH_MAF_HI_TYPE_DST_;
		sc->sc_pfilter_table[index][0] = tmp;
	}
}
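
/*
 * Editor's example for the packing in muge_set_addr_filter() above: for
 * the address aa:bb:cc:dd:ee:ff the shift chain produces
 *
 *   sc_pfilter_table[index][1] = 0xddccbbaa            (addr[3]..addr[0])
 *   sc_pfilter_table[index][0] = ETH_MAF_HI_VALID_ |
 *                                ETH_MAF_HI_TYPE_DST_ |
 *                                0xffee                (addr[5], addr[4])
 *
 * matching the little-endian layout of the RX_ADDRx registers.
 */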

/**
 * lan78xx_dataport_write - write to the selected RAM
 * @sc: The device soft context.
 * @ram_select: Select which RAM to access.
 * @addr: Starting address to write to.
 * @length: Number of 32-bit words to write.
 * @buf: word-sized buffer to write to RAM, starting at @addr.
 *
 * RETURNS:
 * 0 if the write was successful.
 */
static int
lan78xx_dataport_write(struct muge_softc *sc, uint32_t ram_select,
    uint32_t addr, uint32_t length, uint32_t *buf)
{
	uint32_t dp_sel;
	int i, ret;

	MUGE_LOCK_ASSERT(sc, MA_OWNED);
	ret = lan78xx_wait_for_bits(sc, ETH_DP_SEL, ETH_DP_SEL_DPRDY_);
	if (ret != 0)
		goto done;

	ret = lan78xx_read_reg(sc, ETH_DP_SEL, &dp_sel);
	dp_sel &= ~ETH_DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;

	ret = lan78xx_write_reg(sc, ETH_DP_SEL, dp_sel);
	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(sc, ETH_DP_ADDR, addr + i);
		ret = lan78xx_write_reg(sc, ETH_DP_DATA, buf[i]);
		ret = lan78xx_write_reg(sc, ETH_DP_CMD, ETH_DP_CMD_WRITE_);
		ret = lan78xx_wait_for_bits(sc, ETH_DP_SEL,
		    ETH_DP_SEL_DPRDY_);
		if (ret != 0)
			goto done;
	}

done:
	return (ret);
}

/**
 * muge_multicast_write
 * @sc: device's soft context
 *
 * Writes the perfect address filters and hash address filters to their
 * corresponding registers and RAMs.
 */
static void
muge_multicast_write(struct muge_softc *sc)
{
	int i, ret;

	lan78xx_dataport_write(sc, ETH_DP_SEL_RSEL_VLAN_DA_,
	    ETH_DP_SEL_VHF_VLAN_LEN, ETH_DP_SEL_VHF_HASH_LEN,
	    sc->sc_mchash_table);

	for (i = 1; i < MUGE_NUM_PFILTER_ADDRS_; i++) {
		ret = lan78xx_write_reg(sc, PFILTER_HI(i), 0);
		ret = lan78xx_write_reg(sc, PFILTER_LO(i),
		    sc->sc_pfilter_table[i][1]);
		ret = lan78xx_write_reg(sc, PFILTER_HI(i),
		    sc->sc_pfilter_table[i][0]);
	}
}

/**
 * muge_hash - Calculate the hash of a mac address
 * @addr: The mac address to calculate the hash on
 *
 * This function is used when configuring a range of multicast mac
 * addresses to filter on.  The hash of the mac address is put in the
 * device's mac hash table.
 *
 * RETURNS:
 * Returns a value from 0-511, which is the hash of the mac address.
 */
static inline uint32_t
muge_hash(uint8_t addr[ETHER_ADDR_LEN])
{
	return (ether_crc32_be(addr, ETHER_ADDR_LEN) >> 23) & 0x1ff;
}
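
/*
 * Editor's note: muge_hash() keeps nine bits of the big-endian CRC32, so
 * values span 0-511 and the hash table needs 512 bits, i.e. 16 32-bit
 * words (ETH_DP_SEL_VHF_HASH_LEN entries, as written to the dataport
 * above).  For example, bitnum 0x123 (291) lands in
 * sc_mchash_table[291 / 32] = word 9, bit 291 % 32 = 3.
 */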

static u_int
muge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct muge_softc *sc = arg;
	uint32_t bitnum;

	/* First fill up the perfect address table. */
	if (cnt < 32 /* XXX */)
		muge_set_addr_filter(sc, cnt + 1, LLADDR(sdl));
	else {
		bitnum = muge_hash(LLADDR(sdl));
		sc->sc_mchash_table[bitnum / 32] |= (1 << (bitnum % 32));
		sc->sc_rfe_ctl |= ETH_RFE_CTL_MCAST_HASH_;
	}
	return (1);
}

/**
 * muge_setmulti - Setup multicast
 * @ue: usb ethernet device context
 *
 * Tells the device to either accept frames with a multicast mac address,
 * a select group of multicast mac addresses, or just the device's own mac
 * address.
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 */
static void
muge_setmulti(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	uint8_t i;

	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	sc->sc_rfe_ctl &= ~(ETH_RFE_CTL_UCAST_EN_ | ETH_RFE_CTL_MCAST_EN_ |
	    ETH_RFE_CTL_DA_PERFECT_ | ETH_RFE_CTL_MCAST_HASH_);

	/* Initialize hash filter table. */
	for (i = 0; i < ETH_DP_SEL_VHF_HASH_LEN; i++)
		sc->sc_mchash_table[i] = 0;

	/* Initialize perfect filter table. */
	for (i = 1; i < MUGE_NUM_PFILTER_ADDRS_; i++) {
		sc->sc_pfilter_table[i][0] =
		    sc->sc_pfilter_table[i][1] = 0;
	}

	sc->sc_rfe_ctl |= ETH_RFE_CTL_BCAST_EN_;

	if (ifp->if_flags & IFF_PROMISC) {
		muge_dbg_printf(sc, "promiscuous mode enabled\n");
		sc->sc_rfe_ctl |= ETH_RFE_CTL_MCAST_EN_ |
		    ETH_RFE_CTL_UCAST_EN_;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		muge_dbg_printf(sc, "receive all multicast enabled\n");
		sc->sc_rfe_ctl |= ETH_RFE_CTL_MCAST_EN_;
	} else {
		if_foreach_llmaddr(ifp, muge_hash_maddr, sc);
		muge_multicast_write(sc);
	}
	lan78xx_write_reg(sc, ETH_RFE_CTL, sc->sc_rfe_ctl);
}

/**
 * muge_setpromisc - Enables/disables promiscuous mode
 * @ue: usb ethernet device context
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 */
static void
muge_setpromisc(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	muge_dbg_printf(sc, "promiscuous mode %sabled\n",
	    (ifp->if_flags & IFF_PROMISC) ? "en" : "dis");

	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rfe_ctl |= ETH_RFE_CTL_MCAST_EN_ |
		    ETH_RFE_CTL_UCAST_EN_;
	else
		sc->sc_rfe_ctl &= ~(ETH_RFE_CTL_MCAST_EN_);

	lan78xx_write_reg(sc, ETH_RFE_CTL, sc->sc_rfe_ctl);
}

/**
 * muge_sethwcsum - Enable or disable H/W UDP and TCP checksumming
 * @sc: driver soft context
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 *
 * RETURNS:
 * Returns 0 on success or a negative error code.
 */
static int
muge_sethwcsum(struct muge_softc *sc)
{
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	int err;

	if (!ifp)
		return (-EIO);

	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		sc->sc_rfe_ctl |= ETH_RFE_CTL_IGMP_COE_ |
		    ETH_RFE_CTL_ICMP_COE_;
		sc->sc_rfe_ctl |= ETH_RFE_CTL_TCPUDP_COE_ |
		    ETH_RFE_CTL_IP_COE_;
	} else {
		sc->sc_rfe_ctl &= ~(ETH_RFE_CTL_IGMP_COE_ |
		    ETH_RFE_CTL_ICMP_COE_);
		sc->sc_rfe_ctl &= ~(ETH_RFE_CTL_TCPUDP_COE_ |
		    ETH_RFE_CTL_IP_COE_);
	}

	sc->sc_rfe_ctl &= ~ETH_RFE_CTL_VLAN_FILTER_;

	err = lan78xx_write_reg(sc, ETH_RFE_CTL, sc->sc_rfe_ctl);
	if (err != 0) {
		muge_warn_printf(sc, "failed to write ETH_RFE_CTL (err=%d)\n",
		    err);
		return (err);
	}

	return (0);
}

/**
 * muge_ifmedia_upd - Set media options
 * @ifp: interface pointer
 *
 * Basically boilerplate code that simply calls the mii functions to set
 * the media options.
 *
 * LOCKING:
 * The device lock must be held before this function is called.
 *
 * RETURNS:
 * Returns 0 on success or a negative error code.
 */
static int
muge_ifmedia_upd(struct ifnet *ifp)
{
	struct muge_softc *sc = ifp->if_softc;
	struct mii_data *mii = uether_getmii(&sc->sc_ue);
	struct mii_softc *miisc;
	int err;

	muge_dbg_printf(sc, "Calling muge_ifmedia_upd.\n");

	MUGE_LOCK_ASSERT(sc, MA_OWNED);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	err = mii_mediachg(mii);
	return (err);
}

/**
 * muge_init - Initialises the LAN78xx chip
 * @ue: USB ether interface
 *
 * Called when the interface is brought up (i.e. ifconfig ue0 up); this
 * initialises the interface and the rx/tx pipes.
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 */
static void
muge_init(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	muge_dbg_printf(sc, "Calling muge_init.\n");
	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	if (lan78xx_setmacaddress(sc, IF_LLADDR(ifp)))
		muge_dbg_printf(sc, "setting MAC address failed\n");

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O. */
	muge_stop(ue);

	/* Reset the ethernet interface. */
	muge_reset(sc);
	/* Load the multicast filter. */
	muge_setmulti(ue);

	/* TCP/UDP checksum offload engines. */
	muge_sethwcsum(sc);

	usbd_xfer_set_stall(sc->sc_xfer[MUGE_BULK_DT_WR]);

	/* Indicate we are up and running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Switch to selected media. */
	muge_ifmedia_upd(ifp);
	muge_start(ue);
}

/**
 * muge_stop - Stops communication with the LAN78xx chip
 * @ue: USB ether interface
 */
static void
muge_stop(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~MUGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped.
	 */
	usbd_transfer_stop(sc->sc_xfer[MUGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[MUGE_BULK_DT_RD]);
}

/**
 * muge_tick - Called periodically to monitor the state of the LAN78xx chip
 * @ue: USB ether interface
 *
 * Simply calls the mii status functions to check the state of the link.
 *
 * LOCKING:
 * Should be called with the MUGE lock held.
 */
static void
muge_tick(struct usb_ether *ue)
{
	struct muge_softc *sc = uether_getsc(ue);
	struct mii_data *mii = uether_getmii(&sc->sc_ue);

	MUGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
	if ((sc->sc_flags & MUGE_FLAG_LINK) == 0) {
		lan78xx_miibus_statchg(ue->ue_dev);
		if ((sc->sc_flags & MUGE_FLAG_LINK) != 0)
			muge_start(ue);
	}
}

/**
 * muge_ifmedia_sts - Report current media status
 * @ifp: ifnet interface pointer
 * @ifmr: interface media request
 *
 * Call the mii functions to get the media status.
 *
 * LOCKING:
 * Internally takes and releases the device lock.
 */
static void
muge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct muge_softc *sc = ifp->if_softc;
	struct mii_data *mii = uether_getmii(&sc->sc_ue);

	MUGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	MUGE_UNLOCK(sc);
}

/**
 * muge_probe - Probe the interface.
 * @dev: muge device handle
 *
 * Checks if the device is a match for this driver.
 *
 * RETURNS:
 * Returns 0 on success or an error code on failure.
 */
static int
muge_probe(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != MUGE_CONFIG_INDEX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != MUGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(lan78xx_devs, sizeof(lan78xx_devs),
	    uaa));
}

/**
 * muge_attach - Attach the interface.
 * @dev: muge device handle
 *
 * Allocate softc structures, do ifmedia setup and ethernet/BPF attach.
 *
 * RETURNS:
 * Returns 0 on success or a negative error code.
 */
static int
muge_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct muge_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;
	uint8_t iface_index;
	int err;

	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Setup the endpoints for the Microchip LAN78xx device. */
	iface_index = MUGE_IFACE_IDX;
	err = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
	    muge_config, MUGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (err) {
		device_printf(dev, "error: allocating USB transfers failed\n");
		goto err;
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &muge_ue_methods;

	err = uether_ifattach(ue);
	if (err) {
		device_printf(dev, "error: could not attach interface\n");
		goto err_usbd;
	}

	/* Wait for lan78xx_chip_init from post-attach callback to complete.
*/ uether_ifattach_wait(ue); if (!(sc->sc_flags & MUGE_FLAG_INIT_DONE)) goto err_attached; return (0); err_attached: uether_ifdetach(ue); err_usbd: usbd_transfer_unsetup(sc->sc_xfer, MUGE_N_TRANSFER); err: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /** * muge_detach - Detach the interface. * @dev: muge device handle * * RETURNS: * Returns 0. */ static int muge_detach(device_t dev) { struct muge_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; usbd_transfer_unsetup(sc->sc_xfer, MUGE_N_TRANSFER); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } static device_method_t muge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, muge_probe), DEVMETHOD(device_attach, muge_attach), DEVMETHOD(device_detach, muge_detach), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, lan78xx_miibus_readreg), DEVMETHOD(miibus_writereg, lan78xx_miibus_writereg), DEVMETHOD(miibus_statchg, lan78xx_miibus_statchg), DEVMETHOD_END }; static driver_t muge_driver = { .name = "muge", .methods = muge_methods, .size = sizeof(struct muge_softc), }; static devclass_t muge_devclass; DRIVER_MODULE(muge, uhub, muge_driver, muge_devclass, NULL, NULL); DRIVER_MODULE(miibus, muge, miibus_driver, miibus_devclass, NULL, NULL); MODULE_DEPEND(muge, uether, 1, 1, 1); MODULE_DEPEND(muge, usb, 1, 1, 1); MODULE_DEPEND(muge, ether, 1, 1, 1); MODULE_DEPEND(muge, miibus, 1, 1, 1); MODULE_VERSION(muge, 1); USB_PNP_HOST_INFO(lan78xx_devs); diff --git a/sys/dev/usb/net/if_smsc.c b/sys/dev/usb/net/if_smsc.c index f1c7ce7082b2..00c127937412 100644 --- a/sys/dev/usb/net/if_smsc.c +++ b/sys/dev/usb/net/if_smsc.c @@ -1,1781 +1,1781 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * SMSC LAN9xxx devices (http://www.smsc.com/) * * The LAN9500 & LAN9500A devices are stand-alone USB to Ethernet chips that * support USB 2.0 and 10/100 Mbps Ethernet. * * The LAN951x devices are an integrated USB hub and USB to Ethernet adapter. * The driver only covers the Ethernet part, the standard USB hub driver * supports the hub part. 
* * This driver is closely modelled on the Linux driver written and copyrighted * by SMSC. * * * * * H/W TCP & UDP Checksum Offloading * --------------------------------- * The chip supports both tx and rx offloading of UDP & TCP checksums, this * feature can be dynamically enabled/disabled. * * RX checksuming is performed across bytes after the IPv4 header to the end of * the Ethernet frame, this means if the frame is padded with non-zero values * the H/W checksum will be incorrect, however the rx code compensates for this. * * TX checksuming is more complicated, the device requires a special header to * be prefixed onto the start of the frame which indicates the start and end * positions of the UDP or TCP frame. This requires the driver to manually * go through the packet data and decode the headers prior to sending. * On Linux they generally provide cues to the location of the csum and the * area to calculate it over, on FreeBSD we seem to have to do it all ourselves, * hence this is not as optimal and therefore h/w tX checksum is currently not * implemented. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #ifdef FDT #include #include #include #include #endif #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR smsc_debug #include #include #include #include #include "miibus_if.h" SYSCTL_NODE(_hw_usb, OID_AUTO, smsc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB smsc"); static bool smsc_rx_packet_batching = 1; SYSCTL_BOOL(_hw_usb_smsc, OID_AUTO, smsc_rx_packet_batching, CTLFLAG_RDTUN, &smsc_rx_packet_batching, 0, "If set, allows packet batching to increase throughput and latency. " "Else throughput and latency is decreased."); #ifdef USB_DEBUG static int smsc_debug = 0; SYSCTL_INT(_hw_usb_smsc, OID_AUTO, debug, CTLFLAG_RWTUN, &smsc_debug, 0, "Debug level"); #endif /* * Various supported device vendors/products. */ static const struct usb_device_id smsc_devs[] = { #define SMSC_DEV(p,i) { USB_VPI(USB_VENDOR_SMC2, USB_PRODUCT_SMC2_##p, i) } SMSC_DEV(LAN89530_ETH, 0), SMSC_DEV(LAN9500_ETH, 0), SMSC_DEV(LAN9500_ETH_2, 0), SMSC_DEV(LAN9500A_ETH, 0), SMSC_DEV(LAN9500A_ETH_2, 0), SMSC_DEV(LAN9505_ETH, 0), SMSC_DEV(LAN9505A_ETH, 0), SMSC_DEV(LAN9514_ETH, 0), SMSC_DEV(LAN9514_ETH_2, 0), SMSC_DEV(LAN9530_ETH, 0), SMSC_DEV(LAN9730_ETH, 0), SMSC_DEV(LAN9500_SAL10, 0), SMSC_DEV(LAN9505_SAL10, 0), SMSC_DEV(LAN9500A_SAL10, 0), SMSC_DEV(LAN9505A_SAL10, 0), SMSC_DEV(LAN9514_SAL10, 0), SMSC_DEV(LAN9500A_HAL, 0), SMSC_DEV(LAN9505A_HAL, 0), #undef SMSC_DEV }; #ifdef USB_DEBUG #define smsc_dbg_printf(sc, fmt, args...) \ do { \ if (smsc_debug > 0) \ device_printf((sc)->sc_ue.ue_dev, "debug: " fmt, ##args); \ } while(0) #else #define smsc_dbg_printf(sc, fmt, args...) do { } while (0) #endif #define smsc_warn_printf(sc, fmt, args...) \ device_printf((sc)->sc_ue.ue_dev, "warning: " fmt, ##args) #define smsc_err_printf(sc, fmt, args...) 
\ device_printf((sc)->sc_ue.ue_dev, "error: " fmt, ##args) #define ETHER_IS_VALID(addr) \ (!ETHER_IS_MULTICAST(addr) && !ETHER_IS_ZERO(addr)) static device_probe_t smsc_probe; static device_attach_t smsc_attach; static device_detach_t smsc_detach; static usb_callback_t smsc_bulk_read_callback; static usb_callback_t smsc_bulk_write_callback; static miibus_readreg_t smsc_miibus_readreg; static miibus_writereg_t smsc_miibus_writereg; static miibus_statchg_t smsc_miibus_statchg; static int smsc_attach_post_sub(struct usb_ether *ue); static uether_fn_t smsc_attach_post; static uether_fn_t smsc_init; static uether_fn_t smsc_stop; static uether_fn_t smsc_start; static uether_fn_t smsc_tick; static uether_fn_t smsc_setmulti; static uether_fn_t smsc_setpromisc; static int smsc_ifmedia_upd(struct ifnet *); static void smsc_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int smsc_chip_init(struct smsc_softc *sc); static int smsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static const struct usb_config smsc_config[SMSC_N_TRANSFER] = { [SMSC_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .frames = 16, .bufsize = 16 * (MCLBYTES + 16), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = smsc_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [SMSC_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 20480, /* bytes */ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = smsc_bulk_read_callback, .timeout = 0, /* no timeout */ }, /* The SMSC chip supports an interrupt endpoints, however they aren't * needed as we poll on the MII status. */ }; static const struct usb_ether_methods smsc_ue_methods = { .ue_attach_post = smsc_attach_post, .ue_attach_post_sub = smsc_attach_post_sub, .ue_start = smsc_start, .ue_ioctl = smsc_ioctl, .ue_init = smsc_init, .ue_stop = smsc_stop, .ue_tick = smsc_tick, .ue_setmulti = smsc_setmulti, .ue_setpromisc = smsc_setpromisc, .ue_mii_upd = smsc_ifmedia_upd, .ue_mii_sts = smsc_ifmedia_sts, }; /** * smsc_read_reg - Reads a 32-bit register on the device * @sc: driver soft context * @off: offset of the register * @data: pointer a value that will be populated with the register value * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, a USB_ERR_?? error code on failure. */ static int smsc_read_reg(struct smsc_softc *sc, uint32_t off, uint32_t *data) { struct usb_device_request req; uint32_t buf; usb_error_t err; SMSC_LOCK_ASSERT(sc, MA_OWNED); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = SMSC_UR_READ_REG; USETW(req.wValue, 0); USETW(req.wIndex, off); USETW(req.wLength, 4); err = uether_do_request(&sc->sc_ue, &req, &buf, 1000); if (err != 0) smsc_warn_printf(sc, "Failed to read register 0x%0x\n", off); *data = le32toh(buf); return (err); } /** * smsc_write_reg - Writes a 32-bit register on the device * @sc: driver soft context * @off: offset of the register * @data: the 32-bit value to write into the register * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, a USB_ERR_?? error code on failure. 
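 *
 * EXAMPLE (editor's sketch, not part of the original source): these two
 * accessors are typically combined into a read-modify-write with the
 * device lock held, e.g. to set a single bit:
 *
 *	uint32_t val;
 *	if (smsc_read_reg(sc, SMSC_HW_CFG, &val) == 0) {
 *		val |= SMSC_HW_CFG_BCE;
 *		smsc_write_reg(sc, SMSC_HW_CFG, val);
 *	}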
*/ static int smsc_write_reg(struct smsc_softc *sc, uint32_t off, uint32_t data) { struct usb_device_request req; uint32_t buf; usb_error_t err; SMSC_LOCK_ASSERT(sc, MA_OWNED); buf = htole32(data); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = SMSC_UR_WRITE_REG; USETW(req.wValue, 0); USETW(req.wIndex, off); USETW(req.wLength, 4); err = uether_do_request(&sc->sc_ue, &req, &buf, 1000); if (err != 0) smsc_warn_printf(sc, "Failed to write register 0x%0x\n", off); return (err); } /** * smsc_wait_for_bits - Polls on a register value until bits are cleared * @sc: soft context * @reg: offset of the register * @bits: if the bits are clear the function returns * * LOCKING: * The device lock must be held before calling this function. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. */ static int smsc_wait_for_bits(struct smsc_softc *sc, uint32_t reg, uint32_t bits) { usb_ticks_t start_ticks; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); uint32_t val; int err; SMSC_LOCK_ASSERT(sc, MA_OWNED); start_ticks = (usb_ticks_t)ticks; do { if ((err = smsc_read_reg(sc, reg, &val)) != 0) return (err); if (!(val & bits)) return (0); uether_pause(&sc->sc_ue, hz / 100); } while (((usb_ticks_t)(ticks - start_ticks)) < max_ticks); return (USB_ERR_TIMEOUT); } /** * smsc_eeprom_read - Reads the attached EEPROM * @sc: soft context * @off: the eeprom address offset * @buf: stores the bytes * @buflen: the number of bytes to read * * Simply reads bytes from an attached eeprom. * * LOCKING: * The function takes and releases the device lock if it is not already held. * * RETURNS: * 0 on success, or a USB_ERR_?? error code on failure. */ static int smsc_eeprom_read(struct smsc_softc *sc, uint16_t off, uint8_t *buf, uint16_t buflen) { usb_ticks_t start_ticks; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); int err; int locked; uint32_t val; uint16_t i; locked = mtx_owned(&sc->sc_mtx); if (!locked) SMSC_LOCK(sc); err = smsc_wait_for_bits(sc, SMSC_EEPROM_CMD, SMSC_EEPROM_CMD_BUSY); if (err != 0) { smsc_warn_printf(sc, "eeprom busy, failed to read data\n"); goto done; } /* start reading the bytes, one at a time */ for (i = 0; i < buflen; i++) { val = SMSC_EEPROM_CMD_BUSY | (SMSC_EEPROM_CMD_ADDR_MASK & (off + i)); if ((err = smsc_write_reg(sc, SMSC_EEPROM_CMD, val)) != 0) goto done; start_ticks = (usb_ticks_t)ticks; do { if ((err = smsc_read_reg(sc, SMSC_EEPROM_CMD, &val)) != 0) goto done; if (!(val & SMSC_EEPROM_CMD_BUSY) || (val & SMSC_EEPROM_CMD_TIMEOUT)) break; uether_pause(&sc->sc_ue, hz / 100); } while (((usb_ticks_t)(ticks - start_ticks)) < max_ticks); if (val & (SMSC_EEPROM_CMD_BUSY | SMSC_EEPROM_CMD_TIMEOUT)) { smsc_warn_printf(sc, "eeprom command failed\n"); err = USB_ERR_IOERROR; break; } if ((err = smsc_read_reg(sc, SMSC_EEPROM_DATA, &val)) != 0) goto done; buf[i] = (val & 0xff); } done: if (!locked) SMSC_UNLOCK(sc); return (err); } /** * smsc_miibus_readreg - Reads a MII/MDIO register * @dev: usb ether device * @phy: the number of phy reading from * @reg: the register address * * Attempts to read a phy register over the MII bus. * * LOCKING: * Takes and releases the device mutex lock if not already held. * * RETURNS: * Returns the 16-bits read from the MII register, if this function fails 0 * is returned. 
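 *
 * EXAMPLE (editor's sketch): smsc_eeprom_read() above is how the factory
 * MAC address is pulled from the EEPROM during attach; assuming the
 * conventional offset macro name, roughly
 *
 *	uint8_t eaddr[ETHER_ADDR_LEN];
 *	if (smsc_eeprom_read(sc, SMSC_EEPROM_MAC_OFFSET, eaddr,
 *	    ETHER_ADDR_LEN) == 0 && ETHER_IS_VALID(eaddr))
 *		use eaddr as the station address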
 */
static int
smsc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct smsc_softc *sc = device_get_softc(dev);
	int locked;
	uint32_t addr;
	uint32_t val = 0;

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		SMSC_LOCK(sc);

	if (smsc_wait_for_bits(sc, SMSC_MII_ADDR, SMSC_MII_BUSY) != 0) {
		smsc_warn_printf(sc, "MII is busy\n");
		goto done;
	}

	addr = (phy << 11) | (reg << 6) | SMSC_MII_READ | SMSC_MII_BUSY;
	smsc_write_reg(sc, SMSC_MII_ADDR, addr);

	if (smsc_wait_for_bits(sc, SMSC_MII_ADDR, SMSC_MII_BUSY) != 0)
		smsc_warn_printf(sc, "MII read timeout\n");

	smsc_read_reg(sc, SMSC_MII_DATA, &val);

done:
	if (!locked)
		SMSC_UNLOCK(sc);

	return (val & 0xFFFF);
}

/**
 * smsc_miibus_writereg - Writes a MII/MDIO register
 * @dev: usb ether device
 * @phy: the PHY number to write to
 * @reg: the register address
 * @val: the value to write
 *
 * Attempts to write a phy register over the MII bus.
 *
 * LOCKING:
 * Takes and releases the device mutex lock if not already held.
 *
 * RETURNS:
 * Always returns 0 regardless of success or failure.
 */
static int
smsc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct smsc_softc *sc = device_get_softc(dev);
	int locked;
	uint32_t addr;

	if (sc->sc_phyno != phy)
		return (0);

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		SMSC_LOCK(sc);

	if (smsc_wait_for_bits(sc, SMSC_MII_ADDR, SMSC_MII_BUSY) != 0) {
		smsc_warn_printf(sc, "MII is busy\n");
		goto done;
	}

	smsc_write_reg(sc, SMSC_MII_DATA, val);

	addr = (phy << 11) | (reg << 6) | SMSC_MII_WRITE | SMSC_MII_BUSY;
	smsc_write_reg(sc, SMSC_MII_ADDR, addr);

	if (smsc_wait_for_bits(sc, SMSC_MII_ADDR, SMSC_MII_BUSY) != 0)
		smsc_warn_printf(sc, "MII write timeout\n");

done:
	if (!locked)
		SMSC_UNLOCK(sc);
	return (0);
}

/**
 * smsc_miibus_statchg - Called to detect phy status change
 * @dev: usb ether device
 *
 * This function is called periodically by the system to poll for status
 * changes of the link.
 *
 * LOCKING:
 * Takes and releases the device mutex lock if not already held.
 */
static void
smsc_miibus_statchg(device_t dev)
{
	struct smsc_softc *sc = device_get_softc(dev);
	struct mii_data *mii = uether_getmii(&sc->sc_ue);
	struct ifnet *ifp;
	int locked;
	int err;
	uint32_t flow;
	uint32_t afc_cfg;

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		SMSC_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* Use the MII status to determine link status. */
	sc->sc_flags &= ~SMSC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= SMSC_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Gigabit ethernet not supported by chipset. */
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing.
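	 *
	 * Editor's aside, a worked example for the MII_ADDR command word
	 * composed in the two accessors above: with phy = 1 and reg = 4
	 * (MII_ANAR),
	 *
	 *	addr = (1 << 11) | (4 << 6) | SMSC_MII_READ | SMSC_MII_BUSY
	 *	     = 0x0800 | 0x0100 | the command/busy bits
	 *
	 * so the PHY address sits in bits 15:11, the register index in
	 * bits 10:6, and the busy bit doubles as the "go" strobe.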
*/ if ((sc->sc_flags & SMSC_FLAG_LINK) == 0) { smsc_dbg_printf(sc, "link flag not set\n"); goto done; } err = smsc_read_reg(sc, SMSC_AFC_CFG, &afc_cfg); if (err) { smsc_warn_printf(sc, "failed to read initial AFC_CFG, error %d\n", err); goto done; } /* Enable/disable full duplex operation and TX/RX pause */ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { smsc_dbg_printf(sc, "full duplex operation\n"); sc->sc_mac_csr &= ~SMSC_MAC_CSR_RCVOWN; sc->sc_mac_csr |= SMSC_MAC_CSR_FDPX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) flow = 0xffff0002; else flow = 0; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) afc_cfg |= 0xf; else afc_cfg &= ~0xf; } else { smsc_dbg_printf(sc, "half duplex operation\n"); sc->sc_mac_csr &= ~SMSC_MAC_CSR_FDPX; sc->sc_mac_csr |= SMSC_MAC_CSR_RCVOWN; flow = 0; afc_cfg |= 0xf; } err = smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr); err += smsc_write_reg(sc, SMSC_FLOW, flow); err += smsc_write_reg(sc, SMSC_AFC_CFG, afc_cfg); if (err) smsc_warn_printf(sc, "media change failed, error %d\n", err); done: if (!locked) SMSC_UNLOCK(sc); } /** * smsc_ifmedia_upd - Set media options * @ifp: interface pointer * * Basically boilerplate code that simply calls the mii functions to set the * media options. * * LOCKING: * The device lock must be held before this function is called. * * RETURNS: * Returns 0 on success or a negative error code. */ static int smsc_ifmedia_upd(struct ifnet *ifp) { struct smsc_softc *sc = ifp->if_softc; struct mii_data *mii = uether_getmii(&sc->sc_ue); struct mii_softc *miisc; int err; SMSC_LOCK_ASSERT(sc, MA_OWNED); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); err = mii_mediachg(mii); return (err); } /** * smsc_ifmedia_sts - Report current media status * @ifp: inet interface pointer * @ifmr: interface media request * * Basically boilerplate code that simply calls the mii functions to get the * media status. * * LOCKING: * Internally takes and releases the device lock. */ static void smsc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smsc_softc *sc = ifp->if_softc; struct mii_data *mii = uether_getmii(&sc->sc_ue); SMSC_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; SMSC_UNLOCK(sc); } /** * smsc_hash - Calculate the hash of a mac address * @addr: The mac address to calculate the hash on * * This function is used when configuring a range of m'cast mac addresses to * filter on. The hash of the mac address is put in the device's mac hash * table. * * RETURNS: * Returns a value from 0-63 value which is the hash of the mac address. */ static inline uint32_t smsc_hash(uint8_t addr[ETHER_ADDR_LEN]) { return (ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26) & 0x3f; } static u_int smsc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t hash, *hashtbl = arg; hash = smsc_hash(LLADDR(sdl)); hashtbl[hash >> 5] |= 1 << (hash & 0x1F); return (1); } /** * smsc_setmulti - Setup multicast * @ue: usb ethernet device context * * Tells the device to either accept frames with a multicast mac address, a * select group of m'cast mac addresses or just the devices mac address. * * LOCKING: * Should be called with the SMSC lock held. 
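 *
 * EXAMPLE (editor's note): smsc_hash() above keeps the top six bits of
 * the big-endian CRC32, so hashes span 0-63 across the HASHL/HASHH pair;
 * hash 0x2c (44), say, sets bit 44 & 0x1f = 12 of hashtbl[44 >> 5] =
 * hashtbl[1], which is written to SMSC_HASHH below.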
*/ static void smsc_setmulti(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint32_t hashtbl[2] = { 0, 0 }; SMSC_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { smsc_dbg_printf(sc, "receive all multicast enabled\n"); sc->sc_mac_csr |= SMSC_MAC_CSR_MCPAS; sc->sc_mac_csr &= ~SMSC_MAC_CSR_HPFILT; } else { if (if_foreach_llmaddr(ifp, smsc_hash_maddr, &hashtbl) > 0) { /* We are filtering on a set of address so calculate * hashes of each of the address and set the * corresponding bits in the register. */ sc->sc_mac_csr |= SMSC_MAC_CSR_HPFILT; sc->sc_mac_csr &= ~(SMSC_MAC_CSR_PRMS | SMSC_MAC_CSR_MCPAS); } else { /* Only receive packets with destination set to * our mac address */ sc->sc_mac_csr &= ~(SMSC_MAC_CSR_MCPAS | SMSC_MAC_CSR_HPFILT); } /* Debug */ if (sc->sc_mac_csr & SMSC_MAC_CSR_HPFILT) smsc_dbg_printf(sc, "receive select group of macs\n"); else smsc_dbg_printf(sc, "receive own packets only\n"); } /* Write the hash table and mac control registers */ smsc_write_reg(sc, SMSC_HASHH, hashtbl[1]); smsc_write_reg(sc, SMSC_HASHL, hashtbl[0]); smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr); } /** * smsc_setpromisc - Enables/disables promiscuous mode * @ue: usb ethernet device context * * LOCKING: * Should be called with the SMSC lock held. */ static void smsc_setpromisc(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); smsc_dbg_printf(sc, "promiscuous mode %sabled\n", (ifp->if_flags & IFF_PROMISC) ? "en" : "dis"); SMSC_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & IFF_PROMISC) sc->sc_mac_csr |= SMSC_MAC_CSR_PRMS; else sc->sc_mac_csr &= ~SMSC_MAC_CSR_PRMS; smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr); } /** * smsc_sethwcsum - Enable or disable H/W UDP and TCP checksumming * @sc: driver soft context * * LOCKING: * Should be called with the SMSC lock held. * * RETURNS: * Returns 0 on success or a negative error code. */ static int smsc_sethwcsum(struct smsc_softc *sc) { struct ifnet *ifp = uether_getifp(&sc->sc_ue); uint32_t val; int err; if (!ifp) return (-EIO); SMSC_LOCK_ASSERT(sc, MA_OWNED); err = smsc_read_reg(sc, SMSC_COE_CTRL, &val); if (err != 0) { smsc_warn_printf(sc, "failed to read SMSC_COE_CTRL (err=%d)\n", err); return (err); } /* Enable/disable the Rx checksum */ if ((ifp->if_capabilities & ifp->if_capenable) & IFCAP_RXCSUM) val |= SMSC_COE_CTRL_RX_EN; else val &= ~SMSC_COE_CTRL_RX_EN; /* Enable/disable the Tx checksum (currently not supported) */ if ((ifp->if_capabilities & ifp->if_capenable) & IFCAP_TXCSUM) val |= SMSC_COE_CTRL_TX_EN; else val &= ~SMSC_COE_CTRL_TX_EN; err = smsc_write_reg(sc, SMSC_COE_CTRL, val); if (err != 0) { smsc_warn_printf(sc, "failed to write SMSC_COE_CTRL (err=%d)\n", err); return (err); } return (0); } /** * smsc_setmacaddress - Sets the mac address in the device * @sc: driver soft context * @addr: pointer to array contain at least 6 bytes of the mac * * Writes the MAC address into the device, usually the MAC is programmed with * values from the EEPROM. * * LOCKING: * Should be called with the SMSC lock held. * * RETURNS: * Returns 0 on success or a negative error code. 
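 *
 * EXAMPLE (editor's note): the packing below mirrors the little-endian
 * register layout; for aa:bb:cc:dd:ee:ff it writes
 *
 *	SMSC_MAC_ADDRL = 0xddccbbaa   (addr[3]..addr[0])
 *	SMSC_MAC_ADDRH = 0x0000ffee   (addr[5], addr[4])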
*/ static int smsc_setmacaddress(struct smsc_softc *sc, const uint8_t *addr) { int err; uint32_t val; smsc_dbg_printf(sc, "setting mac address to %02x:%02x:%02x:%02x:%02x:%02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); SMSC_LOCK_ASSERT(sc, MA_OWNED); val = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; if ((err = smsc_write_reg(sc, SMSC_MAC_ADDRL, val)) != 0) goto done; val = (addr[5] << 8) | addr[4]; err = smsc_write_reg(sc, SMSC_MAC_ADDRH, val); done: return (err); } /** * smsc_reset - Reset the SMSC chip * @sc: device soft context * * LOCKING: * Should be called with the SMSC lock held. */ static void smsc_reset(struct smsc_softc *sc) { struct usb_config_descriptor *cd; usb_error_t err; cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev); err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx, cd->bConfigurationValue); if (err) smsc_warn_printf(sc, "reset failed (ignored)\n"); /* Wait a little while for the chip to get its brains in order. */ uether_pause(&sc->sc_ue, hz / 100); /* Reinitialize controller to achieve full reset. */ smsc_chip_init(sc); } /** * smsc_init - Initialises the LAN95xx chip * @ue: USB ether interface * * Called when the interface is brought up (i.e. ifconfig ue0 up), this * initialise the interface and the rx/tx pipes. * * LOCKING: * Should be called with the SMSC lock held. */ static void smsc_init(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); SMSC_LOCK_ASSERT(sc, MA_OWNED); if (smsc_setmacaddress(sc, IF_LLADDR(ifp))) smsc_dbg_printf(sc, "setting MAC address failed\n"); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O */ smsc_stop(ue); /* Reset the ethernet interface. */ smsc_reset(sc); /* Load the multicast filter. */ smsc_setmulti(ue); /* TCP/UDP checksum offload engines. */ smsc_sethwcsum(sc); usbd_xfer_set_stall(sc->sc_xfer[SMSC_BULK_DT_WR]); /* Indicate we are up and running. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; /* Switch to selected media. */ smsc_ifmedia_upd(ifp); smsc_start(ue); } /** * smsc_bulk_read_callback - Read callback used to process the USB URB * @xfer: the USB transfer * @error: * * Reads the URB data which can contain one or more ethernet frames, the * frames are copyed into a mbuf and given to the system. * * LOCKING: * No locking required, doesn't access internal driver settings. */ static void smsc_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct smsc_softc *sc = usbd_xfer_softc(xfer); struct usb_ether *ue = &sc->sc_ue; struct ifnet *ifp = uether_getifp(ue); struct mbuf *m; struct usb_page_cache *pc; uint32_t rxhdr; int pktlen; int off; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); smsc_dbg_printf(sc, "rx : actlen %d\n", actlen); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* There is always a zero length frame after bringing the IF up */ if (actlen < (sizeof(rxhdr) + ETHER_CRC_LEN)) goto tr_setup; /* There maybe multiple packets in the USB frame, each will have a * header and each needs to have it's own mbuf allocated and populated * for it. 
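	 *
	 * Editor's sketch of one batched entry as parsed below: a 32-bit
	 * little-endian status word (error flags plus the frame length in
	 * SMSC_RX_STAT_FRM_LENGTH), then ETHER_ALIGN padding bytes, then
	 * the frame itself with FCS, with the next status word re-aligned
	 * to a 4-byte boundary.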
*/ pc = usbd_xfer_get_frame(xfer, 0); off = 0; while (off < actlen) { /* The frame header is always aligned on a 4 byte boundary */ off = ((off + 0x3) & ~0x3); if ((off + sizeof(rxhdr)) > actlen) goto tr_setup; usbd_copy_out(pc, off, &rxhdr, sizeof(rxhdr)); off += (sizeof(rxhdr) + ETHER_ALIGN); rxhdr = le32toh(rxhdr); pktlen = (uint16_t)SMSC_RX_STAT_FRM_LENGTH(rxhdr); smsc_dbg_printf(sc, "rx : rxhdr 0x%08x : pktlen %d : actlen %d : " "off %d\n", rxhdr, pktlen, actlen, off); if (rxhdr & SMSC_RX_STAT_ERROR) { smsc_dbg_printf(sc, "rx error (hdr 0x%08x)\n", rxhdr); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if (rxhdr & SMSC_RX_STAT_COLLISION) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } else { /* Check if the ethernet frame is too big or too small */ if ((pktlen < ETHER_HDR_LEN) || (pktlen > (actlen - off))) goto tr_setup; /* Create a new mbuf to store the packet in */ m = uether_newbuf(); if (m == NULL) { smsc_warn_printf(sc, "failed to create new mbuf\n"); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); goto tr_setup; } if (pktlen > m->m_len) { smsc_dbg_printf(sc, "buffer too small %d vs %d bytes", pktlen, m->m_len); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); m_freem(m); goto tr_setup; } usbd_copy_out(pc, off, mtod(m, uint8_t *), pktlen); /* Check if RX TCP/UDP checksumming is being offloaded */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { struct ether_header *eh; eh = mtod(m, struct ether_header *); /* Remove the extra 2 bytes of the csum */ pktlen -= 2; /* The checksum appears to be simplistically calculated * over the udp/tcp header and data up to the end of the * eth frame. Which means if the eth frame is padded * the csum calculation is incorrectly performed over * the padding bytes as well. Therefore to be safe we * ignore the H/W csum on frames less than or equal to * 64 bytes. * * Ignore H/W csum for non-IPv4 packets. */ if ((be16toh(eh->ether_type) == ETHERTYPE_IP) && (pktlen > ETHER_MIN_LEN)) { struct ip *ip; ip = (struct ip *)(eh + 1); if ((ip->ip_v == IPVERSION) && ((ip->ip_p == IPPROTO_TCP) || (ip->ip_p == IPPROTO_UDP))) { /* Indicate the UDP/TCP csum has been calculated */ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; /* Copy the TCP/UDP checksum from the last 2 bytes * of the transfer and put in the csum_data field. */ usbd_copy_out(pc, (off + pktlen), &m->m_pkthdr.csum_data, 2); /* The data is copied in network order, but the * csum algorithm in the kernel expects it to be * in host network order. */ m->m_pkthdr.csum_data = ntohs(m->m_pkthdr.csum_data); smsc_dbg_printf(sc, "RX checksum offloaded (0x%04x)\n", m->m_pkthdr.csum_data); } } /* Need to adjust the offset as well or we'll be off * by 2 because the csum is removed from the packet * length. 
*/ off += 2; } /* Finally enqueue the mbuf on the receive queue */ /* Remove 4 trailing bytes */ if (pktlen < (4 + ETHER_HDR_LEN)) { m_freem(m); goto tr_setup; } uether_rxmbuf(ue, m, pktlen - 4); } /* Update the offset to move to the next potential packet */ off += pktlen; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); uether_rxflush(ue); return; default: if (error != USB_ERR_CANCELLED) { smsc_warn_printf(sc, "bulk read error, %s\n", usbd_errstr(error)); usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } /** * smsc_bulk_write_callback - Write callback used to send ethernet frame(s) * @xfer: the USB transfer * @error: error code if the transfers is in an errored state * * The main write function that pulls ethernet frames off the queue and sends * them out. * * LOCKING: * */ static void smsc_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct smsc_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc; struct mbuf *m; uint32_t txhdr; uint32_t frm_len = 0; int nframes; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if ((sc->sc_flags & SMSC_FLAG_LINK) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) { /* Don't send anything if there is no link or controller is busy. */ return; } for (nframes = 0; nframes < 16 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, nframes); frm_len = 0; pc = usbd_xfer_get_frame(xfer, nframes); /* Each frame is prefixed with two 32-bit values describing the * length of the packet and buffer. */ txhdr = SMSC_TX_CTRL_0_BUF_SIZE(m->m_pkthdr.len) | SMSC_TX_CTRL_0_FIRST_SEG | SMSC_TX_CTRL_0_LAST_SEG; txhdr = htole32(txhdr); usbd_copy_in(pc, 0, &txhdr, sizeof(txhdr)); txhdr = SMSC_TX_CTRL_1_PKT_LENGTH(m->m_pkthdr.len); txhdr = htole32(txhdr); usbd_copy_in(pc, 4, &txhdr, sizeof(txhdr)); frm_len += 8; /* Next copy in the actual packet */ usbd_m_copy_in(pc, frm_len, m, 0, m->m_pkthdr.len); frm_len += m->m_pkthdr.len; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* If there's a BPF listener, bounce a copy of this frame to him */ BPF_MTAP(ifp, m); m_freem(m); /* Set frame length. */ usbd_xfer_set_frame_len(xfer, nframes, frm_len); } if (nframes != 0) { usbd_xfer_set_frames(xfer, nframes); usbd_transfer_submit(xfer); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return; default: if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (error != USB_ERR_CANCELLED) { smsc_err_printf(sc, "usb error on tx: %s\n", usbd_errstr(error)); usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } /** * smsc_tick - Called periodically to monitor the state of the LAN95xx chip * @ue: USB ether interface * * Simply calls the mii status functions to check the state of the link. * * LOCKING: * Should be called with the SMSC lock held. 
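 *
 * EXAMPLE (editor's note): for a 60-byte packet the two TX command words
 * built by the bulk-write callback above come out as
 *
 *	TX_CTRL_0 = SMSC_TX_CTRL_0_BUF_SIZE(60) | FIRST_SEG | LAST_SEG
 *	TX_CTRL_1 = SMSC_TX_CTRL_1_PKT_LENGTH(60)
 *
 * i.e. an 8-byte header followed by the frame, one such record per USB
 * frame slot, up to 16 per transfer.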
*/ static void smsc_tick(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); struct mii_data *mii = uether_getmii(&sc->sc_ue); SMSC_LOCK_ASSERT(sc, MA_OWNED); mii_tick(mii); if ((sc->sc_flags & SMSC_FLAG_LINK) == 0) { smsc_miibus_statchg(ue->ue_dev); if ((sc->sc_flags & SMSC_FLAG_LINK) != 0) smsc_start(ue); } } /** * smsc_start - Starts communication with the LAN95xx chip * @ue: USB ether interface * * * */ static void smsc_start(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); /* * start the USB transfers, if not already started: */ usbd_transfer_start(sc->sc_xfer[SMSC_BULK_DT_RD]); usbd_transfer_start(sc->sc_xfer[SMSC_BULK_DT_WR]); } /** * smsc_stop - Stops communication with the LAN95xx chip * @ue: USB ether interface * * * */ static void smsc_stop(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); SMSC_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~SMSC_FLAG_LINK; /* * stop all the transfers, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[SMSC_BULK_DT_WR]); usbd_transfer_stop(sc->sc_xfer[SMSC_BULK_DT_RD]); } /** * smsc_phy_init - Initialises the in-built SMSC phy * @sc: driver soft context * * Resets the PHY part of the chip and then initialises it to default * values. The 'link down' and 'auto-negotiation complete' interrupts * from the PHY are also enabled, however we don't monitor the interrupt * endpoints for the moment. * * RETURNS: * Returns 0 on success or EIO if failed to reset the PHY. */ static int smsc_phy_init(struct smsc_softc *sc) { int bmcr; usb_ticks_t start_ticks; const usb_ticks_t max_ticks = USB_MS_TO_TICKS(1000); SMSC_LOCK_ASSERT(sc, MA_OWNED); /* Reset phy and wait for reset to complete */ smsc_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR, BMCR_RESET); start_ticks = ticks; do { uether_pause(&sc->sc_ue, hz / 100); bmcr = smsc_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR); } while ((bmcr & BMCR_RESET) && ((ticks - start_ticks) < max_ticks)); if (((usb_ticks_t)(ticks - start_ticks)) >= max_ticks) { smsc_err_printf(sc, "PHY reset timed-out"); return (EIO); } smsc_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_ANAR, ANAR_10 | ANAR_10_FD | ANAR_TX | ANAR_TX_FD | /* all modes */ ANAR_CSMA | ANAR_FC | ANAR_PAUSE_ASYM); /* Setup the phy to interrupt when the link goes down or autoneg completes */ smsc_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, SMSC_PHY_INTR_STAT); smsc_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, SMSC_PHY_INTR_MASK, (SMSC_PHY_INTR_ANEG_COMP | SMSC_PHY_INTR_LINK_DOWN)); /* Restart auto-negotiation */ bmcr = smsc_miibus_readreg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR); bmcr |= BMCR_STARTNEG; smsc_miibus_writereg(sc->sc_ue.ue_dev, sc->sc_phyno, MII_BMCR, bmcr); return (0); } /** * smsc_chip_init - Initialises the chip after power on * @sc: driver soft context * * This initialisation sequence is modelled on the procedure in the Linux * driver. * * RETURNS: * Returns 0 on success or an error code on failure. 
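 *
 * EXAMPLE (editor's note): the high-speed burst capability of 37 chosen
 * below pairs with the 18944-byte figure quoted in the comment there:
 * 37 packets * 512 bytes = 18944 = 16 * 1024 + 5 * 512.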
*/ static int smsc_chip_init(struct smsc_softc *sc) { int err; int locked; uint32_t reg_val; int burst_cap; locked = mtx_owned(&sc->sc_mtx); if (!locked) SMSC_LOCK(sc); /* Enter H/W config mode */ smsc_write_reg(sc, SMSC_HW_CFG, SMSC_HW_CFG_LRST); if ((err = smsc_wait_for_bits(sc, SMSC_HW_CFG, SMSC_HW_CFG_LRST)) != 0) { smsc_warn_printf(sc, "timed-out waiting for reset to complete\n"); goto init_failed; } /* Reset the PHY */ smsc_write_reg(sc, SMSC_PM_CTRL, SMSC_PM_CTRL_PHY_RST); if ((err = smsc_wait_for_bits(sc, SMSC_PM_CTRL, SMSC_PM_CTRL_PHY_RST)) != 0) { smsc_warn_printf(sc, "timed-out waiting for phy reset to complete\n"); goto init_failed; } /* Set the mac address */ if ((err = smsc_setmacaddress(sc, sc->sc_ue.ue_eaddr)) != 0) { smsc_warn_printf(sc, "failed to set the MAC address\n"); goto init_failed; } /* Don't know what the HW_CFG_BIR bit is, but following the reset sequence * as used in the Linux driver. */ if ((err = smsc_read_reg(sc, SMSC_HW_CFG, &reg_val)) != 0) { smsc_warn_printf(sc, "failed to read HW_CFG: %d\n", err); goto init_failed; } reg_val |= SMSC_HW_CFG_BIR; smsc_write_reg(sc, SMSC_HW_CFG, reg_val); /* There is a so-called 'turbo mode' that the Linux driver supports; it * seems to allow you to jam multiple frames per Rx transaction. By default * this driver supports that and therefore allows multiple frames per URB. * * The xfer buffer size needs to reflect this as well, therefore based on * the calculations in the Linux driver the RX bufsize is set to 18944, * bufsz = (16 * 1024 + 5 * 512) = 16384 + 2560 = 18944 * * Burst capability is the number of URBs that can be in a burst of data/ * ethernet frames. */ if (!smsc_rx_packet_batching) burst_cap = 0; else if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_HIGH) burst_cap = 37; else burst_cap = 128; smsc_write_reg(sc, SMSC_BURST_CAP, burst_cap); /* Set the default bulk in delay (magic value from Linux driver) */ smsc_write_reg(sc, SMSC_BULK_IN_DLY, 0x00002000); /* * Initialise the RX interface */ if ((err = smsc_read_reg(sc, SMSC_HW_CFG, &reg_val)) < 0) { smsc_warn_printf(sc, "failed to read HW_CFG: (err = %d)\n", err); goto init_failed; } /* Adjust the packet offset in the buffer (designed to try and align IP * header on 4 byte boundary) */ reg_val &= ~SMSC_HW_CFG_RXDOFF; reg_val |= (ETHER_ALIGN << 9) & SMSC_HW_CFG_RXDOFF; /* The following settings are used for 'turbo mode', a.k.a. multiple frames * per Rx transaction (again info taken from the Linux driver). */ if (smsc_rx_packet_batching) reg_val |= (SMSC_HW_CFG_MEF | SMSC_HW_CFG_BCE); smsc_write_reg(sc, SMSC_HW_CFG, reg_val); /* Clear the status register(?) */ smsc_write_reg(sc, SMSC_INTR_STATUS, 0xffffffff); /* Read and display the revision register */ if ((err = smsc_read_reg(sc, SMSC_ID_REV, &sc->sc_rev_id)) < 0) { smsc_warn_printf(sc, "failed to read ID_REV (err = %d)\n", err); goto init_failed; } device_printf(sc->sc_ue.ue_dev, "chip 0x%04lx, rev. 
%04lx\n", (sc->sc_rev_id & SMSC_ID_REV_CHIP_ID_MASK) >> 16, (sc->sc_rev_id & SMSC_ID_REV_CHIP_REV_MASK)); /* GPIO/LED setup */ reg_val = SMSC_LED_GPIO_CFG_SPD_LED | SMSC_LED_GPIO_CFG_LNK_LED | SMSC_LED_GPIO_CFG_FDX_LED; smsc_write_reg(sc, SMSC_LED_GPIO_CFG, reg_val); /* * Initialise the TX interface */ smsc_write_reg(sc, SMSC_FLOW, 0); smsc_write_reg(sc, SMSC_AFC_CFG, AFC_CFG_DEFAULT); /* Read the current MAC configuration */ if ((err = smsc_read_reg(sc, SMSC_MAC_CSR, &sc->sc_mac_csr)) < 0) { smsc_warn_printf(sc, "failed to read MAC_CSR (err=%d)\n", err); goto init_failed; } /* VLAN */ smsc_write_reg(sc, SMSC_VLAN1, (uint32_t)ETHERTYPE_VLAN); /* * Initialise the PHY */ if ((err = smsc_phy_init(sc)) != 0) goto init_failed; /* * Start TX */ sc->sc_mac_csr |= SMSC_MAC_CSR_TXEN; smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr); smsc_write_reg(sc, SMSC_TX_CFG, SMSC_TX_CFG_ON); /* * Start RX */ sc->sc_mac_csr |= SMSC_MAC_CSR_RXEN; smsc_write_reg(sc, SMSC_MAC_CSR, sc->sc_mac_csr); if (!locked) SMSC_UNLOCK(sc); return (0); init_failed: if (!locked) SMSC_UNLOCK(sc); smsc_err_printf(sc, "smsc_chip_init failed (err=%d)\n", err); return (err); } /** * smsc_ioctl - ioctl function for the device * @ifp: interface pointer * @cmd: the ioctl command * @data: data passed in the ioctl call, typically a pointer to struct ifreq. * * The ioctl routine is overridden to detect change requests for the H/W * checksum capabilities. * * RETURNS: * 0 on success and an error code on failure. */ static int smsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct usb_ether *ue = ifp->if_softc; struct smsc_softc *sc; struct ifreq *ifr; int rc; int mask; int reinit; if (cmd == SIOCSIFCAP) { sc = uether_getsc(ue); ifr = (struct ifreq *)data; SMSC_LOCK(sc); rc = 0; reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; /* Modify the RX CSUM enable bits */ if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; reinit = 1; } } SMSC_UNLOCK(sc); if (reinit) uether_init(ue); } else { rc = uether_ioctl(ifp, cmd, data); } return (rc); } /** * smsc_attach_post - Called after the driver has attached to the USB interface * @ue: the USB ethernet device * * This is where the chip is initialised for the first time. This is different * from the smsc_init() function in that smsc_init() is designed to set up the * H/W to match the UE settings and can be called after a reset. * * */ static void smsc_attach_post(struct usb_ether *ue) { struct smsc_softc *sc = uether_getsc(ue); uint32_t mac_h, mac_l; int err; smsc_dbg_printf(sc, "smsc_attach_post\n"); /* Set up some of the basics */ sc->sc_phyno = 1; /* Attempt to get the MAC address; if an EEPROM is not attached, this * will just return FF:FF:FF:FF:FF:FF, so in such cases we invent a MAC * address based on urandom. 
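 *
 * The fixup applied to a randomly generated address only touches the two
 * low-order bits of the first octet; e.g. for an illustrative random
 * byte 0xd9:
 *
 *	0xd9 & ~0x01 = 0xd8	(clear the I/G bit: unicast)
 *	0xd8 |  0x02 = 0xda	(set the U/L bit: locally administered)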
*/ memset(sc->sc_ue.ue_eaddr, 0xff, ETHER_ADDR_LEN); /* Check if there is already a MAC address in the register */ if ((smsc_read_reg(sc, SMSC_MAC_ADDRL, &mac_l) == 0) && (smsc_read_reg(sc, SMSC_MAC_ADDRH, &mac_h) == 0)) { sc->sc_ue.ue_eaddr[5] = (uint8_t)((mac_h >> 8) & 0xff); sc->sc_ue.ue_eaddr[4] = (uint8_t)((mac_h) & 0xff); sc->sc_ue.ue_eaddr[3] = (uint8_t)((mac_l >> 24) & 0xff); sc->sc_ue.ue_eaddr[2] = (uint8_t)((mac_l >> 16) & 0xff); sc->sc_ue.ue_eaddr[1] = (uint8_t)((mac_l >> 8) & 0xff); sc->sc_ue.ue_eaddr[0] = (uint8_t)((mac_l) & 0xff); } /* The MAC address is not set, so try to read it from the EEPROM; if that * fails, generate a random MAC address. */ if (!ETHER_IS_VALID(sc->sc_ue.ue_eaddr)) { err = smsc_eeprom_read(sc, 0x01, sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN); #ifdef FDT if ((err != 0) || (!ETHER_IS_VALID(sc->sc_ue.ue_eaddr))) err = usb_fdt_get_mac_addr(sc->sc_ue.ue_dev, &sc->sc_ue); #endif if ((err != 0) || (!ETHER_IS_VALID(sc->sc_ue.ue_eaddr))) { read_random(sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN); sc->sc_ue.ue_eaddr[0] &= ~0x01; /* unicast */ sc->sc_ue.ue_eaddr[0] |= 0x02; /* locally administered */ } } /* Initialise the chip for the first time */ smsc_chip_init(sc); } /** * smsc_attach_post_sub - Called after the driver has attached to the USB interface * @ue: the USB ethernet device * * Most of this is boilerplate code and copied from the base USB ethernet * driver. It has been overridden so that we can indicate to the system that * the chip supports H/W checksumming. * * RETURNS: * Returns 0 on success or a negative error code. */ static int smsc_attach_post_sub(struct usb_ether *ue) { struct smsc_softc *sc; struct ifnet *ifp; int error; sc = uether_getsc(ue); ifp = ue->ue_ifp; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = uether_start; ifp->if_ioctl = smsc_ioctl; ifp->if_init = uether_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); /* The chip supports TCP/UDP checksum offloading on TX and RX paths; however, * currently only RX checksumming is supported in the driver (see top of file). */ ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_VLAN_MTU; ifp->if_hwassist = 0; /* TX checksumming is disabled (for now?) ifp->if_capabilities |= IFCAP_TXCSUM; ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist = CSUM_TCP | CSUM_UDP; */ ifp->if_capenable = ifp->if_capabilities; - mtx_lock(&Giant); + bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, uether_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0); - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } /** * smsc_probe - Probe the interface. * @dev: smsc device handle * * Checks if the device is a match for this driver. * * RETURNS: * Returns 0 on success or an error code on failure. */ static int smsc_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != SMSC_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != SMSC_IFACE_IDX) return (ENXIO); return (usbd_lookup_id_by_uaa(smsc_devs, sizeof(smsc_devs), uaa)); } /** * smsc_attach - Attach the interface. * @dev: smsc device handle * * Allocate softc structures, do ifmedia setup and ethernet/BPF attach. * * RETURNS: * Returns 0 on success or a negative error code. 
*/ static int smsc_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct smsc_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; uint8_t iface_index; int err; sc->sc_flags = USB_GET_DRIVER_INFO(uaa); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Setup the endpoints for the SMSC LAN95xx device(s) */ iface_index = SMSC_IFACE_IDX; err = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, smsc_config, SMSC_N_TRANSFER, sc, &sc->sc_mtx); if (err) { device_printf(dev, "error: allocating USB transfers failed\n"); goto detach; } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &smsc_ue_methods; err = uether_ifattach(ue); if (err) { device_printf(dev, "error: could not attach interface\n"); goto detach; } return (0); /* success */ detach: smsc_detach(dev); return (ENXIO); /* failure */ } /** * smsc_detach - Detach the interface. * @dev: smsc device handle * * RETURNS: * Returns 0. */ static int smsc_detach(device_t dev) { struct smsc_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; usbd_transfer_unsetup(sc->sc_xfer, SMSC_N_TRANSFER); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } static device_method_t smsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, smsc_probe), DEVMETHOD(device_attach, smsc_attach), DEVMETHOD(device_detach, smsc_detach), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, smsc_miibus_readreg), DEVMETHOD(miibus_writereg, smsc_miibus_writereg), DEVMETHOD(miibus_statchg, smsc_miibus_statchg), DEVMETHOD_END }; static driver_t smsc_driver = { .name = "smsc", .methods = smsc_methods, .size = sizeof(struct smsc_softc), }; static devclass_t smsc_devclass; DRIVER_MODULE(smsc, uhub, smsc_driver, smsc_devclass, NULL, 0); DRIVER_MODULE(miibus, smsc, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(smsc, uether, 1, 1, 1); MODULE_DEPEND(smsc, usb, 1, 1, 1); MODULE_DEPEND(smsc, ether, 1, 1, 1); MODULE_DEPEND(smsc, miibus, 1, 1, 1); MODULE_VERSION(smsc, 1); USB_PNP_HOST_INFO(smsc_devs); diff --git a/sys/dev/usb/net/if_ure.c b/sys/dev/usb/net/if_ure.c index 6439a0bfd71d..fa04a6a212ba 100644 --- a/sys/dev/usb/net/if_ure.c +++ b/sys/dev/usb/net/if_ure.c @@ -1,2240 +1,2240 @@ /*- * Copyright (c) 2015-2016 Kevin Lo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* needed for checksum offload */ #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR ure_debug #include #include #include #include #include "miibus_if.h" #include "opt_inet6.h" #ifdef USB_DEBUG static int ure_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ure, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB ure"); SYSCTL_INT(_hw_usb_ure, OID_AUTO, debug, CTLFLAG_RWTUN, &ure_debug, 0, "Debug level"); #endif #ifdef USB_DEBUG_VAR #ifdef USB_DEBUG #define DEVPRINTFN(n,dev,fmt,...) do { \ if ((USB_DEBUG_VAR) >= (n)) { \ device_printf((dev), "%s: " fmt, \ __FUNCTION__ ,##__VA_ARGS__); \ } \ } while (0) #define DEVPRINTF(...) DEVPRINTFN(1, __VA_ARGS__) #else #define DEVPRINTF(...) do { } while (0) #define DEVPRINTFN(...) do { } while (0) #endif #endif /* * Various supported device vendors/products. */ static const STRUCT_USB_HOST_ID ure_devs[] = { #define URE_DEV(v,p,i) { \ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), \ USB_IFACE_CLASS(UICLASS_VENDOR), \ USB_IFACE_SUBCLASS(UISUBCLASS_VENDOR) } URE_DEV(LENOVO, RTL8153, URE_FLAG_8153), URE_DEV(LENOVO, TBT3LAN, 0), URE_DEV(LENOVO, TBT3LANGEN2, 0), URE_DEV(LENOVO, ONELINK, 0), URE_DEV(LENOVO, USBCLAN, 0), URE_DEV(LENOVO, USBCLANGEN2, 0), URE_DEV(NVIDIA, RTL8153, URE_FLAG_8153), URE_DEV(REALTEK, RTL8152, URE_FLAG_8152), URE_DEV(REALTEK, RTL8153, URE_FLAG_8153), URE_DEV(TPLINK, RTL8153, URE_FLAG_8153), URE_DEV(REALTEK, RTL8156, URE_FLAG_8156), #undef URE_DEV }; static device_probe_t ure_probe; static device_attach_t ure_attach; static device_detach_t ure_detach; static usb_callback_t ure_bulk_read_callback; static usb_callback_t ure_bulk_write_callback; static miibus_readreg_t ure_miibus_readreg; static miibus_writereg_t ure_miibus_writereg; static miibus_statchg_t ure_miibus_statchg; static uether_fn_t ure_attach_post; static uether_fn_t ure_init; static uether_fn_t ure_stop; static uether_fn_t ure_start; static uether_fn_t ure_tick; static uether_fn_t ure_rxfilter; static int ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t, void *, int); static int ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *, int); static int ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *, int); static uint8_t ure_read_1(struct ure_softc *, uint16_t, uint16_t); static uint16_t ure_read_2(struct ure_softc *, uint16_t, uint16_t); static uint32_t ure_read_4(struct ure_softc *, uint16_t, uint16_t); static int ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t); static int ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t); static int ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t); static uint16_t ure_ocp_reg_read(struct ure_softc *, uint16_t); static void ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t); static void ure_sram_write(struct ure_softc *, uint16_t, uint16_t); static 
int ure_sysctl_chipver(SYSCTL_HANDLER_ARGS); static void ure_read_chipver(struct ure_softc *); static int ure_attach_post_sub(struct usb_ether *); static void ure_reset(struct ure_softc *); static int ure_ifmedia_upd(struct ifnet *); static void ure_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void ure_add_media_types(struct ure_softc *); static void ure_link_state(struct ure_softc *sc); static int ure_get_link_status(struct ure_softc *); static int ure_ioctl(struct ifnet *, u_long, caddr_t); static void ure_rtl8152_init(struct ure_softc *); static void ure_rtl8152_nic_reset(struct ure_softc *); static void ure_rtl8153_init(struct ure_softc *); static void ure_rtl8153b_init(struct ure_softc *); static void ure_rtl8153b_nic_reset(struct ure_softc *); static void ure_disable_teredo(struct ure_softc *); static void ure_enable_aldps(struct ure_softc *, bool); static uint16_t ure_phy_status(struct ure_softc *, uint16_t); static void ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m); static int ure_txcsum(struct mbuf *m, int caps, uint32_t *regout); static device_method_t ure_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, ure_probe), DEVMETHOD(device_attach, ure_attach), DEVMETHOD(device_detach, ure_detach), /* MII interface. */ DEVMETHOD(miibus_readreg, ure_miibus_readreg), DEVMETHOD(miibus_writereg, ure_miibus_writereg), DEVMETHOD(miibus_statchg, ure_miibus_statchg), DEVMETHOD_END }; static driver_t ure_driver = { .name = "ure", .methods = ure_methods, .size = sizeof(struct ure_softc), }; static devclass_t ure_devclass; DRIVER_MODULE(ure, uhub, ure_driver, ure_devclass, NULL, NULL); DRIVER_MODULE(miibus, ure, miibus_driver, miibus_devclass, NULL, NULL); MODULE_DEPEND(ure, uether, 1, 1, 1); MODULE_DEPEND(ure, usb, 1, 1, 1); MODULE_DEPEND(ure, ether, 1, 1, 1); MODULE_DEPEND(ure, miibus, 1, 1, 1); MODULE_VERSION(ure, 1); USB_PNP_HOST_INFO(ure_devs); static const struct usb_ether_methods ure_ue_methods = { .ue_attach_post = ure_attach_post, .ue_attach_post_sub = ure_attach_post_sub, .ue_start = ure_start, .ue_init = ure_init, .ue_stop = ure_stop, .ue_tick = ure_tick, .ue_setmulti = ure_rxfilter, .ue_setpromisc = ure_rxfilter, .ue_mii_upd = ure_ifmedia_upd, .ue_mii_sts = ure_ifmedia_sts, }; #define URE_SETBIT_1(sc, reg, index, x) \ ure_write_1(sc, reg, index, ure_read_1(sc, reg, index) | (x)) #define URE_SETBIT_2(sc, reg, index, x) \ ure_write_2(sc, reg, index, ure_read_2(sc, reg, index) | (x)) #define URE_SETBIT_4(sc, reg, index, x) \ ure_write_4(sc, reg, index, ure_read_4(sc, reg, index) | (x)) #define URE_CLRBIT_1(sc, reg, index, x) \ ure_write_1(sc, reg, index, ure_read_1(sc, reg, index) & ~(x)) #define URE_CLRBIT_2(sc, reg, index, x) \ ure_write_2(sc, reg, index, ure_read_2(sc, reg, index) & ~(x)) #define URE_CLRBIT_4(sc, reg, index, x) \ ure_write_4(sc, reg, index, ure_read_4(sc, reg, index) & ~(x)) static int ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index, void *buf, int len) { struct usb_device_request req; URE_LOCK_ASSERT(sc, MA_OWNED); if (rw == URE_CTL_WRITE) req.bmRequestType = UT_WRITE_VENDOR_DEVICE; else req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, len); return (uether_do_request(&sc->sc_ue, &req, buf, 1000)); } static int ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index, void *buf, int len) { return (ure_ctl(sc, URE_CTL_READ, addr, index, buf, len)); } static int ure_write_mem(struct ure_softc *sc, uint16_t 
addr, uint16_t index, void *buf, int len) { return (ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len)); } static uint8_t ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint32_t val; uint8_t temp[4]; uint8_t shift; shift = (reg & 3) << 3; reg &= ~3; ure_read_mem(sc, reg, index, &temp, 4); val = UGETDW(temp); val >>= shift; return (val & 0xff); } static uint16_t ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint32_t val; uint8_t temp[4]; uint8_t shift; shift = (reg & 2) << 3; reg &= ~3; ure_read_mem(sc, reg, index, &temp, 4); val = UGETDW(temp); val >>= shift; return (val & 0xffff); } static uint32_t ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint8_t temp[4]; ure_read_mem(sc, reg, index, &temp, 4); return (UGETDW(temp)); } static int ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint16_t byen; uint8_t temp[4]; uint8_t shift; byen = URE_BYTE_EN_BYTE; shift = reg & 3; val &= 0xff; if (reg & 3) { byen <<= shift; val <<= (shift << 3); reg &= ~3; } USETDW(temp, val); return (ure_write_mem(sc, reg, index | byen, &temp, 4)); } static int ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint16_t byen; uint8_t temp[4]; uint8_t shift; byen = URE_BYTE_EN_WORD; shift = reg & 2; val &= 0xffff; if (reg & 2) { byen <<= shift; val <<= (shift << 3); reg &= ~3; } USETDW(temp, val); return (ure_write_mem(sc, reg, index | byen, &temp, 4)); } static int ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint8_t temp[4]; USETDW(temp, val); return (ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4)); } static uint16_t ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr) { uint16_t reg; ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000); reg = (addr & 0x0fff) | 0xb000; return (ure_read_2(sc, reg, URE_MCU_TYPE_PLA)); } static void ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data) { uint16_t reg; ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000); reg = (addr & 0x0fff) | 0xb000; ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data); } static void ure_sram_write(struct ure_softc *sc, uint16_t addr, uint16_t data) { ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, addr); ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, data); } static int ure_miibus_readreg(device_t dev, int phy, int reg) { struct ure_softc *sc; uint16_t val; int locked; sc = device_get_softc(dev); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); /* Let the rgephy driver read the URE_GMEDIASTAT register. 
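 * Every other MII register goes through the OCP window implemented by
 * ure_ocp_reg_read()/ure_ocp_reg_write() above: the high nibble of the
 * 16-bit OCP address is latched into URE_PLA_OCP_GPHY_BASE and the low
 * 12 bits are accessed through the 0xb000 window. MII register numbers
 * map to OCP addresses at URE_OCP_BASE_MII + reg * 2, so e.g. MII
 * register 1 (BMSR) lives at OCP address URE_OCP_BASE_MII + 2.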
*/ if (reg == URE_GMEDIASTAT) { if (!locked) URE_UNLOCK(sc); return (ure_read_1(sc, URE_GMEDIASTAT, URE_MCU_TYPE_PLA)); } val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2); if (!locked) URE_UNLOCK(sc); return (val); } static int ure_miibus_writereg(device_t dev, int phy, int reg, int val) { struct ure_softc *sc; int locked; sc = device_get_softc(dev); if (sc->sc_phyno != phy) return (0); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val); if (!locked) URE_UNLOCK(sc); return (0); } static void ure_miibus_statchg(device_t dev) { struct ure_softc *sc; struct mii_data *mii; struct ifnet *ifp; int locked; sc = device_get_softc(dev); mii = GET_MII(sc); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); ifp = uether_getifp(&sc->sc_ue); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; sc->sc_flags &= ~URE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->sc_flags |= URE_FLAG_LINK; sc->sc_rxstarted = 0; break; case IFM_1000_T: if ((sc->sc_flags & URE_FLAG_8152) != 0) break; sc->sc_flags |= URE_FLAG_LINK; sc->sc_rxstarted = 0; break; default: break; } } /* Lost link, do nothing. */ if ((sc->sc_flags & URE_FLAG_LINK) == 0) goto done; done: if (!locked) URE_UNLOCK(sc); } /* * Probe for a RTL8152/RTL8153 chip. */ static int ure_probe(device_t dev) { struct usb_attach_arg *uaa; uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bIfaceIndex != URE_IFACE_IDX) return (ENXIO); return (usbd_lookup_id_by_uaa(ure_devs, sizeof(ure_devs), uaa)); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/ static int ure_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct ure_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; struct usb_config ure_config_rx[URE_MAX_RX]; struct usb_config ure_config_tx[URE_MAX_TX]; uint8_t iface_index; int error; int i; sc->sc_flags = USB_GET_DRIVER_INFO(uaa); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); iface_index = URE_IFACE_IDX; if (sc->sc_flags & (URE_FLAG_8153 | URE_FLAG_8153B)) sc->sc_rxbufsz = URE_8153_RX_BUFSZ; else if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) sc->sc_rxbufsz = URE_8156_RX_BUFSZ; else sc->sc_rxbufsz = URE_8152_RX_BUFSZ; for (i = 0; i < URE_MAX_RX; i++) { ure_config_rx[i] = (struct usb_config) { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = sc->sc_rxbufsz, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = ure_bulk_read_callback, .timeout = 0, /* no timeout */ }; } error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_rx_xfer, ure_config_rx, URE_MAX_RX, sc, &sc->sc_mtx); if (error != 0) { device_printf(dev, "allocating USB RX transfers failed\n"); goto detach; } for (i = 0; i < URE_MAX_TX; i++) { ure_config_tx[i] = (struct usb_config) { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = URE_TX_BUFSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = ure_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }; } error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_tx_xfer, ure_config_tx, URE_MAX_TX, sc, &sc->sc_mtx); if (error != 0) { usbd_transfer_unsetup(sc->sc_rx_xfer, URE_MAX_RX); device_printf(dev, "allocating USB TX transfers failed\n"); goto detach; } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &ure_ue_methods; error = uether_ifattach(ue); if (error != 0) { device_printf(dev, "could not attach interface\n"); goto detach; } return (0); /* success */ detach: ure_detach(dev); return (ENXIO); /* failure */ } static int ure_detach(device_t dev) { struct ure_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; usbd_transfer_unsetup(sc->sc_tx_xfer, URE_MAX_TX); usbd_transfer_unsetup(sc->sc_rx_xfer, URE_MAX_RX); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } /* * Copy from USB buffers to a new mbuf chain with pkt header. * * This will use m_getm2 to get a mbuf chain w/ properly sized mbuf * clusters as necessary. */ static struct mbuf * ure_makembuf(struct usb_page_cache *pc, usb_frlength_t offset, usb_frlength_t len) { struct usb_page_search res; struct mbuf *m, *mb; usb_frlength_t tlen; m = m_getm2(NULL, len + ETHER_ALIGN, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (m); /* uether_newbuf does this. 
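 * m_adj() drops ETHER_ALIGN (2) bytes from the front of the chain so the
 * 14-byte Ethernet header leaves the IP header 32-bit aligned, mirroring
 * what uether_newbuf() does on the normal receive path.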
*/ m_adj(m, ETHER_ALIGN); m->m_pkthdr.len = len; for (mb = m; len > 0; mb = mb->m_next) { tlen = MIN(len, M_TRAILINGSPACE(mb)); usbd_copy_out(pc, offset, mtod(mb, uint8_t *), tlen); mb->m_len = tlen; offset += tlen; len -= tlen; } return (m); } static void ure_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ure_softc *sc = usbd_xfer_softc(xfer); struct usb_ether *ue = &sc->sc_ue; struct ifnet *ifp = uether_getifp(ue); struct usb_page_cache *pc; struct mbuf *m; struct ure_rxpkt pkt; int actlen, off, len; int caps; uint32_t pktcsum; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: off = 0; pc = usbd_xfer_get_frame(xfer, 0); caps = if_getcapenable(ifp); DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb start\n"); while (actlen > 0) { if (actlen < (int)(sizeof(pkt))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto tr_setup; } usbd_copy_out(pc, off, &pkt, sizeof(pkt)); off += sizeof(pkt); actlen -= sizeof(pkt); len = le32toh(pkt.ure_pktlen) & URE_RXPKT_LEN_MASK; DEVPRINTFN(13, sc->sc_ue.ue_dev, "rxpkt: %#x, %#x, %#x, %#x, %#x, %#x\n", pkt.ure_pktlen, pkt.ure_csum, pkt.ure_misc, pkt.ure_rsvd2, pkt.ure_rsvd3, pkt.ure_rsvd4); DEVPRINTFN(13, sc->sc_ue.ue_dev, "len: %d\n", len); if (len >= URE_RXPKT_LEN_MASK) { /* * drop the rest of this segment. Without * more information, we cannot know where the * next packet starts. Blindly continuing would * open a packet-in-packet attack, allowing * one VLAN to inject packets w/o a VLAN tag, * or to inject packets into other VLANs. */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto tr_setup; } if (actlen < len) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto tr_setup; } if (len >= (ETHER_HDR_LEN + ETHER_CRC_LEN)) m = ure_makembuf(pc, off, len - ETHER_CRC_LEN); else m = NULL; if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); } else { /* make mbuf and queue */ pktcsum = le32toh(pkt.ure_csum); if (caps & IFCAP_VLAN_HWTAGGING && pktcsum & URE_RXPKT_RX_VLAN_TAG) { m->m_pkthdr.ether_vtag = bswap16(pktcsum & URE_RXPKT_VLAN_MASK); m->m_flags |= M_VLANTAG; } /* set the necessary flags for rx checksum */ ure_rxcsum(caps, &pkt, m); uether_rxmbuf(ue, m, len - ETHER_CRC_LEN); } off += roundup(len, URE_RXPKT_ALIGN); actlen -= roundup(len, URE_RXPKT_ALIGN); } DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb end\n"); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); uether_rxflush(ue); return; default: /* Error */ DPRINTF("bulk read error, %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static void ure_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ure_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc; struct mbuf *m; struct ure_txpkt txpkt; uint32_t regtmp; int len, pos; int rem; int caps; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete\n"); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if ((sc->sc_flags & URE_FLAG_LINK) == 0) { /* don't send anything if there is no link! 
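 *
 * Each frame queued below is preceded by an 8-byte ure_txpkt
 * descriptor: a little-endian length word carrying the
 * URE_TKPKT_TX_FS/URE_TKPKT_TX_LS first/last-segment bits, then a word
 * holding the VLAN-tag and checksum-offload flags. Assuming
 * URE_TXPKT_ALIGN is 4, a 60-byte frame thus consumes
 * sizeof(txpkt) + roundup(60, 4) = 68 bytes of the 'rem' budget.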
*/ break; } pc = usbd_xfer_get_frame(xfer, 0); caps = if_getcapenable(ifp); pos = 0; rem = URE_TX_BUFSZ; while (rem > sizeof(txpkt)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* * make sure we never send too large a packet */ len = m->m_pkthdr.len; if ((len & URE_TXPKT_LEN_MASK) != len) { device_printf(sc->sc_ue.ue_dev, "pkt len too large: %#x\n", len); pkterror: if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } if (sizeof(txpkt) + roundup(len, URE_TXPKT_ALIGN) > rem) { /* out of space */ IFQ_DRV_PREPEND(&ifp->if_snd, m); m = NULL; break; } txpkt = (struct ure_txpkt){}; txpkt.ure_pktlen = htole32((len & URE_TXPKT_LEN_MASK) | URE_TKPKT_TX_FS | URE_TKPKT_TX_LS); if (m->m_flags & M_VLANTAG) { txpkt.ure_csum = htole32( bswap16(m->m_pkthdr.ether_vtag & URE_TXPKT_VLAN_MASK) | URE_TXPKT_VLAN); } if (ure_txcsum(m, caps, &regtmp)) { device_printf(sc->sc_ue.ue_dev, "pkt l4 off too large\n"); goto pkterror; } txpkt.ure_csum |= htole32(regtmp); DEVPRINTFN(13, sc->sc_ue.ue_dev, "txpkt: mbflg: %#x, %#x, %#x\n", m->m_pkthdr.csum_flags, le32toh(txpkt.ure_pktlen), le32toh(txpkt.ure_csum)); usbd_copy_in(pc, pos, &txpkt, sizeof(txpkt)); pos += sizeof(txpkt); rem -= sizeof(txpkt); usbd_m_copy_in(pc, pos, m, 0, len); pos += roundup(len, URE_TXPKT_ALIGN); rem -= roundup(len, URE_TXPKT_ALIGN); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* * If there's a BPF listener, bounce a copy * of this frame to him. */ BPF_MTAP(ifp, m); m_freem(m); } /* no packets to send */ if (pos == 0) break; /* Set frame length. */ usbd_xfer_set_frame_len(xfer, 0, pos); usbd_transfer_submit(xfer); return; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (error == USB_ERR_TIMEOUT) { DEVPRINTFN(12, sc->sc_ue.ue_dev, "pkt tx timeout\n"); } if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } } } static void ure_read_chipver(struct ure_softc *sc) { uint16_t ver; ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK; sc->sc_ver = ver; switch (ver) { case 0x4c00: sc->sc_chip |= URE_CHIP_VER_4C00; sc->sc_flags = URE_FLAG_8152; break; case 0x4c10: sc->sc_chip |= URE_CHIP_VER_4C10; sc->sc_flags = URE_FLAG_8152; break; case 0x5c00: sc->sc_chip |= URE_CHIP_VER_5C00; sc->sc_flags = URE_FLAG_8153; break; case 0x5c10: sc->sc_chip |= URE_CHIP_VER_5C10; sc->sc_flags = URE_FLAG_8153; break; case 0x5c20: sc->sc_chip |= URE_CHIP_VER_5C20; sc->sc_flags = URE_FLAG_8153; break; case 0x5c30: sc->sc_chip |= URE_CHIP_VER_5C30; sc->sc_flags = URE_FLAG_8153; break; case 0x6000: sc->sc_flags = URE_FLAG_8153B; sc->sc_chip |= URE_CHIP_VER_6000; break; case 0x6010: sc->sc_flags = URE_FLAG_8153B; sc->sc_chip |= URE_CHIP_VER_6010; break; case 0x7020: sc->sc_flags = URE_FLAG_8156; sc->sc_chip |= URE_CHIP_VER_7020; break; case 0x7030: sc->sc_flags = URE_FLAG_8156; sc->sc_chip |= URE_CHIP_VER_7030; break; case 0x7400: sc->sc_flags = URE_FLAG_8156B; sc->sc_chip |= URE_CHIP_VER_7400; break; case 0x7410: sc->sc_flags = URE_FLAG_8156B; sc->sc_chip |= URE_CHIP_VER_7410; break; default: device_printf(sc->sc_ue.ue_dev, "unknown version 0x%04x\n", ver); break; } } static int ure_sysctl_chipver(SYSCTL_HANDLER_ARGS) { struct sbuf sb; struct ure_softc *sc = arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 0, req); sbuf_printf(&sb, "%04x", sc->sc_ver); error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static void ure_attach_post(struct usb_ether *ue) 
{ struct ure_softc *sc = uether_getsc(ue); sc->sc_rxstarted = 0; sc->sc_phyno = 0; /* Determine the chip version. */ ure_read_chipver(sc); /* Initialize controller and get station address. */ if (sc->sc_flags & URE_FLAG_8152) ure_rtl8152_init(sc); else if (sc->sc_flags & (URE_FLAG_8153B | URE_FLAG_8156 | URE_FLAG_8156B)) ure_rtl8153b_init(sc); else ure_rtl8153_init(sc); if ((sc->sc_chip & URE_CHIP_VER_4C00) || (sc->sc_chip & URE_CHIP_VER_4C10)) ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA, ue->ue_eaddr, 8); else ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA, ue->ue_eaddr, 8); if (ETHER_IS_ZERO(sc->sc_ue.ue_eaddr)) { device_printf(sc->sc_ue.ue_dev, "MAC assigned randomly\n"); arc4rand(sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN, 0); sc->sc_ue.ue_eaddr[0] &= ~0x01; /* unicast */ sc->sc_ue.ue_eaddr[0] |= 0x02; /* locally administered */ } } static int ure_attach_post_sub(struct usb_ether *ue) { struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; struct ure_softc *sc; struct ifnet *ifp; int error; sc = uether_getsc(ue); ifp = ue->ue_ifp; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = uether_start; ifp->if_ioctl = ure_ioctl; ifp->if_init = uether_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); /* * Try to keep two transfers full at a time. * ~(TRANSFER_SIZE / 80 bytes/pkt * 2 buffers in flight) */ ifp->if_snd.ifq_drv_maxlen = 512; IFQ_SET_READY(&ifp->if_snd); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM|IFCAP_HWCSUM, 0); if_sethwassist(ifp, CSUM_IP|CSUM_IP_UDP|CSUM_IP_TCP); #ifdef INET6 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM_IPV6, 0); #endif if_setcapenable(ifp, if_getcapabilities(ifp)); - mtx_lock(&Giant); if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { ifmedia_init(&sc->sc_ifmedia, IFM_IMASK, ure_ifmedia_upd, ure_ifmedia_sts); ure_add_media_types(sc); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO); sc->sc_ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; error = 0; } else { + bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, uether_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0); + bus_topo_unlock(); } - mtx_unlock(&Giant); sctx = device_get_sysctl_ctx(sc->sc_ue.ue_dev); soid = device_get_sysctl_tree(sc->sc_ue.ue_dev); SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "chipver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, ure_sysctl_chipver, "A", "Return string with chip version."); return (error); } static void ure_init(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint16_t cpcr; uint32_t reg; URE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O. */ ure_stop(ue); if (sc->sc_flags & (URE_FLAG_8153B | URE_FLAG_8156 | URE_FLAG_8156B)) ure_rtl8153b_nic_reset(sc); else ure_reset(sc); /* Set MAC address. 
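 *
 * The station address registers are write-protected: URE_PLA_CRWECR is
 * switched to URE_CRWECR_CONFIG around the URE_PLA_IDR update below and
 * back to URE_CRWECR_NORAML afterwards.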
*/ ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG); ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES, IF_LLADDR(ifp), 8); ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML); /* Set RX EARLY timeout and size */ if (sc->sc_flags & URE_FLAG_8153) { switch (usbd_get_speed(sc->sc_ue.ue_udev)) { case USB_SPEED_SUPER: reg = URE_COALESCE_SUPER / 8; break; case USB_SPEED_HIGH: reg = URE_COALESCE_HIGH / 8; break; default: reg = URE_COALESCE_SLOW / 8; break; } ure_write_2(sc, URE_USB_RX_EARLY_AGG, URE_MCU_TYPE_USB, reg); reg = URE_8153_RX_BUFSZ - (URE_FRAMELEN(if_getmtu(ifp)) + sizeof(struct ure_rxpkt) + URE_RXPKT_ALIGN); ure_write_2(sc, URE_USB_RX_EARLY_SIZE, URE_MCU_TYPE_USB, reg / 4); } else if (sc->sc_flags & URE_FLAG_8153B) { ure_write_2(sc, URE_USB_RX_EARLY_AGG, URE_MCU_TYPE_USB, 158); ure_write_2(sc, URE_USB_RX_EXTRA_AGG_TMR, URE_MCU_TYPE_USB, 1875); reg = URE_8153_RX_BUFSZ - (URE_FRAMELEN(if_getmtu(ifp)) + sizeof(struct ure_rxpkt) + URE_RXPKT_ALIGN); ure_write_2(sc, URE_USB_RX_EARLY_SIZE, URE_MCU_TYPE_USB, reg / 8); ure_write_1(sc, URE_USB_UPT_RXDMA_OWN, URE_MCU_TYPE_USB, URE_OWN_UPDATE | URE_OWN_CLEAR); } else if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { ure_write_2(sc, URE_USB_RX_EARLY_AGG, URE_MCU_TYPE_USB, 80); ure_write_2(sc, URE_USB_RX_EXTRA_AGG_TMR, URE_MCU_TYPE_USB, 1875); reg = URE_8156_RX_BUFSZ - (URE_FRAMELEN(if_getmtu(ifp)) + sizeof(struct ure_rxpkt) + URE_RXPKT_ALIGN); ure_write_2(sc, URE_USB_RX_EARLY_SIZE, URE_MCU_TYPE_USB, reg / 8); ure_write_1(sc, URE_USB_UPT_RXDMA_OWN, URE_MCU_TYPE_USB, URE_OWN_UPDATE | URE_OWN_CLEAR); } if (sc->sc_flags & URE_FLAG_8156B) { URE_CLRBIT_2(sc, URE_USB_FW_TASK, URE_MCU_TYPE_USB, URE_FC_PATCH_TASK); uether_pause(&sc->sc_ue, hz / 500); URE_SETBIT_2(sc, URE_USB_FW_TASK, URE_MCU_TYPE_USB, URE_FC_PATCH_TASK); } /* Reset the packet filter. */ URE_CLRBIT_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA, URE_FMC_FCR_MCU_EN); URE_SETBIT_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA, URE_FMC_FCR_MCU_EN); /* Enable RX VLANs if enabled */ cpcr = ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA); if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { DEVPRINTFN(12, sc->sc_ue.ue_dev, "enabled hw vlan tag\n"); cpcr |= URE_CPCR_RX_VLAN; } else { DEVPRINTFN(12, sc->sc_ue.ue_dev, "disabled hw vlan tag\n"); cpcr &= ~URE_CPCR_RX_VLAN; } ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA, cpcr); /* Enable transmit and receive. */ URE_SETBIT_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RE | URE_CR_TE); URE_CLRBIT_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA, URE_RXDY_GATED_EN); /* Configure RX filters. */ ure_rxfilter(ue); usbd_xfer_set_stall(sc->sc_tx_xfer[0]); /* Indicate we are up and running. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; /* Switch to selected media. 
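 * ure_ifmedia_upd() below re-applies the currently selected media now that
 * RX/TX and the packet filters are programmed, so the PHY (or, on the
 * RTL8156/8156B, the MAC-side media logic) picks up the new settings.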
*/ ure_ifmedia_upd(ifp); } static void ure_tick(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); struct mii_data *mii; URE_LOCK_ASSERT(sc, MA_OWNED); (void)ifp; for (int i = 0; i < URE_MAX_RX; i++) DEVPRINTFN(13, sc->sc_ue.ue_dev, "rx[%d] = %d\n", i, USB_GET_STATE(sc->sc_rx_xfer[i])); for (int i = 0; i < URE_MAX_TX; i++) DEVPRINTFN(13, sc->sc_ue.ue_dev, "tx[%d] = %d\n", i, USB_GET_STATE(sc->sc_tx_xfer[i])); if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { ure_link_state(sc); } else { mii = GET_MII(sc); mii_tick(mii); if ((sc->sc_flags & URE_FLAG_LINK) == 0 && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->sc_flags |= URE_FLAG_LINK; sc->sc_rxstarted = 0; ure_start(ue); } } } static u_int ure_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t h, *hashes = arg; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); return (1); } /* * Program the 64-bit multicast hash filter. */ static void ure_rxfilter(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); uint32_t rxmode; uint32_t h, hashes[2] = { 0, 0 }; URE_LOCK_ASSERT(sc, MA_OWNED); rxmode = ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA); rxmode &= ~(URE_RCR_AAP | URE_RCR_AM); rxmode |= URE_RCR_APM; /* accept physical match packets */ rxmode |= URE_RCR_AB; /* always accept broadcasts */ if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxmode |= URE_RCR_AAP; rxmode |= URE_RCR_AM; hashes[0] = hashes[1] = 0xffffffff; goto done; } /* calculate multicast masks */ if_foreach_llmaddr(ifp, ure_hash_maddr, &hashes); h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; rxmode |= URE_RCR_AM; /* accept multicast packets */ done: DEVPRINTFN(14, ue->ue_dev, "rxfilt: RCR: %#x\n", ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA)); ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]); ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]); ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode); } static void ure_start(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); unsigned i; URE_LOCK_ASSERT(sc, MA_OWNED); if (!sc->sc_rxstarted) { sc->sc_rxstarted = 1; for (i = 0; i != URE_MAX_RX; i++) usbd_transfer_start(sc->sc_rx_xfer[i]); } for (i = 0; i != URE_MAX_TX; i++) usbd_transfer_start(sc->sc_tx_xfer[i]); } static void ure_reset(struct ure_softc *sc) { int i; ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST); for (i = 0; i < URE_TIMEOUT; i++) { if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) & URE_CR_RST)) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "reset never completed\n"); } /* * Set media options. 
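 *
 * For the RTL8156/8156B the driver manages its own ifmedia list (no
 * miibus is attached for those chips) and programs ANAR, GTCR and the
 * vendor 2.5G advertisement register at OCP address 0xa5d4 directly;
 * all other chips defer to the attached PHY driver via mii_mediachg().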
*/ static int ure_ifmedia_upd(struct ifnet *ifp) { struct ure_softc *sc = ifp->if_softc; struct ifmedia *ifm; struct mii_data *mii; struct mii_softc *miisc; int gig; int reg; int anar; int locked; int error; if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { ifm = &sc->sc_ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); reg = ure_ocp_reg_read(sc, 0xa5d4); reg &= ~URE_ADV_2500TFDX; anar = gig = 0; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10; gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX; reg |= URE_ADV_2500TFDX; break; case IFM_2500_T: anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10; gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX; reg |= URE_ADV_2500TFDX; ifp->if_baudrate = IF_Mbps(2500); break; case IFM_1000_T: anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10; gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX; ifp->if_baudrate = IF_Gbps(1); break; case IFM_100_TX: anar |= ANAR_TX | ANAR_TX_FD; ifp->if_baudrate = IF_Mbps(100); break; case IFM_10_T: anar |= ANAR_10 | ANAR_10_FD; ifp->if_baudrate = IF_Mbps(10); break; default: device_printf(sc->sc_ue.ue_dev, "unsupported media type\n"); if (!locked) URE_UNLOCK(sc); return (EINVAL); } ure_ocp_reg_write(sc, URE_OCP_BASE_MII + MII_ANAR * 2, anar | ANAR_PAUSE_ASYM | ANAR_FC); ure_ocp_reg_write(sc, URE_OCP_BASE_MII + MII_100T2CR * 2, gig); ure_ocp_reg_write(sc, 0xa5d4, reg); ure_ocp_reg_write(sc, URE_OCP_BASE_MII + MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); if (!locked) URE_UNLOCK(sc); return (0); } mii = GET_MII(sc); URE_LOCK_ASSERT(sc, MA_OWNED); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); return (error); } /* * Report current media status. */ static void ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ure_softc *sc; struct mii_data *mii; uint16_t status; sc = ifp->if_softc; if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { URE_LOCK(sc); ifmr->ifm_status = IFM_AVALID; if (ure_get_link_status(sc)) { ifmr->ifm_status |= IFM_ACTIVE; status = ure_read_2(sc, URE_PLA_PHYSTATUS, URE_MCU_TYPE_PLA); if ((status & URE_PHYSTATUS_FDX) || (status & URE_PHYSTATUS_2500MBPS)) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; if (status & URE_PHYSTATUS_10MBPS) ifmr->ifm_active |= IFM_10_T; else if (status & URE_PHYSTATUS_100MBPS) ifmr->ifm_active |= IFM_100_TX; else if (status & URE_PHYSTATUS_1000MBPS) ifmr->ifm_active |= IFM_1000_T; else if (status & URE_PHYSTATUS_2500MBPS) ifmr->ifm_active |= IFM_2500_T; } URE_UNLOCK(sc); return; } mii = GET_MII(sc); URE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; URE_UNLOCK(sc); } static void ure_add_media_types(struct ure_softc *sc) { ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL); } static void ure_link_state(struct ure_softc *sc) { struct ifnet *ifp = uether_getifp(&sc->sc_ue); if (ure_get_link_status(sc)) { if (ifp->if_link_state != LINK_STATE_UP) { if_link_state_change(ifp, LINK_STATE_UP); /* Enable transmit and receive. 
*/ URE_SETBIT_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RE | URE_CR_TE); if (ure_read_2(sc, URE_PLA_PHYSTATUS, URE_MCU_TYPE_PLA) & URE_PHYSTATUS_2500MBPS) URE_CLRBIT_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA, 0x40); else URE_SETBIT_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA, 0x40); } } else { if (ifp->if_link_state != LINK_STATE_DOWN) { if_link_state_change(ifp, LINK_STATE_DOWN); } } } static int ure_get_link_status(struct ure_softc *sc) { if (ure_read_2(sc, URE_PLA_PHYSTATUS, URE_MCU_TYPE_PLA) & URE_PHYSTATUS_LINK) { sc->sc_flags |= URE_FLAG_LINK; return (1); } else { sc->sc_flags &= ~URE_FLAG_LINK; return (0); } } static int ure_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct usb_ether *ue = ifp->if_softc; struct ure_softc *sc; struct ifreq *ifr; int error, mask, reinit; sc = uether_getsc(ue); ifr = (struct ifreq *)data; error = 0; reinit = 0; switch (cmd) { case SIOCSIFCAP: URE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit++; } if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; } if ((mask & IFCAP_TXCSUM_IPV6) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM_IPV6) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; } if ((mask & IFCAP_RXCSUM_IPV6) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM_IPV6) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; } if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; else reinit = 0; URE_UNLOCK(sc); if (reinit > 0) uether_init(ue); break; case SIOCSIFMTU: /* * In testing, large MTUs "crash" the device, leaving it * in a broken state where the link stays bad. */ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > (4096 - ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)) { error = EINVAL; break; } URE_LOCK(sc); if (if_getmtu(ifp) != ifr->ifr_mtu) if_setmtu(ifp, ifr->ifr_mtu); URE_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd); else error = uether_ioctl(ifp, cmd, data); break; default: error = uether_ioctl(ifp, cmd, data); break; } return (error); } static void ure_rtl8152_init(struct ure_softc *sc) { uint32_t pwrctrl; ure_enable_aldps(sc, false); if (sc->sc_chip & URE_CHIP_VER_4C00) { URE_CLRBIT_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA, URE_LED_MODE_MASK); } URE_CLRBIT_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB, URE_POWER_CUT); URE_CLRBIT_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB, URE_RESUME_INDICATE); URE_SETBIT_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA, URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH); pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA); pwrctrl &= ~URE_MCU_CLK_RATIO_MASK; pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN; ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl); ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA, URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK | URE_SPDWN_LINKCHG_MSK); /* Enable Rx aggregation. 
*/ URE_CLRBIT_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB, URE_RX_AGG_DISABLE | URE_RX_ZERO_EN); ure_enable_aldps(sc, false); ure_rtl8152_nic_reset(sc); ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB, URE_TX_AGG_MAX_THRESHOLD); ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH); ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB, URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1); } static void ure_rtl8153_init(struct ure_softc *sc) { uint16_t val; uint8_t u1u2[8]; int i; ure_enable_aldps(sc, false); memset(u1u2, 0x00, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) & URE_AUTOLOAD_DONE) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for chip autoload\n"); for (i = 0; i < URE_TIMEOUT; i++) { val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) & URE_PHY_STAT_MASK; if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for phy to stabilize\n"); URE_CLRBIT_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, URE_U2P3_ENABLE); if (sc->sc_chip & URE_CHIP_VER_5C10) { val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB); val &= ~URE_PWD_DN_SCALE_MASK; val |= URE_PWD_DN_SCALE(96); ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val); URE_SETBIT_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB, URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND); } else if (sc->sc_chip & URE_CHIP_VER_5C20) URE_CLRBIT_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA, URE_ECM_ALDPS); if (sc->sc_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) { val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB); if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) == 0) val &= ~URE_DYNAMIC_BURST; else val |= URE_DYNAMIC_BURST; ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val); } URE_SETBIT_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB, URE_EP4_FULL_FC); URE_CLRBIT_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB, URE_TIMER11_EN); URE_CLRBIT_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA, URE_LED_MODE_MASK); if ((sc->sc_chip & URE_CHIP_VER_5C10) && usbd_get_speed(sc->sc_ue.ue_udev) != USB_SPEED_SUPER) val = URE_LPM_TIMER_500MS; else val = URE_LPM_TIMER_500US; ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB, val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM); val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB); val &= ~URE_SEN_VAL_MASK; val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE; ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val); ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001); URE_CLRBIT_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB, URE_PWR_EN | URE_PHASE2_EN); URE_CLRBIT_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB, URE_PCUT_STATUS); memset(u1u2, 0xff, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, URE_ALDPS_SPDWN_RATIO); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA, URE_EEE_SPDWN_RATIO); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA, URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN | URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA, URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN | URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | URE_TP1000_SPDWN_EN | URE_EEE_SPDWN_EN); val = ure_read_2(sc, 
URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB); if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10))) val |= URE_U2P3_ENABLE; else val &= ~URE_U2P3_ENABLE; ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val); memset(u1u2, 0x00, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); ure_enable_aldps(sc, false); if (sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 | URE_CHIP_VER_5C20)) { ure_ocp_reg_write(sc, URE_OCP_ADC_CFG, URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L); } if (sc->sc_chip & URE_CHIP_VER_5C00) { ure_ocp_reg_write(sc, URE_OCP_EEE_CFG, ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) & ~URE_CTAP_SHORT_EN); } ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) | URE_EEE_CLKDIV_EN); ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED, ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) | URE_EN_10M_BGOFF); ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) | URE_EN_10M_PLLOFF); ure_sram_write(sc, URE_SRAM_IMPEDANCE, 0x0b13); URE_SETBIT_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA, URE_PFM_PWM_SWITCH); /* Enable LPF corner auto tune. */ ure_sram_write(sc, URE_SRAM_LPF_CFG, 0xf70f); /* Adjust 10M amplitude. */ ure_sram_write(sc, URE_SRAM_10M_AMP1, 0x00af); ure_sram_write(sc, URE_SRAM_10M_AMP2, 0x0208); ure_rtl8152_nic_reset(sc); /* Enable Rx aggregation. */ URE_CLRBIT_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB, URE_RX_AGG_DISABLE | URE_RX_ZERO_EN); val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB); if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10))) val |= URE_U2P3_ENABLE; else val &= ~URE_U2P3_ENABLE; ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val); memset(u1u2, 0xff, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); } static void ure_rtl8153b_init(struct ure_softc *sc) { uint16_t val; int i; if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { URE_CLRBIT_1(sc, 0xd26b, URE_MCU_TYPE_USB, 0x01); ure_write_2(sc, 0xd32a, URE_MCU_TYPE_USB, 0); URE_SETBIT_2(sc, 0xcfee, URE_MCU_TYPE_USB, 0x0020); } if (sc->sc_flags & URE_FLAG_8156B) { URE_SETBIT_2(sc, 0xb460, URE_MCU_TYPE_USB, 0x08); } ure_enable_aldps(sc, false); /* Disable U1U2 */ URE_CLRBIT_2(sc, URE_USB_LPM_CONFIG, URE_MCU_TYPE_USB, URE_LPM_U1U2_EN); /* Wait for flash loading to finish */ if (sc->sc_chip == URE_CHIP_VER_7410) { if ((ure_read_2(sc, 0xd3ae, URE_MCU_TYPE_PLA) & 0x0002) && !(ure_read_2(sc, 0xd284, URE_MCU_TYPE_USB) & 0x0020)) { for (i = 0; i < 100; i++) { if (ure_read_2(sc, 0xd284, URE_MCU_TYPE_USB) & 0x0004) break; uether_pause(&sc->sc_ue, hz / 1000); } } } for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) & URE_AUTOLOAD_DONE) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for chip autoload\n"); val = ure_phy_status(sc, 0); if ((val == URE_PHY_STAT_EXT_INIT) && (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B))) { ure_ocp_reg_write(sc, 0xa468, ure_ocp_reg_read(sc, 0xa468) & ~0x0a); if (sc->sc_flags & URE_FLAG_8156B) ure_ocp_reg_write(sc, 0xa466, ure_ocp_reg_read(sc, 0xa466) & ~0x01); } val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + MII_BMCR); if (val & BMCR_PDOWN) { val &= ~BMCR_PDOWN; ure_ocp_reg_write(sc, URE_OCP_BASE_MII + MII_BMCR, val); } ure_phy_status(sc, URE_PHY_STAT_LAN_ON); /* Disable U2P3 */ URE_CLRBIT_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, URE_U2P3_ENABLE); /* MSC timer, 32760 ms. 
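 * (The 0x0fff written below is consistent with an 8 ms timer tick,
 * since 4095 * 8 = 32760; the tick granularity is an inference from the
 * numbers here, not taken from a datasheet.)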
*/ ure_write_2(sc, URE_USB_MSC_TIMER, URE_MCU_TYPE_USB, 0x0fff); /* U1/U2/L1 idle timer, 500 us. */ ure_write_2(sc, URE_USB_U1U2_TIMER, URE_MCU_TYPE_USB, 500); /* Disable power cut */ URE_CLRBIT_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB, URE_PWR_EN); URE_CLRBIT_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB, URE_PCUT_STATUS); /* Disable ups */ URE_CLRBIT_1(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB, URE_UPS_EN | URE_USP_PREWAKE); URE_CLRBIT_1(sc, 0xcfff, URE_MCU_TYPE_USB, 0x01); /* Disable queue wake */ URE_CLRBIT_1(sc, URE_PLA_INDICATE_FALG, URE_MCU_TYPE_USB, URE_UPCOMING_RUNTIME_D3); URE_CLRBIT_1(sc, URE_PLA_SUSPEND_FLAG, URE_MCU_TYPE_USB, URE_LINK_CHG_EVENT); URE_CLRBIT_2(sc, URE_PLA_EXTRA_STATUS, URE_MCU_TYPE_USB, URE_LINK_CHANGE_FLAG); /* Disable runtime suspend */ ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG); URE_CLRBIT_2(sc, URE_PLA_CONFIG34, URE_MCU_TYPE_USB, URE_LINK_OFF_WAKE_EN); ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML); /* Enable U1U2 */ if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_SUPER) URE_SETBIT_2(sc, URE_USB_LPM_CONFIG, URE_MCU_TYPE_USB, URE_LPM_U1U2_EN); if (sc->sc_flags & URE_FLAG_8156B) { URE_CLRBIT_2(sc, 0xc010, URE_MCU_TYPE_PLA, 0x0800); URE_SETBIT_2(sc, 0xe854, URE_MCU_TYPE_PLA, 0x0001); /* enable fc timer and set timer to 600 ms. */ ure_write_2(sc, URE_USB_FC_TIMER, URE_MCU_TYPE_USB, URE_CTRL_TIMER_EN | (600 / 8)); if (!(ure_read_1(sc, 0xdc6b, URE_MCU_TYPE_PLA) & 0x80)) { val = ure_read_2(sc, URE_USB_FW_CTRL, URE_MCU_TYPE_USB); val |= URE_FLOW_CTRL_PATCH_OPT | 0x0100; val &= ~0x08; ure_write_2(sc, URE_USB_FW_CTRL, URE_MCU_TYPE_USB, val); } URE_SETBIT_2(sc, URE_USB_FW_TASK, URE_MCU_TYPE_USB, URE_FC_PATCH_TASK); } val = ure_read_2(sc, URE_PLA_EXTRA_STATUS, URE_MCU_TYPE_PLA); if (ure_get_link_status(sc)) val |= URE_CUR_LINK_OK; else val &= ~URE_CUR_LINK_OK; val |= URE_POLL_LINK_CHG; ure_write_2(sc, URE_PLA_EXTRA_STATUS, URE_MCU_TYPE_PLA, val); /* MAC clock speed down */ if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, 0x0403); val = ure_read_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA); val &= ~0xff; val |= URE_MAC_CLK_SPDWN_EN | 0x03; ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA, val); } else { URE_SETBIT_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_USB, URE_MAC_CLK_SPDWN_EN); } URE_CLRBIT_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA, URE_PLA_MCU_SPDWN_EN); /* Enable Rx aggregation. 
*/ URE_CLRBIT_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB, URE_RX_AGG_DISABLE | URE_RX_ZERO_EN); if (sc->sc_flags & URE_FLAG_8156) URE_SETBIT_1(sc, 0xd4b4, URE_MCU_TYPE_USB, 0x02); /* Reset tally */ URE_SETBIT_2(sc, URE_PLA_RSTTALLY, URE_MCU_TYPE_USB, URE_TALLY_RESET); } static void ure_rtl8153b_nic_reset(struct ure_softc *sc) { struct ifnet *ifp = uether_getifp(&sc->sc_ue); uint16_t val; int i; /* Disable U1U2 */ URE_CLRBIT_2(sc, URE_USB_LPM_CONFIG, URE_MCU_TYPE_USB, URE_LPM_U1U2_EN); /* Disable U2P3 */ URE_CLRBIT_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, URE_U2P3_ENABLE); ure_enable_aldps(sc, false); /* Enable rxdy_gated */ URE_SETBIT_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA, URE_RXDY_GATED_EN); /* Disable teredo */ ure_disable_teredo(sc); DEVPRINTFN(14, sc->sc_ue.ue_dev, "rtl8153b_nic_reset: RCR: %#x\n", ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA)); URE_CLRBIT_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, URE_RCR_ACPT_ALL); ure_reset(sc); /* Reset BMU */ URE_CLRBIT_1(sc, URE_USB_BMU_RESET, URE_MCU_TYPE_USB, URE_BMU_RESET_EP_IN | URE_BMU_RESET_EP_OUT); URE_SETBIT_1(sc, URE_USB_BMU_RESET, URE_MCU_TYPE_USB, URE_BMU_RESET_EP_IN | URE_BMU_RESET_EP_OUT); URE_CLRBIT_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA, URE_NOW_IS_OOB); URE_CLRBIT_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, URE_MCU_BORW_EN); if (sc->sc_flags & URE_FLAG_8153B) { for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) & URE_LINK_LIST_READY) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for OOB control\n"); URE_SETBIT_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, URE_RE_INIT_LL); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) & URE_LINK_LIST_READY) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for OOB control\n"); } /* Configure rxvlan */ val = ure_read_2(sc, 0xc012, URE_MCU_TYPE_PLA); val &= ~0x00c0; if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) val |= 0x00c0; ure_write_2(sc, 0xc012, URE_MCU_TYPE_PLA, val); val = if_getmtu(ifp); ure_write_2(sc, URE_PLA_RMS, URE_MCU_TYPE_PLA, URE_FRAMELEN(val)); ure_write_1(sc, URE_PLA_MTPS, URE_MCU_TYPE_PLA, URE_MTPS_JUMBO); if (sc->sc_flags & URE_FLAG_8153B) { URE_SETBIT_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA, URE_TCR0_AUTO_FIFO); ure_reset(sc); } /* Configure fc parameter */ if (sc->sc_flags & URE_FLAG_8156) { ure_write_2(sc, 0xc0a6, URE_MCU_TYPE_PLA, 0x0400); ure_write_2(sc, 0xc0aa, URE_MCU_TYPE_PLA, 0x0800); } else if (sc->sc_flags & URE_FLAG_8156B) { ure_write_2(sc, 0xc0a6, URE_MCU_TYPE_PLA, 0x0200); ure_write_2(sc, 0xc0aa, URE_MCU_TYPE_PLA, 0x0400); } /* Configure Rx FIFO threshold. */ if (sc->sc_flags & URE_FLAG_8153B) { ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA, URE_RXFIFO_THR1_NORMAL); ure_write_2(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, URE_RXFIFO_THR2_NORMAL); ure_write_2(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, URE_RXFIFO_THR3_NORMAL); ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_B); } else { ure_write_2(sc, 0xc0a2, URE_MCU_TYPE_PLA, (ure_read_2(sc, 0xc0a2, URE_MCU_TYPE_PLA) & ~0xfff) | 0x08); ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, 0x00600400); } /* Configure Tx FIFO threshold. 
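 * Aside on the bounded-poll idiom used twice above for
 * URE_LINK_LIST_READY: read, test, sleep ~10 ms, give up after
 * URE_TIMEOUT rounds. A generalized, hedged sketch (the helper name
 * is hypothetical):
 */
#if 0	/* illustrative only */
static int
ure_wait_bit_sketch(struct ure_softc *sc, uint16_t reg, uint16_t type,
    uint8_t bit)
{
	int i;

	for (i = 0; i < URE_TIMEOUT; i++) {
		if (ure_read_1(sc, reg, type) & bit)
			return (0);
		/* uether_pause() returns non-zero if the device is gone */
		if (uether_pause(&sc->sc_ue, hz / 100))
			break;
	}
	return (ETIMEDOUT);
}
#endif
/* Per-chip Tx FIFO thresholds: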
*/ if (sc->sc_flags & URE_FLAG_8153B) { ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA, URE_TXFIFO_THR_NORMAL2); } else if (sc->sc_flags & URE_FLAG_8156) { ure_write_2(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA, URE_TXFIFO_THR_NORMAL2); URE_SETBIT_2(sc, 0xd4b4, URE_MCU_TYPE_USB, 0x0002); } else if (sc->sc_flags & URE_FLAG_8156B) { ure_write_2(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA, 0x0008); ure_write_2(sc, 0xe61a, URE_MCU_TYPE_PLA, (URE_FRAMELEN(val) + 0x100) / 16 ); } URE_CLRBIT_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA, URE_PLA_MCU_SPDWN_EN); if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) URE_CLRBIT_2(sc, 0xd32a, URE_MCU_TYPE_USB, 0x300); ure_enable_aldps(sc, true); if (sc->sc_flags & (URE_FLAG_8156 | URE_FLAG_8156B)) { /* Enable U2P3 */ URE_SETBIT_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, URE_U2P3_ENABLE); } /* Enable U1U2 */ if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_SUPER) URE_SETBIT_2(sc, URE_USB_LPM_CONFIG, URE_MCU_TYPE_USB, URE_LPM_U1U2_EN); } static void ure_stop(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); URE_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~URE_FLAG_LINK; sc->sc_rxstarted = 0; /* * stop all the transfers, if not already stopped: */ for (int i = 0; i < URE_MAX_RX; i++) usbd_transfer_stop(sc->sc_rx_xfer[i]); for (int i = 0; i < URE_MAX_TX; i++) usbd_transfer_stop(sc->sc_tx_xfer[i]); } static void ure_disable_teredo(struct ure_softc *sc) { if (sc->sc_flags & (URE_FLAG_8153B | URE_FLAG_8156 | URE_FLAG_8156B)) ure_write_1(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA, 0xff); else { URE_CLRBIT_2(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA, (URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN)); } ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA, URE_WDT6_SET_MODE); ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0); ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0); } static void ure_enable_aldps(struct ure_softc *sc, bool enable) { int i; if (enable) { ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) | URE_EN_ALDPS); } else { ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA | URE_DIS_SDSAVE); for (i = 0; i < 20; i++) { uether_pause(&sc->sc_ue, hz / 1000); if (ure_ocp_reg_read(sc, 0xe000) & 0x0100) break; } } } static uint16_t ure_phy_status(struct ure_softc *sc, uint16_t desired) { uint16_t val; int i; for (i = 0; i < URE_TIMEOUT; i++) { val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) & URE_PHY_STAT_MASK; if (desired) { if (val == desired) break; } else { if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN || val == URE_PHY_STAT_EXT_INIT) break; } uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for phy to stabilize\n"); return (val); } static void ure_rtl8152_nic_reset(struct ure_softc *sc) { uint32_t rx_fifo1, rx_fifo2; int i; URE_SETBIT_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA, URE_RXDY_GATED_EN); ure_disable_teredo(sc); DEVPRINTFN(14, sc->sc_ue.ue_dev, "rtl8152_nic_reset: RCR: %#x\n", ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA)); URE_CLRBIT_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, URE_RCR_ACPT_ALL); ure_reset(sc); ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0); URE_CLRBIT_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA, URE_NOW_IS_OOB); URE_CLRBIT_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, URE_MCU_BORW_EN); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_1(sc, URE_PLA_OOB_CTRL, 
		    URE_MCU_TYPE_PLA) & URE_LINK_LIST_READY)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for OOB control\n");
	URE_SETBIT_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, URE_RE_INIT_LL);
	for (i = 0; i < URE_TIMEOUT; i++) {
		if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
		    URE_LINK_LIST_READY)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for OOB control\n");

	URE_CLRBIT_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA, URE_CPCR_RX_VLAN);

	URE_SETBIT_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA, URE_TCR0_AUTO_FIFO);

	/* Configure Rx FIFO threshold. */
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA,
	    URE_RXFIFO_THR1_NORMAL);
	if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_FULL) {
		rx_fifo1 = URE_RXFIFO_THR2_FULL;
		rx_fifo2 = URE_RXFIFO_THR3_FULL;
	} else {
		rx_fifo1 = URE_RXFIFO_THR2_HIGH;
		rx_fifo2 = URE_RXFIFO_THR3_HIGH;
	}
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1);
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2);

	/* Configure Tx FIFO threshold. */
	ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA,
	    URE_TXFIFO_THR_NORMAL);
}

/*
 * Update the mbuf with the rx checksum status reported by the hardware.
 */
static void
ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m)
{
	int flags;
	uint32_t csum, misc;
	int tcp, udp;

	m->m_pkthdr.csum_flags = 0;

	if (!(capenb & IFCAP_RXCSUM))
		return;

	csum = le32toh(rp->ure_csum);
	misc = le32toh(rp->ure_misc);

	tcp = udp = 0;
	flags = 0;
	if (csum & URE_RXPKT_IPV4_CS)
		flags |= CSUM_IP_CHECKED;
	else if (csum & URE_RXPKT_IPV6_CS)
		flags = 0;

	/* test the byte-swapped value so the check is endian-safe */
	tcp = csum & URE_RXPKT_TCP_CS;
	udp = csum & URE_RXPKT_UDP_CS;

	if (__predict_true((flags & CSUM_IP_CHECKED) &&
	    !(misc & URE_RXPKT_IP_F))) {
		flags |= CSUM_IP_VALID;
	}
	if (__predict_true(
	    (tcp && !(misc & URE_RXPKT_TCP_F)) ||
	    (udp && !(misc & URE_RXPKT_UDP_F)))) {
		flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = flags;
}

/*
 * If the L4 checksum offset is larger than 0x7ff (2047), return failure.
 * We currently restrict the MTU such that this cannot happen, and even
 * with a large enough MTU, only a very specially crafted IPv6 packet
 * with MANY headers could possibly come close.
 *
 * Returns 0 for success, and 1 if the packet cannot be checksummed and
 * should be dropped.
 */
static int
ure_txcsum(struct mbuf *m, int caps, uint32_t *regout)
{
	struct ip ip;
	struct ether_header *eh;
	int flags;
	uint32_t data;
	uint32_t reg;
	int l3off, l4off;
	uint16_t type;

	*regout = 0;
	flags = m->m_pkthdr.csum_flags;
	if (flags == 0)
		return (0);

	if (__predict_true(m->m_len >= (int)sizeof(*eh))) {
		eh = mtod(m, struct ether_header *);
		type = eh->ether_type;
	} else
		m_copydata(m, offsetof(struct ether_header, ether_type),
		    sizeof(type), (caddr_t)&type);

	switch (type = htons(type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		l3off = ETHER_HDR_LEN;
		break;
	case ETHERTYPE_VLAN:
		/* XXX - what about QinQ?
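 * Each 802.1Q tag inserts ETHER_VLAN_ENCAP_LEN (4) bytes before the L3
 * header; QinQ would insert a second tag, which the code below does
 * not account for. A hedged sketch of peeling any number of tags
 * (illustrative only, not what the driver does):
 */
#if 0	/* illustrative only */
static int
ure_l3off_sketch(struct mbuf *m)
{
	uint16_t type;
	int toff;

	toff = offsetof(struct ether_header, ether_type);
	for (;;) {
		if (m->m_pkthdr.len < toff + (int)sizeof(type))
			return (-1);
		m_copydata(m, toff, sizeof(type), (caddr_t)&type);
		if (ntohs(type) != ETHERTYPE_VLAN &&
		    ntohs(type) != ETHERTYPE_QINQ)
			break;
		toff += ETHER_VLAN_ENCAP_LEN;
	}
	/* the L3 header starts right after the last type field */
	return (toff + (int)sizeof(type));
}
#endif
/* A single tag is assumed here: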
*/ l3off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; break; default: return (0); } reg = 0; if (flags & CSUM_IP) reg |= URE_TXPKT_IPV4_CS; data = m->m_pkthdr.csum_data; if (flags & (CSUM_IP_TCP | CSUM_IP_UDP)) { m_copydata(m, l3off, sizeof ip, (caddr_t)&ip); l4off = l3off + (ip.ip_hl << 2) + data; if (__predict_false(l4off > URE_L4_OFFSET_MAX)) return (1); reg |= URE_TXPKT_IPV4_CS; if (flags & CSUM_IP_TCP) reg |= URE_TXPKT_TCP_CS; else if (flags & CSUM_IP_UDP) reg |= URE_TXPKT_UDP_CS; reg |= l4off << URE_L4_OFFSET_SHIFT; } #ifdef INET6 else if (flags & (CSUM_IP6_TCP | CSUM_IP6_UDP)) { l4off = l3off + data; if (__predict_false(l4off > URE_L4_OFFSET_MAX)) return (1); reg |= URE_TXPKT_IPV6_CS; if (flags & CSUM_IP6_TCP) reg |= URE_TXPKT_TCP_CS; else if (flags & CSUM_IP6_UDP) reg |= URE_TXPKT_UDP_CS; reg |= l4off << URE_L4_OFFSET_SHIFT; } #endif *regout = reg; return 0; } diff --git a/sys/dev/usb/net/usb_ethernet.c b/sys/dev/usb/net/usb_ethernet.c index 33659049f970..fe9fe12c9221 100644 --- a/sys/dev/usb/net/usb_ethernet.c +++ b/sys/dev/usb/net/usb_ethernet.c @@ -1,672 +1,671 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Andrew Thompson (thompsa@FreeBSD.org) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "USB Ethernet parameters"); #define UE_LOCK(_ue) mtx_lock((_ue)->ue_mtx) #define UE_UNLOCK(_ue) mtx_unlock((_ue)->ue_mtx) #define UE_LOCK_ASSERT(_ue, t) mtx_assert((_ue)->ue_mtx, t) MODULE_DEPEND(uether, usb, 1, 1, 1); MODULE_DEPEND(uether, miibus, 1, 1, 1); static struct unrhdr *ueunit; static usb_proc_callback_t ue_attach_post_task; static usb_proc_callback_t ue_promisc_task; static usb_proc_callback_t ue_setmulti_task; static usb_proc_callback_t ue_ifmedia_task; static usb_proc_callback_t ue_tick_task; static usb_proc_callback_t ue_start_task; static usb_proc_callback_t ue_stop_task; static void ue_init(void *); static void ue_start(struct ifnet *); static int ue_ifmedia_upd(struct ifnet *); static void ue_watchdog(void *); /* * Return values: * 0: success * Else: device has been detached */ uint8_t uether_pause(struct usb_ether *ue, unsigned int _ticks) { if (usb_proc_is_gone(&ue->ue_tq)) { /* nothing to do */ return (1); } usb_pause_mtx(ue->ue_mtx, _ticks); return (0); } static void ue_queue_command(struct usb_ether *ue, usb_proc_callback_t *fn, struct usb_proc_msg *t0, struct usb_proc_msg *t1) { struct usb_ether_cfg_task *task; UE_LOCK_ASSERT(ue, MA_OWNED); if (usb_proc_is_gone(&ue->ue_tq)) { return; /* nothing to do */ } /* * NOTE: The task cannot get executed before we drop the * "sc_mtx" mutex. It is safe to update fields in the message * structure after that the message got queued. */ task = (struct usb_ether_cfg_task *) usb_proc_msignal(&ue->ue_tq, t0, t1); /* Setup callback and self pointers */ task->hdr.pm_callback = fn; task->ue = ue; /* * Start and stop must be synchronous! 
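 * (the caller must know the interface is fully up or down before it
 * proceeds), while the other commands may complete asynchronously.
 *
 * Aside: each command owns two pre-allocated message slots, and
 * usb_proc_msignal() hands back whichever of "t0"/"t1" is free, so the
 * same command can be queued again while a previous instance is still
 * pending, without allocating memory. A hedged sketch of the call
 * shape, borrowed from uether_ioctl() below:
 */
#if 0	/* illustrative only */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_setmulti_task,
	    &ue->ue_multi_task[0].hdr,		/* message slot 0 */
	    &ue->ue_multi_task[1].hdr);		/* message slot 1 */
	UE_UNLOCK(ue);
#endif
/* Hence the synchronous wait for start/stop: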
*/ if ((fn == ue_start_task) || (fn == ue_stop_task)) usb_proc_mwait(&ue->ue_tq, t0, t1); } struct ifnet * uether_getifp(struct usb_ether *ue) { return (ue->ue_ifp); } struct mii_data * uether_getmii(struct usb_ether *ue) { return (device_get_softc(ue->ue_miibus)); } void * uether_getsc(struct usb_ether *ue) { return (ue->ue_sc); } static int ue_sysctl_parent(SYSCTL_HANDLER_ARGS) { struct usb_ether *ue = arg1; const char *name; name = device_get_nameunit(ue->ue_dev); return SYSCTL_OUT_STR(req, name); } int uether_ifattach(struct usb_ether *ue) { int error; /* check some critical parameters */ if ((ue->ue_dev == NULL) || (ue->ue_udev == NULL) || (ue->ue_mtx == NULL) || (ue->ue_methods == NULL)) return (EINVAL); error = usb_proc_create(&ue->ue_tq, ue->ue_mtx, device_get_nameunit(ue->ue_dev), USB_PRI_MED); if (error) { device_printf(ue->ue_dev, "could not setup taskqueue\n"); goto error; } /* fork rest of the attach code */ UE_LOCK(ue); ue_queue_command(ue, ue_attach_post_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); error: return (error); } void uether_ifattach_wait(struct usb_ether *ue) { UE_LOCK(ue); usb_proc_mwait(&ue->ue_tq, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); } static void ue_attach_post_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; struct ifnet *ifp; int error; char num[14]; /* sufficient for 32 bits */ /* first call driver's post attach routine */ ue->ue_methods->ue_attach_post(ue); UE_UNLOCK(ue); ue->ue_unit = alloc_unr(ueunit); usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0); sysctl_ctx_init(&ue->ue_sysctl_ctx); mbufq_init(&ue->ue_rxq, 0 /* unlimited length */); error = 0; CURVNET_SET_QUIET(vnet0); ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(ue->ue_dev, "could not allocate ifnet\n"); goto fail; } ifp->if_softc = ue; if_initname(ifp, "ue", ue->ue_unit); if (ue->ue_methods->ue_attach_post_sub != NULL) { ue->ue_ifp = ifp; error = ue->ue_methods->ue_attach_post_sub(ue); } else { ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; if (ue->ue_methods->ue_ioctl != NULL) ifp->if_ioctl = ue->ue_methods->ue_ioctl; else ifp->if_ioctl = uether_ioctl; ifp->if_start = ue_start; ifp->if_init = ue_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ue->ue_ifp = ifp; if (ue->ue_methods->ue_mii_upd != NULL && ue->ue_methods->ue_mii_sts != NULL) { - /* device_xxx() depends on this */ - mtx_lock(&Giant); + bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, ue_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); - mtx_unlock(&Giant); + bus_topo_unlock(); } } if (error) { device_printf(ue->ue_dev, "attaching PHYs failed\n"); goto fail; } if_printf(ifp, " on %s\n", device_get_nameunit(ue->ue_dev)); ether_ifattach(ifp, ue->ue_eaddr); /* Tell upper layer we support VLAN oversized frames. 
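 * i.e. if_hdrlen grows from the 14-byte Ethernet header to the 18-byte
 * VLAN-tagged one. A compile-time illustration of that relationship
 * (an aside, not part of the driver):
 */
_Static_assert(sizeof(struct ether_vlan_header) ==
    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN,
    "a VLAN header is the basic header plus one 4-byte 802.1Q tag");

/* Advertise the larger header when VLAN MTU is supported: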
*/ if (ifp->if_capabilities & IFCAP_VLAN_MTU) ifp->if_hdrlen = sizeof(struct ether_vlan_header); CURVNET_RESTORE(); snprintf(num, sizeof(num), "%u", ue->ue_unit); ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx, &SYSCTL_NODE_CHILDREN(_net, ue), OID_AUTO, num, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx, SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ue, 0, ue_sysctl_parent, "A", "parent device"); UE_LOCK(ue); return; fail: CURVNET_RESTORE(); /* drain mbuf queue */ mbufq_drain(&ue->ue_rxq); /* free unit */ free_unr(ueunit, ue->ue_unit); if (ue->ue_ifp != NULL) { if_free(ue->ue_ifp); ue->ue_ifp = NULL; } UE_LOCK(ue); return; } void uether_ifdetach(struct usb_ether *ue) { struct ifnet *ifp; /* wait for any post attach or other command to complete */ usb_proc_drain(&ue->ue_tq); /* read "ifnet" pointer after taskqueue drain */ ifp = ue->ue_ifp; if (ifp != NULL) { /* we are not running any more */ UE_LOCK(ue); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; UE_UNLOCK(ue); /* drain any callouts */ usb_callout_drain(&ue->ue_watchdog); /* * Detach ethernet first to stop miibus calls from * user-space: */ ether_ifdetach(ifp); /* detach miibus */ if (ue->ue_miibus != NULL) { - mtx_lock(&Giant); /* device_xxx() depends on this */ + bus_topo_lock(); device_delete_child(ue->ue_dev, ue->ue_miibus); - mtx_unlock(&Giant); + bus_topo_unlock(); } /* free interface instance */ if_free(ifp); /* free sysctl */ sysctl_ctx_free(&ue->ue_sysctl_ctx); /* drain mbuf queue */ mbufq_drain(&ue->ue_rxq); /* free unit */ free_unr(ueunit, ue->ue_unit); } /* free taskqueue, if any */ usb_proc_free(&ue->ue_tq); } uint8_t uether_is_gone(struct usb_ether *ue) { return (usb_proc_is_gone(&ue->ue_tq)); } void uether_init(void *arg) { ue_init(arg); } static void ue_init(void *arg) { struct usb_ether *ue = arg; UE_LOCK(ue); ue_queue_command(ue, ue_start_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); } static void ue_start_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; struct ifnet *ifp = ue->ue_ifp; UE_LOCK_ASSERT(ue, MA_OWNED); ue->ue_methods->ue_init(ue); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if (ue->ue_methods->ue_tick != NULL) usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue); } static void ue_stop_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; UE_LOCK_ASSERT(ue, MA_OWNED); usb_callout_stop(&ue->ue_watchdog); ue->ue_methods->ue_stop(ue); } void uether_start(struct ifnet *ifp) { ue_start(ifp); } static void ue_start(struct ifnet *ifp) { struct usb_ether *ue = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; UE_LOCK(ue); ue->ue_methods->ue_start(ue); UE_UNLOCK(ue); } static void ue_promisc_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; ue->ue_methods->ue_setpromisc(ue); } static void ue_setmulti_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; ue->ue_methods->ue_setmulti(ue); } int uether_ifmedia_upd(struct ifnet *ifp) { return (ue_ifmedia_upd(ifp)); } static int ue_ifmedia_upd(struct ifnet *ifp) { struct usb_ether *ue = ifp->if_softc; /* Defer to process context */ UE_LOCK(ue); ue_queue_command(ue, ue_ifmedia_task, 
&ue->ue_media_task[0].hdr, &ue->ue_media_task[1].hdr); UE_UNLOCK(ue); return (0); } static void ue_ifmedia_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; struct ifnet *ifp = ue->ue_ifp; ue->ue_methods->ue_mii_upd(ifp); } static void ue_watchdog(void *arg) { struct usb_ether *ue = arg; struct ifnet *ifp = ue->ue_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; ue_queue_command(ue, ue_tick_task, &ue->ue_tick_task[0].hdr, &ue->ue_tick_task[1].hdr); usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue); } static void ue_tick_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; struct ifnet *ifp = ue->ue_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; ue->ue_methods->ue_tick(ue); } int uether_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct usb_ether *ue = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: UE_LOCK(ue); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) ue_queue_command(ue, ue_promisc_task, &ue->ue_promisc_task[0].hdr, &ue->ue_promisc_task[1].hdr); else ue_queue_command(ue, ue_start_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); } else { ue_queue_command(ue, ue_stop_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); } UE_UNLOCK(ue); break; case SIOCADDMULTI: case SIOCDELMULTI: UE_LOCK(ue); ue_queue_command(ue, ue_setmulti_task, &ue->ue_multi_task[0].hdr, &ue->ue_multi_task[1].hdr); UE_UNLOCK(ue); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (ue->ue_miibus != NULL) { mii = device_get_softc(ue->ue_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else error = ether_ioctl(ifp, command, data); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int uether_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: ueunit = new_unrhdr(0, INT_MAX, NULL); break; case MOD_UNLOAD: break; default: return (EOPNOTSUPP); } return (0); } static moduledata_t uether_mod = { "uether", uether_modevent, 0 }; struct mbuf * uether_newbuf(void) { struct mbuf *m_new; m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (NULL); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, ETHER_ALIGN); return (m_new); } int uether_rxmbuf(struct usb_ether *ue, struct mbuf *m, unsigned int len) { struct ifnet *ifp = ue->ue_ifp; UE_LOCK_ASSERT(ue, MA_OWNED); /* finalize mbuf */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* enqueue for later when the lock can be released */ (void)mbufq_enqueue(&ue->ue_rxq, m); return (0); } int uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset, unsigned int len) { struct ifnet *ifp = ue->ue_ifp; struct mbuf *m; UE_LOCK_ASSERT(ue, MA_OWNED); if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) return (1); m = uether_newbuf(); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (ENOMEM); } usbd_copy_out(pc, offset, mtod(m, uint8_t *), len); /* finalize mbuf */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* enqueue for later when the lock can be released */ (void)mbufq_enqueue(&ue->ue_rxq, m); return (0); } void uether_rxflush(struct usb_ether *ue) { struct ifnet *ifp = 
ue->ue_ifp; struct epoch_tracker et; struct mbuf *m, *n; UE_LOCK_ASSERT(ue, MA_OWNED); n = mbufq_flush(&ue->ue_rxq); UE_UNLOCK(ue); NET_EPOCH_ENTER(et); while ((m = n) != NULL) { n = STAILQ_NEXT(m, m_stailqpkt); m->m_nextpkt = NULL; ifp->if_input(ifp, m); } NET_EPOCH_EXIT(et); UE_LOCK(ue); } /* * USB net drivers are run by DRIVER_MODULE() thus SI_SUB_DRIVERS, * SI_ORDER_MIDDLE. Run uether after that. */ DECLARE_MODULE(uether, uether_mod, SI_SUB_DRIVERS, SI_ORDER_ANY); MODULE_VERSION(uether, 1); diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c index 8d0e7961f675..6564182a97b0 100644 --- a/sys/dev/usb/usb_device.c +++ b/sys/dev/usb/usb_device.c @@ -1,3095 +1,3095 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008-2020 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if USB_HAVE_UGEN #include #endif #include "usbdevs.h" #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #include #include #include #include #include #include #if USB_HAVE_UGEN #include #include #endif #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ /* function prototypes */ static int sysctl_hw_usb_template(SYSCTL_HANDLER_ARGS); static void usb_init_endpoint(struct usb_device *, uint8_t, struct usb_endpoint_descriptor *, struct usb_endpoint_ss_comp_descriptor *, struct usb_endpoint *); static void usb_unconfigure(struct usb_device *, uint8_t); static void usb_detach_device_sub(struct usb_device *, device_t *, char **, uint8_t); static uint8_t usb_probe_and_attach_sub(struct usb_device *, struct usb_attach_arg *); static void usb_init_attach_arg(struct usb_device *, struct usb_attach_arg *); static void usb_suspend_resume_sub(struct usb_device *, device_t, uint8_t); static usb_proc_callback_t usbd_clear_stall_proc; static usb_error_t usb_config_parse(struct usb_device *, uint8_t, uint8_t); #if USB_HAVE_DEVCTL static void usb_notify_addq(const char *type, struct usb_device *); #endif #if USB_HAVE_UGEN static void usb_fifo_free_wrap(struct usb_device *, uint8_t, uint8_t); static void usb_cdev_create(struct usb_device *); static void usb_cdev_free(struct usb_device *); #endif /* This variable is global to allow easy access to it: */ #ifdef USB_TEMPLATE int usb_template = USB_TEMPLATE; #else int usb_template = -1; #endif SYSCTL_PROC(_hw_usb, OID_AUTO, template, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_usb_template, "I", "Selected USB device side template"); /*------------------------------------------------------------------------* * usb_trigger_reprobe_on_off * * This function sets the pull up resistors for all ports currently * operating in device mode either on (when on_not_off is 1), or off * (when it's 0). 
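 *
 * Each root hub is pinned with a temporary reference while it is being
 * poked, so it cannot be freed mid-operation. A hedged sketch of the
 * acquire/release pattern used in the body (the saturation check
 * against USB_DEV_REF_MAX is omitted here):
 *------------------------------------------------------------------------*/
#if 0	/* illustrative only */
	mtx_lock(&usb_ref_lock);
	udev->refcount++;			/* pin the device */
	mtx_unlock(&usb_ref_lock);

	/* ... issue port requests against "udev" ... */

	mtx_lock(&usb_ref_lock);
	if (--(udev->refcount) == 0)
		cv_broadcast(&udev->ref_cv);	/* wake usb_wait_pending_refs() */
	mtx_unlock(&usb_ref_lock);
#endif
/*------------------------------------------------------------------------*
 * The helper itself: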
*------------------------------------------------------------------------*/ static void usb_trigger_reprobe_on_off(int on_not_off) { struct usb_port_status ps; struct usb_bus *bus; struct usb_device *udev; usb_error_t err; int do_unlock, max; max = devclass_get_maxunit(usb_devclass_ptr); while (max >= 0) { mtx_lock(&usb_ref_lock); bus = devclass_get_softc(usb_devclass_ptr, max); max--; if (bus == NULL || bus->devices == NULL || bus->devices[USB_ROOT_HUB_ADDR] == NULL) { mtx_unlock(&usb_ref_lock); continue; } udev = bus->devices[USB_ROOT_HUB_ADDR]; if (udev->refcount == USB_DEV_REF_MAX) { mtx_unlock(&usb_ref_lock); continue; } udev->refcount++; mtx_unlock(&usb_ref_lock); do_unlock = usbd_enum_lock(udev); if (do_unlock > 1) { do_unlock = 0; goto next; } err = usbd_req_get_port_status(udev, NULL, &ps, 1); if (err != 0) { DPRINTF("usbd_req_get_port_status() " "failed: %s\n", usbd_errstr(err)); goto next; } if ((UGETW(ps.wPortStatus) & UPS_PORT_MODE_DEVICE) == 0) goto next; if (on_not_off) { err = usbd_req_set_port_feature(udev, NULL, 1, UHF_PORT_POWER); if (err != 0) { DPRINTF("usbd_req_set_port_feature() " "failed: %s\n", usbd_errstr(err)); } } else { err = usbd_req_clear_port_feature(udev, NULL, 1, UHF_PORT_POWER); if (err != 0) { DPRINTF("usbd_req_clear_port_feature() " "failed: %s\n", usbd_errstr(err)); } } next: mtx_lock(&usb_ref_lock); if (do_unlock) usbd_enum_unlock(udev); if (--(udev->refcount) == 0) cv_broadcast(&udev->ref_cv); mtx_unlock(&usb_ref_lock); } } /*------------------------------------------------------------------------* * usb_trigger_reprobe_all * * This function toggles the pull up resistors for all ports currently * operating in device mode, causing the host machine to reenumerate them. *------------------------------------------------------------------------*/ static void usb_trigger_reprobe_all(void) { /* * Set the pull up resistors off for all ports in device mode. */ usb_trigger_reprobe_on_off(0); /* * According to the DWC OTG spec this must be at least 3ms. */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_POWER_DOWN_TIME)); /* * Set the pull up resistors back on. */ usb_trigger_reprobe_on_off(1); } static int sysctl_hw_usb_template(SYSCTL_HANDLER_ARGS) { int error, val; val = usb_template; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL || usb_template == val) return (error); usb_template = val; if (usb_template < 0) { usb_trigger_reprobe_on_off(0); } else { usb_trigger_reprobe_all(); } return (0); } /* English is default language */ static int usb_lang_id = 0x0009; static int usb_lang_mask = 0x00FF; SYSCTL_INT(_hw_usb, OID_AUTO, usb_lang_id, CTLFLAG_RWTUN, &usb_lang_id, 0, "Preferred USB language ID"); SYSCTL_INT(_hw_usb, OID_AUTO, usb_lang_mask, CTLFLAG_RWTUN, &usb_lang_mask, 0, "Preferred USB language mask"); static const char* statestr[USB_STATE_MAX] = { [USB_STATE_DETACHED] = "DETACHED", [USB_STATE_ATTACHED] = "ATTACHED", [USB_STATE_POWERED] = "POWERED", [USB_STATE_ADDRESSED] = "ADDRESSED", [USB_STATE_CONFIGURED] = "CONFIGURED", }; const char * usb_statestr(enum usb_dev_state state) { return ((state < USB_STATE_MAX) ? statestr[state] : "UNKNOWN"); } const char * usb_get_manufacturer(struct usb_device *udev) { return (udev->manufacturer ? udev->manufacturer : "Unknown"); } const char * usb_get_product(struct usb_device *udev) { return (udev->product ? udev->product : ""); } const char * usb_get_serial(struct usb_device *udev) { return (udev->serial ? 
udev->serial : ""); } /*------------------------------------------------------------------------* * usbd_get_ep_by_addr * * This function searches for an USB ep by endpoint address and * direction. * * Returns: * NULL: Failure * Else: Success *------------------------------------------------------------------------*/ struct usb_endpoint * usbd_get_ep_by_addr(struct usb_device *udev, uint8_t ea_val) { struct usb_endpoint *ep = udev->endpoints; struct usb_endpoint *ep_end = udev->endpoints + udev->endpoints_max; enum { EA_MASK = (UE_DIR_IN | UE_DIR_OUT | UE_ADDR), }; /* * According to the USB specification not all bits are used * for the endpoint address. Keep defined bits only: */ ea_val &= EA_MASK; /* * Iterate across all the USB endpoints searching for a match * based on the endpoint address: */ for (; ep != ep_end; ep++) { if (ep->edesc == NULL) { continue; } /* do the mask and check the value */ if ((ep->edesc->bEndpointAddress & EA_MASK) == ea_val) { goto found; } } /* * The default endpoint is always present and is checked separately: */ if ((udev->ctrl_ep.edesc != NULL) && ((udev->ctrl_ep.edesc->bEndpointAddress & EA_MASK) == ea_val)) { ep = &udev->ctrl_ep; goto found; } return (NULL); found: return (ep); } /*------------------------------------------------------------------------* * usbd_get_endpoint * * This function searches for an USB endpoint based on the information * given by the passed "struct usb_config" pointer. * * Return values: * NULL: No match. * Else: Pointer to "struct usb_endpoint". *------------------------------------------------------------------------*/ struct usb_endpoint * usbd_get_endpoint(struct usb_device *udev, uint8_t iface_index, const struct usb_config *setup) { struct usb_endpoint *ep = udev->endpoints; struct usb_endpoint *ep_end = udev->endpoints + udev->endpoints_max; uint8_t index = setup->ep_index; uint8_t ea_mask; uint8_t ea_val; uint8_t type_mask; uint8_t type_val; DPRINTFN(10, "udev=%p iface_index=%d address=0x%x " "type=0x%x dir=0x%x index=%d\n", udev, iface_index, setup->endpoint, setup->type, setup->direction, setup->ep_index); /* check USB mode */ if (setup->usb_mode != USB_MODE_DUAL && udev->flags.usb_mode != setup->usb_mode) { /* wrong mode - no endpoint */ return (NULL); } /* setup expected endpoint direction mask and value */ if (setup->direction == UE_DIR_RX) { ea_mask = (UE_DIR_IN | UE_DIR_OUT); ea_val = (udev->flags.usb_mode == USB_MODE_DEVICE) ? UE_DIR_OUT : UE_DIR_IN; } else if (setup->direction == UE_DIR_TX) { ea_mask = (UE_DIR_IN | UE_DIR_OUT); ea_val = (udev->flags.usb_mode == USB_MODE_DEVICE) ? UE_DIR_IN : UE_DIR_OUT; } else if (setup->direction == UE_DIR_ANY) { /* match any endpoint direction */ ea_mask = 0; ea_val = 0; } else { /* match the given endpoint direction */ ea_mask = (UE_DIR_IN | UE_DIR_OUT); ea_val = (setup->direction & (UE_DIR_IN | UE_DIR_OUT)); } /* setup expected endpoint address */ if (setup->endpoint == UE_ADDR_ANY) { /* match any endpoint address */ } else { /* match the given endpoint address */ ea_mask |= UE_ADDR; ea_val |= (setup->endpoint & UE_ADDR); } /* setup expected endpoint type */ if (setup->type == UE_BULK_INTR) { /* this will match BULK and INTERRUPT endpoints */ type_mask = 2; type_val = 2; } else if (setup->type == UE_TYPE_ANY) { /* match any endpoint type */ type_mask = 0; type_val = 0; } else { /* match the given endpoint type */ type_mask = UE_XFERTYPE; type_val = (setup->type & UE_XFERTYPE); } /* * Iterate across all the USB endpoints searching for a match * based on the endpoint address. 
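 *
 * Aside on the UE_BULK_INTR case above: in the standard bmAttributes
 * encoding UE_CONTROL is 0, UE_ISOCHRONOUS is 1, UE_BULK is 2 and
 * UE_INTERRUPT is 3, so bit 1 is set exactly for bulk and interrupt
 * endpoints, which is why "type_mask = 2, type_val = 2" matches both.
 * A compile-time check of that assumption:
 */
_Static_assert((UE_BULK & 2) != 0 && (UE_INTERRUPT & 2) != 0 &&
    (UE_CONTROL & 2) == 0 && (UE_ISOCHRONOUS & 2) == 0,
    "UE_BULK_INTR matching relies on the UE_XFERTYPE bit layout");

/*
 * Resuming the original comment: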
Note that we are searching * the endpoints from the beginning of the "udev->endpoints" array. */ for (; ep != ep_end; ep++) { if ((ep->edesc == NULL) || (ep->iface_index != iface_index)) { continue; } /* do the masks and check the values */ if (((ep->edesc->bEndpointAddress & ea_mask) == ea_val) && ((ep->edesc->bmAttributes & type_mask) == type_val)) { if (!index--) { goto found; } } } /* * Match against default endpoint last, so that "any endpoint", "any * address" and "any direction" returns the first endpoint of the * interface. "iface_index" and "direction" is ignored: */ if ((udev->ctrl_ep.edesc != NULL) && ((udev->ctrl_ep.edesc->bEndpointAddress & ea_mask) == ea_val) && ((udev->ctrl_ep.edesc->bmAttributes & type_mask) == type_val) && (!index)) { ep = &udev->ctrl_ep; goto found; } return (NULL); found: return (ep); } /*------------------------------------------------------------------------* * usbd_interface_count * * This function stores the number of USB interfaces excluding * alternate settings, which the USB config descriptor reports into * the unsigned 8-bit integer pointed to by "count". * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usbd_interface_count(struct usb_device *udev, uint8_t *count) { if (udev->cdesc == NULL) { *count = 0; return (USB_ERR_NOT_CONFIGURED); } *count = udev->ifaces_max; return (USB_ERR_NORMAL_COMPLETION); } /*------------------------------------------------------------------------* * usb_init_endpoint * * This function will initialise the USB endpoint structure pointed to by * the "endpoint" argument. The structure pointed to by "endpoint" must be * zeroed before calling this function. *------------------------------------------------------------------------*/ static void usb_init_endpoint(struct usb_device *udev, uint8_t iface_index, struct usb_endpoint_descriptor *edesc, struct usb_endpoint_ss_comp_descriptor *ecomp, struct usb_endpoint *ep) { const struct usb_bus_methods *methods; usb_stream_t x; methods = udev->bus->methods; (methods->endpoint_init) (udev, edesc, ep); /* initialise USB endpoint structure */ ep->edesc = edesc; ep->ecomp = ecomp; ep->iface_index = iface_index; /* setup USB stream queues */ for (x = 0; x != USB_MAX_EP_STREAMS; x++) { TAILQ_INIT(&ep->endpoint_q[x].head); ep->endpoint_q[x].command = &usbd_pipe_start; } /* the pipe is not supported by the hardware */ if (ep->methods == NULL) return; /* check for SUPER-speed streams mode endpoint */ if (udev->speed == USB_SPEED_SUPER && ecomp != NULL && (edesc->bmAttributes & UE_XFERTYPE) == UE_BULK && (UE_GET_BULK_STREAMS(ecomp->bmAttributes) != 0)) { usbd_set_endpoint_mode(udev, ep, USB_EP_MODE_STREAMS); } else { usbd_set_endpoint_mode(udev, ep, USB_EP_MODE_DEFAULT); } /* clear stall, if any */ if (methods->clear_stall != NULL) { USB_BUS_LOCK(udev->bus); (methods->clear_stall) (udev, ep); USB_BUS_UNLOCK(udev->bus); } } /*-----------------------------------------------------------------------* * usb_endpoint_foreach * * This function will iterate all the USB endpoints except the control * endpoint. This function is NULL safe. 
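 *
 * Typical traversal, as a hedged usage sketch (the helper name below
 * is hypothetical, not part of the stack):
 */
static unsigned int
usb_count_endpoints_sketch(struct usb_device *udev)
{
	struct usb_endpoint *ep;
	unsigned int n = 0;

	/* pass NULL to start; pass the previous endpoint to continue */
	for (ep = usb_endpoint_foreach(udev, NULL); ep != NULL;
	    ep = usb_endpoint_foreach(udev, ep))
		n++;
	return (n);
}

/*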
* * Return values: * NULL: End of USB endpoints * Else: Pointer to next USB endpoint *------------------------------------------------------------------------*/ struct usb_endpoint * usb_endpoint_foreach(struct usb_device *udev, struct usb_endpoint *ep) { struct usb_endpoint *ep_end; /* be NULL safe */ if (udev == NULL) return (NULL); ep_end = udev->endpoints + udev->endpoints_max; /* get next endpoint */ if (ep == NULL) ep = udev->endpoints; else ep++; /* find next allocated ep */ while (ep != ep_end) { if (ep->edesc != NULL) return (ep); ep++; } return (NULL); } /*------------------------------------------------------------------------* * usb_wait_pending_refs * * This function will wait for any USB references to go away before * returning. This function is used before freeing a USB device. *------------------------------------------------------------------------*/ static void usb_wait_pending_refs(struct usb_device *udev) { #if USB_HAVE_UGEN DPRINTF("Refcount = %d\n", (int)udev->refcount); mtx_lock(&usb_ref_lock); udev->refcount--; while (1) { /* wait for any pending references to go away */ if (udev->refcount == 0) { /* prevent further refs being taken, if any */ udev->refcount = USB_DEV_REF_MAX; break; } cv_wait(&udev->ref_cv, &usb_ref_lock); } mtx_unlock(&usb_ref_lock); #endif } /*------------------------------------------------------------------------* * usb_unconfigure * * This function will free all USB interfaces and USB endpoints belonging * to an USB device. * * Flag values, see "USB_UNCFG_FLAG_XXX". *------------------------------------------------------------------------*/ static void usb_unconfigure(struct usb_device *udev, uint8_t flag) { uint8_t do_unlock; /* Prevent re-enumeration */ do_unlock = usbd_enum_lock(udev); /* detach all interface drivers */ usb_detach_device(udev, USB_IFACE_INDEX_ANY, flag); #if USB_HAVE_UGEN /* free all FIFOs except control endpoint FIFOs */ usb_fifo_free_wrap(udev, USB_IFACE_INDEX_ANY, flag); /* * Free all cdev's, if any. */ usb_cdev_free(udev); #endif #if USB_HAVE_COMPAT_LINUX /* free Linux compat device, if any */ if (udev->linux_endpoint_start != NULL) { usb_linux_free_device_p(udev); udev->linux_endpoint_start = NULL; } #endif usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_FREE); /* free "cdesc" after "ifaces" and "endpoints", if any */ if (udev->cdesc != NULL) { if (udev->flags.usb_mode != USB_MODE_DEVICE) usbd_free_config_desc(udev, udev->cdesc); udev->cdesc = NULL; } /* set unconfigured state */ udev->curr_config_no = USB_UNCONFIG_NO; udev->curr_config_index = USB_UNCONFIG_INDEX; if (do_unlock) usbd_enum_unlock(udev); } /*------------------------------------------------------------------------* * usbd_set_config_index * * This function selects configuration by index, independent of the * actual configuration number. This function should not be used by * USB drivers. * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usbd_set_config_index(struct usb_device *udev, uint8_t index) { struct usb_status ds; struct usb_config_descriptor *cdp; uint16_t power; uint16_t max_power; uint8_t selfpowered; uint8_t do_unlock; usb_error_t err; DPRINTFN(6, "udev=%p index=%d\n", udev, index); /* Prevent re-enumeration */ do_unlock = usbd_enum_lock(udev); usb_unconfigure(udev, 0); if (index == USB_UNCONFIG_INDEX) { /* * Leave unallocated when unconfiguring the * device. "usb_unconfigure()" will also reset * the current config number and index. 
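 * Configuration value 0 is reserved by the USB specification to mean
 * "unconfigured"; selecting it drops a configured device back to the
 * Addressed state, which is mirrored below. A hedged usage sketch:
 */
#if 0	/* illustrative only */
	err = usbd_set_config_index(udev, USB_UNCONFIG_INDEX);
	/* on success the device state is USB_STATE_ADDRESSED again */
#endif
/* Select configuration zero: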
*/ err = usbd_req_set_config(udev, NULL, USB_UNCONFIG_NO); if (udev->state == USB_STATE_CONFIGURED) usb_set_device_state(udev, USB_STATE_ADDRESSED); goto done; } /* get the full config descriptor */ if (udev->flags.usb_mode == USB_MODE_DEVICE) { /* save some memory */ err = usbd_req_get_descriptor_ptr(udev, &cdp, (UDESC_CONFIG << 8) | index); } else { /* normal request */ err = usbd_req_get_config_desc_full(udev, NULL, &cdp, index); } if (err) { goto done; } /* set the new config descriptor */ udev->cdesc = cdp; /* Figure out if the device is self or bus powered. */ selfpowered = 0; if ((!udev->flags.uq_bus_powered) && (cdp->bmAttributes & UC_SELF_POWERED) && (udev->flags.usb_mode == USB_MODE_HOST)) { /* May be self powered. */ if (cdp->bmAttributes & UC_BUS_POWERED) { /* Must ask device. */ err = usbd_req_get_device_status(udev, NULL, &ds); if (err) { DPRINTFN(0, "could not read " "device status: %s\n", usbd_errstr(err)); } else if (UGETW(ds.wStatus) & UDS_SELF_POWERED) { selfpowered = 1; } DPRINTF("status=0x%04x \n", UGETW(ds.wStatus)); } else selfpowered = 1; } DPRINTF("udev=%p cdesc=%p (addr %d) cno=%d attr=0x%02x, " "selfpowered=%d, power=%d\n", udev, cdp, udev->address, cdp->bConfigurationValue, cdp->bmAttributes, selfpowered, cdp->bMaxPower * 2); /* Check if we have enough power. */ power = cdp->bMaxPower * 2; if (udev->parent_hub) { max_power = udev->parent_hub->hub->portpower; } else { max_power = USB_MAX_POWER; } if (power > max_power) { DPRINTFN(0, "power exceeded %d > %d\n", power, max_power); err = USB_ERR_NO_POWER; goto done; } /* Only update "self_powered" in USB Host Mode */ if (udev->flags.usb_mode == USB_MODE_HOST) { udev->flags.self_powered = selfpowered; } udev->power = power; udev->curr_config_no = cdp->bConfigurationValue; udev->curr_config_index = index; usb_set_device_state(udev, USB_STATE_CONFIGURED); /* Set the actual configuration value. */ err = usbd_req_set_config(udev, NULL, cdp->bConfigurationValue); if (err) { goto done; } err = usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_ALLOC); if (err) { goto done; } err = usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_INIT); if (err) { goto done; } #if USB_HAVE_UGEN /* create device nodes for each endpoint */ usb_cdev_create(udev); #endif done: DPRINTF("error=%s\n", usbd_errstr(err)); if (err) { usb_unconfigure(udev, 0); } if (do_unlock) usbd_enum_unlock(udev); return (err); } /*------------------------------------------------------------------------* * usb_config_parse * * This function will allocate and free USB interfaces and USB endpoints, * parse the USB configuration structure and initialise the USB endpoints * and interfaces. If "iface_index" is not equal to * "USB_IFACE_INDEX_ANY" then the "cmd" parameter is the * alternate_setting to be selected for the given interface. Else the * "cmd" parameter is defined by "USB_CFG_XXX". "iface_index" can be * "USB_IFACE_INDEX_ANY" or a valid USB interface index. This function * is typically called when setting the configuration or when setting * an alternate interface. 
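 *
 * The "iface_index"/"cmd" overload gives two calling shapes; a hedged
 * sketch of both:
 */
#if 0	/* illustrative only */
	/* whole configuration: "cmd" is one of USB_CFG_ALLOC/INIT/FREE */
	err = usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_ALLOC);
	/* single interface: the third argument becomes the alternate index */
	err = usb_config_parse(udev, iface_index, alt_index);
#endif
/*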
* * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ static usb_error_t usb_config_parse(struct usb_device *udev, uint8_t iface_index, uint8_t cmd) { struct usb_idesc_parse_state ips; struct usb_interface_descriptor *id; struct usb_endpoint_descriptor *ed; struct usb_interface *iface; struct usb_endpoint *ep; usb_error_t err; uint8_t ep_curr; uint8_t ep_max; uint8_t temp; uint8_t do_init; uint8_t alt_index; if (iface_index != USB_IFACE_INDEX_ANY) { /* parameter overload */ alt_index = cmd; cmd = USB_CFG_INIT; } else { /* not used */ alt_index = 0; } err = 0; DPRINTFN(5, "iface_index=%d cmd=%d\n", iface_index, cmd); if (cmd == USB_CFG_FREE) goto cleanup; if (cmd == USB_CFG_INIT) { sx_assert(&udev->enum_sx, SA_LOCKED); /* check for in-use endpoints */ ep = udev->endpoints; ep_max = udev->endpoints_max; while (ep_max--) { /* look for matching endpoints */ if ((iface_index == USB_IFACE_INDEX_ANY) || (iface_index == ep->iface_index)) { if (ep->refcount_alloc != 0) { /* * This typically indicates a * more serious error. */ err = USB_ERR_IN_USE; } else { /* reset endpoint */ memset(ep, 0, sizeof(*ep)); /* make sure we don't zero the endpoint again */ ep->iface_index = USB_IFACE_INDEX_ANY; } } ep++; } if (err) return (err); } memset(&ips, 0, sizeof(ips)); ep_curr = 0; ep_max = 0; while ((id = usb_idesc_foreach(udev->cdesc, &ips))) { iface = udev->ifaces + ips.iface_index; /* check for specific interface match */ if (cmd == USB_CFG_INIT) { if ((iface_index != USB_IFACE_INDEX_ANY) && (iface_index != ips.iface_index)) { /* wrong interface */ do_init = 0; } else if (alt_index != ips.iface_index_alt) { /* wrong alternate setting */ do_init = 0; } else { /* initialise interface */ do_init = 1; } /* update number of alternate settings, if any */ if (iface_index == USB_IFACE_INDEX_ANY) iface->num_altsetting = ips.iface_index_alt + 1; } else do_init = 0; /* check for new interface */ if (ips.iface_index_alt == 0) { /* update current number of endpoints */ ep_curr = ep_max; } /* check for init */ if (do_init) { /* setup the USB interface structure */ iface->idesc = id; /* set alternate index */ iface->alt_index = alt_index; /* set default interface parent */ if (iface_index == USB_IFACE_INDEX_ANY) { iface->parent_iface_index = USB_IFACE_INDEX_ANY; } } DPRINTFN(5, "found idesc nendpt=%d\n", id->bNumEndpoints); ed = (struct usb_endpoint_descriptor *)id; temp = ep_curr; /* iterate all the endpoint descriptors */ while ((ed = usb_edesc_foreach(udev->cdesc, ed))) { /* check if endpoint limit has been reached */ if (temp >= USB_MAX_EP_UNITS) { DPRINTF("Endpoint limit reached\n"); break; } ep = udev->endpoints + temp; if (do_init) { void *ecomp; ecomp = usb_ed_comp_foreach(udev->cdesc, (void *)ed); if (ecomp != NULL) DPRINTFN(5, "Found endpoint companion descriptor\n"); usb_init_endpoint(udev, ips.iface_index, ed, ecomp, ep); } temp ++; /* find maximum number of endpoints */ if (ep_max < temp) ep_max = temp; } } /* NOTE: It is valid to have no interfaces and no endpoints! 
*/ if (cmd == USB_CFG_ALLOC) { udev->ifaces_max = ips.iface_index; #if (USB_HAVE_FIXED_IFACE == 0) udev->ifaces = NULL; if (udev->ifaces_max != 0) { udev->ifaces = malloc(sizeof(*iface) * udev->ifaces_max, M_USB, M_WAITOK | M_ZERO); if (udev->ifaces == NULL) { err = USB_ERR_NOMEM; goto done; } } #endif #if (USB_HAVE_FIXED_ENDPOINT == 0) if (ep_max != 0) { udev->endpoints = malloc(sizeof(*ep) * ep_max, M_USB, M_WAITOK | M_ZERO); if (udev->endpoints == NULL) { err = USB_ERR_NOMEM; goto done; } } else { udev->endpoints = NULL; } #endif USB_BUS_LOCK(udev->bus); udev->endpoints_max = ep_max; /* reset any ongoing clear-stall */ udev->ep_curr = NULL; USB_BUS_UNLOCK(udev->bus); } #if (USB_HAVE_FIXED_IFACE == 0) || (USB_HAVE_FIXED_ENDPOINT == 0) done: #endif if (err) { if (cmd == USB_CFG_ALLOC) { cleanup: USB_BUS_LOCK(udev->bus); udev->endpoints_max = 0; /* reset any ongoing clear-stall */ udev->ep_curr = NULL; USB_BUS_UNLOCK(udev->bus); #if (USB_HAVE_FIXED_IFACE == 0) free(udev->ifaces, M_USB); udev->ifaces = NULL; #endif #if (USB_HAVE_FIXED_ENDPOINT == 0) free(udev->endpoints, M_USB); udev->endpoints = NULL; #endif udev->ifaces_max = 0; } } return (err); } /*------------------------------------------------------------------------* * usbd_set_alt_interface_index * * This function will select an alternate interface index for the * given interface index. The interface should not be in use when this * function is called. That means there should not be any open USB * transfers. Else an error is returned. If the alternate setting is * already set this function will simply return success. This function * is called in Host mode and Device mode! * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usbd_set_alt_interface_index(struct usb_device *udev, uint8_t iface_index, uint8_t alt_index) { struct usb_interface *iface = usbd_get_iface(udev, iface_index); usb_error_t err; uint8_t do_unlock; /* Prevent re-enumeration */ do_unlock = usbd_enum_lock(udev); if (iface == NULL) { err = USB_ERR_INVAL; goto done; } if (iface->alt_index == alt_index) { /* * Optimise away duplicate setting of * alternate setting in USB Host Mode! */ err = 0; goto done; } #if USB_HAVE_UGEN /* * Free all generic FIFOs for this interface, except control * endpoint FIFOs: */ usb_fifo_free_wrap(udev, iface_index, 0); #endif err = usb_config_parse(udev, iface_index, alt_index); if (err) { goto done; } if (iface->alt_index != alt_index) { /* the alternate setting does not exist */ err = USB_ERR_INVAL; goto done; } err = usbd_req_set_alt_interface_no(udev, NULL, iface_index, iface->idesc->bAlternateSetting); done: if (do_unlock) usbd_enum_unlock(udev); return (err); } /*------------------------------------------------------------------------* * usbd_set_endpoint_stall * * This function is used to make a BULK or INTERRUPT endpoint send * STALL tokens in USB device mode. * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usbd_set_endpoint_stall(struct usb_device *udev, struct usb_endpoint *ep, uint8_t do_stall) { struct usb_xfer *xfer; usb_stream_t x; uint8_t et; uint8_t was_stalled; if (ep == NULL) { /* nothing to do */ DPRINTF("Cannot find endpoint\n"); /* * Pretend that the clear or set stall request is * successful else some USB host stacks can do * strange things, especially when a control endpoint * stalls. 
*/ return (0); } et = (ep->edesc->bmAttributes & UE_XFERTYPE); if ((et != UE_BULK) && (et != UE_INTERRUPT)) { /* * Should not stall control * nor isochronous endpoints. */ DPRINTF("Invalid endpoint\n"); return (0); } USB_BUS_LOCK(udev->bus); /* store current stall state */ was_stalled = ep->is_stalled; /* check for no change */ if (was_stalled && do_stall) { /* if the endpoint is already stalled do nothing */ USB_BUS_UNLOCK(udev->bus); DPRINTF("No change\n"); return (0); } /* set stalled state */ ep->is_stalled = 1; if (do_stall || (!was_stalled)) { if (!was_stalled) { for (x = 0; x != USB_MAX_EP_STREAMS; x++) { /* lookup the current USB transfer, if any */ xfer = ep->endpoint_q[x].curr; if (xfer != NULL) { /* * The "xfer_stall" method * will complete the USB * transfer like in case of a * timeout setting the error * code "USB_ERR_STALLED". */ (udev->bus->methods->xfer_stall) (xfer); } } } (udev->bus->methods->set_stall) (udev, ep, &do_stall); } if (!do_stall) { ep->toggle_next = 0; /* reset data toggle */ ep->is_stalled = 0; /* clear stalled state */ (udev->bus->methods->clear_stall) (udev, ep); /* start the current or next transfer, if any */ for (x = 0; x != USB_MAX_EP_STREAMS; x++) { usb_command_wrapper(&ep->endpoint_q[x], ep->endpoint_q[x].curr); } } USB_BUS_UNLOCK(udev->bus); return (0); } /*------------------------------------------------------------------------* * usb_reset_iface_endpoints - used in USB device side mode *------------------------------------------------------------------------*/ usb_error_t usb_reset_iface_endpoints(struct usb_device *udev, uint8_t iface_index) { struct usb_endpoint *ep; struct usb_endpoint *ep_end; ep = udev->endpoints; ep_end = udev->endpoints + udev->endpoints_max; for (; ep != ep_end; ep++) { if ((ep->edesc == NULL) || (ep->iface_index != iface_index)) { continue; } /* simulate a clear stall from the peer */ usbd_set_endpoint_stall(udev, ep, 0); } return (0); } /*------------------------------------------------------------------------* * usb_detach_device_sub * * This function will try to detach an USB device. If it fails a panic * will result. * * Flag values, see "USB_UNCFG_FLAG_XXX". *------------------------------------------------------------------------*/ static void usb_detach_device_sub(struct usb_device *udev, device_t *ppdev, char **ppnpinfo, uint8_t flag) { device_t dev; char *pnpinfo; int err; dev = *ppdev; if (dev) { /* * NOTE: It is important to clear "*ppdev" before deleting * the child due to some device methods being called late * during the delete process ! */ *ppdev = NULL; if (!rebooting) { device_printf(dev, "at %s, port %d, addr %d " "(disconnected)\n", device_get_nameunit(udev->parent_dev), udev->port_no, udev->address); } if (device_is_attached(dev)) { if (udev->flags.peer_suspended) { err = DEVICE_RESUME(dev); if (err) { device_printf(dev, "Resume failed\n"); } } } /* detach and delete child */ if (device_delete_child(udev->parent_dev, dev)) { goto error; } } pnpinfo = *ppnpinfo; if (pnpinfo != NULL) { *ppnpinfo = NULL; free(pnpinfo, M_USBDEV); } return; error: /* Detach is not allowed to fail in the USB world */ panic("usb_detach_device_sub: A USB driver would not detach\n"); } /*------------------------------------------------------------------------* * usb_detach_device * * The following function will detach the matching interfaces. * This function is NULL safe. * * Flag values, see "USB_UNCFG_FLAG_XXX". 
*------------------------------------------------------------------------*/ void usb_detach_device(struct usb_device *udev, uint8_t iface_index, uint8_t flag) { struct usb_interface *iface; uint8_t i; if (udev == NULL) { /* nothing to do */ return; } DPRINTFN(4, "udev=%p\n", udev); sx_assert(&udev->enum_sx, SA_LOCKED); /* * First detach the child to give the child's detach routine a * chance to detach the sub-devices in the correct order. * Then delete the child using "device_delete_child()" which * will detach all sub-devices from the bottom and upwards! */ if (iface_index != USB_IFACE_INDEX_ANY) { i = iface_index; iface_index = i + 1; } else { i = 0; iface_index = USB_IFACE_MAX; } /* do the detach */ for (; i != iface_index; i++) { iface = usbd_get_iface(udev, i); if (iface == NULL) { /* looks like the end of the USB interfaces */ break; } usb_detach_device_sub(udev, &iface->subdev, &iface->pnpinfo, flag); } } /*------------------------------------------------------------------------* * usb_probe_and_attach_sub * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ static uint8_t usb_probe_and_attach_sub(struct usb_device *udev, struct usb_attach_arg *uaa) { struct usb_interface *iface; device_t dev; int err; iface = uaa->iface; if (iface->parent_iface_index != USB_IFACE_INDEX_ANY) { /* leave interface alone */ return (0); } dev = iface->subdev; if (dev) { /* clean up after module unload */ if (device_is_attached(dev)) { /* already a device there */ return (0); } /* clear "iface->subdev" as early as possible */ iface->subdev = NULL; if (device_delete_child(udev->parent_dev, dev)) { /* * Panic here, else one can get a double call * to device_detach(). USB devices should * never fail on detach! */ panic("device_delete_child() failed\n"); } } if (uaa->temp_dev == NULL) { /* create a new child */ uaa->temp_dev = device_add_child(udev->parent_dev, NULL, -1); if (uaa->temp_dev == NULL) { device_printf(udev->parent_dev, "Device creation failed\n"); return (1); /* failure */ } device_set_ivars(uaa->temp_dev, uaa); device_quiet(uaa->temp_dev); } /* * Set "subdev" before probe and attach so that "devd" gets * the information it needs. */ iface->subdev = uaa->temp_dev; if (device_probe_and_attach(iface->subdev) == 0) { /* * The USB attach arguments are only available during probe * and attach ! */ uaa->temp_dev = NULL; device_set_ivars(iface->subdev, NULL); if (udev->flags.peer_suspended) { err = DEVICE_SUSPEND(iface->subdev); if (err) device_printf(iface->subdev, "Suspend failed\n"); } return (0); /* success */ } else { /* No USB driver found */ iface->subdev = NULL; } return (1); /* failure */ } /*------------------------------------------------------------------------* * usbd_set_parent_iface * * Using this function will lock the alternate interface setting on an * interface. It is typically used for multi interface drivers. In USB * device side mode it is assumed that the alternate interfaces all * have the same endpoint descriptors. The default parent index value * is "USB_IFACE_INDEX_ANY". Then the alternate setting value is not * locked. 
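 *
 * Hedged usage sketch: a driver attached at interface 0 also claiming
 * interface 1, so that no other driver binds to it:
 *------------------------------------------------------------------------*/
#if 0	/* illustrative only */
	usbd_set_parent_iface(uaa->device, 1, uaa->info.bIfaceIndex);
#endif
/*------------------------------------------------------------------------*
 * The function itself: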
*------------------------------------------------------------------------*/ void usbd_set_parent_iface(struct usb_device *udev, uint8_t iface_index, uint8_t parent_index) { struct usb_interface *iface; if (udev == NULL || iface_index == parent_index) { /* nothing to do */ return; } iface = usbd_get_iface(udev, iface_index); if (iface != NULL) iface->parent_iface_index = parent_index; } static void usb_init_attach_arg(struct usb_device *udev, struct usb_attach_arg *uaa) { memset(uaa, 0, sizeof(*uaa)); uaa->device = udev; uaa->usb_mode = udev->flags.usb_mode; uaa->port = udev->port_no; uaa->dev_state = UAA_DEV_READY; uaa->info.idVendor = UGETW(udev->ddesc.idVendor); uaa->info.idProduct = UGETW(udev->ddesc.idProduct); uaa->info.bcdDevice = UGETW(udev->ddesc.bcdDevice); uaa->info.bDeviceClass = udev->ddesc.bDeviceClass; uaa->info.bDeviceSubClass = udev->ddesc.bDeviceSubClass; uaa->info.bDeviceProtocol = udev->ddesc.bDeviceProtocol; uaa->info.bConfigIndex = udev->curr_config_index; uaa->info.bConfigNum = udev->curr_config_no; } /*------------------------------------------------------------------------* * usb_probe_and_attach * * This function is called from "uhub_explore_sub()", * "usb_handle_set_config()" and "usb_handle_request()". * * Returns: * 0: Success * Else: A control transfer failed *------------------------------------------------------------------------*/ usb_error_t usb_probe_and_attach(struct usb_device *udev, uint8_t iface_index) { struct usb_attach_arg uaa; struct usb_interface *iface; uint8_t i; uint8_t j; uint8_t do_unlock; if (udev == NULL) { DPRINTF("udev == NULL\n"); return (USB_ERR_INVAL); } /* Prevent re-enumeration */ do_unlock = usbd_enum_lock(udev); if (udev->curr_config_index == USB_UNCONFIG_INDEX) { /* do nothing - no configuration has been set */ goto done; } /* setup USB attach arguments */ usb_init_attach_arg(udev, &uaa); /* * If the whole USB device is targeted, invoke the USB event * handler(s): */ if (iface_index == USB_IFACE_INDEX_ANY) { if (usb_test_quirk(&uaa, UQ_MSC_DYMO_EJECT) != 0 && usb_dymo_eject(udev, 0) == 0) { /* success, mark the udev as disappearing */ uaa.dev_state = UAA_DEV_EJECTING; } EVENTHANDLER_INVOKE(usb_dev_configured, udev, &uaa); if (uaa.dev_state != UAA_DEV_READY) { /* leave device unconfigured */ usb_unconfigure(udev, 0); goto done; } } /* Check if only one interface should be probed: */ if (iface_index != USB_IFACE_INDEX_ANY) { i = iface_index; j = i + 1; } else { i = 0; j = USB_IFACE_MAX; } /* Do the probe and attach */ for (; i != j; i++) { iface = usbd_get_iface(udev, i); if (iface == NULL) { /* * Looks like the end of the USB * interfaces ! 
*/ DPRINTFN(2, "end of interfaces " "at %u\n", i); break; } if (iface->idesc == NULL) { /* no interface descriptor */ continue; } uaa.iface = iface; uaa.info.bInterfaceClass = iface->idesc->bInterfaceClass; uaa.info.bInterfaceSubClass = iface->idesc->bInterfaceSubClass; uaa.info.bInterfaceProtocol = iface->idesc->bInterfaceProtocol; uaa.info.bIfaceIndex = i; uaa.info.bIfaceNum = iface->idesc->bInterfaceNumber; uaa.driver_info = 0; /* reset driver_info */ DPRINTFN(2, "iclass=%u/%u/%u iindex=%u/%u\n", uaa.info.bInterfaceClass, uaa.info.bInterfaceSubClass, uaa.info.bInterfaceProtocol, uaa.info.bIfaceIndex, uaa.info.bIfaceNum); usb_probe_and_attach_sub(udev, &uaa); /* * Remove the leftover child, if any, to enforce that * a new nomatch devd event is generated for the next * interface if no driver is found: */ if (uaa.temp_dev == NULL) continue; if (device_delete_child(udev->parent_dev, uaa.temp_dev)) DPRINTFN(0, "device delete child failed\n"); uaa.temp_dev = NULL; } done: if (do_unlock) usbd_enum_unlock(udev); return (0); } /*------------------------------------------------------------------------* * usb_suspend_resume_sub * * This function is called when the suspend or resume methods should * be executed on an USB device. *------------------------------------------------------------------------*/ static void usb_suspend_resume_sub(struct usb_device *udev, device_t dev, uint8_t do_suspend) { int err; if (dev == NULL) { return; } if (!device_is_attached(dev)) { return; } if (do_suspend) { err = DEVICE_SUSPEND(dev); } else { err = DEVICE_RESUME(dev); } if (err) { device_printf(dev, "%s failed\n", do_suspend ? "Suspend" : "Resume"); } } /*------------------------------------------------------------------------* * usb_suspend_resume * * The following function will suspend or resume the USB device. * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usb_suspend_resume(struct usb_device *udev, uint8_t do_suspend) { struct usb_interface *iface; uint8_t i; if (udev == NULL) { /* nothing to do */ return (0); } DPRINTFN(4, "udev=%p do_suspend=%d\n", udev, do_suspend); sx_assert(&udev->sr_sx, SA_LOCKED); USB_BUS_LOCK(udev->bus); /* filter the suspend events */ if (udev->flags.peer_suspended == do_suspend) { USB_BUS_UNLOCK(udev->bus); /* nothing to do */ return (0); } udev->flags.peer_suspended = do_suspend; USB_BUS_UNLOCK(udev->bus); /* do the suspend or resume */ for (i = 0; i != USB_IFACE_MAX; i++) { iface = usbd_get_iface(udev, i); if (iface == NULL) { /* looks like the end of the USB interfaces */ break; } usb_suspend_resume_sub(udev, iface->subdev, do_suspend); } return (0); } /*------------------------------------------------------------------------* * usbd_clear_stall_proc * * This function performs generic USB clear stall operations. *------------------------------------------------------------------------*/ static void usbd_clear_stall_proc(struct usb_proc_msg *_pm) { struct usb_udev_msg *pm = (void *)_pm; struct usb_device *udev = pm->udev; /* Change lock */ USB_BUS_UNLOCK(udev->bus); USB_MTX_LOCK(&udev->device_mtx); /* Start clear stall callback */ usbd_transfer_start(udev->ctrl_xfer[1]); /* Change lock */ USB_MTX_UNLOCK(&udev->device_mtx); USB_BUS_LOCK(udev->bus); } /*------------------------------------------------------------------------* * usb_get_langid * * This function tries to figure out the USB string language to use. 
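 *
 * Worked example of the selection logic below: with an illustrative
 * preference/mask pair of 0x0009/0x00ff, any language ID whose low
 * byte is 0x09 matches, so both US English (0x0409) and UK English
 * (0x0809) would be accepted.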
*------------------------------------------------------------------------*/ void usb_get_langid(struct usb_device *udev) { uint8_t *scratch_ptr; uint8_t do_unlock; int err; /* * Workaround for buggy USB devices. * * It appears that some string-less USB chips will crash and * disappear if any attempts are made to read any string * descriptors. * * Try to detect such chips by checking the strings in the USB * device descriptor. If no strings are present there we * simply disable all USB strings. */ /* Protect scratch area */ do_unlock = usbd_ctrl_lock(udev); scratch_ptr = udev->scratch.data; if (udev->flags.no_strings) { err = USB_ERR_INVAL; } else if (udev->ddesc.iManufacturer || udev->ddesc.iProduct || udev->ddesc.iSerialNumber) { /* read out the language ID string */ err = usbd_req_get_string_desc(udev, NULL, (char *)scratch_ptr, 4, 0, USB_LANGUAGE_TABLE); } else { err = USB_ERR_INVAL; } if (err || (scratch_ptr[0] < 4)) { udev->flags.no_strings = 1; } else { uint16_t langid; uint16_t pref; uint16_t mask; uint8_t x; /* load preferred value and mask */ pref = usb_lang_id; mask = usb_lang_mask; /* align length correctly */ scratch_ptr[0] &= ~1U; /* fix compiler warning */ langid = 0; /* search for preferred language */ for (x = 2; x < scratch_ptr[0]; x += 2) { langid = UGETW(scratch_ptr + x); if ((langid & mask) == pref) break; } if (x >= scratch_ptr[0]) { /* pick the first language as the default */ DPRINTFN(1, "Using first language\n"); langid = UGETW(scratch_ptr + 2); } DPRINTFN(1, "Language selected: 0x%04x\n", langid); udev->langid = langid; } if (do_unlock) usbd_ctrl_unlock(udev); } /*------------------------------------------------------------------------* * usb_alloc_device * * This function allocates a new USB device. This function is called * when a new device has been put in the powered state, but not yet in * the addressed state. Get initial descriptor, set the address, get * full descriptor and get strings. * * Return values: * 0: Failure * Else: Success *------------------------------------------------------------------------*/ struct usb_device * usb_alloc_device(device_t parent_dev, struct usb_bus *bus, struct usb_device *parent_hub, uint8_t depth, uint8_t port_index, uint8_t port_no, enum usb_dev_speed speed, enum usb_hc_mode mode) { struct usb_attach_arg uaa; struct usb_device *udev; struct usb_device *adev; struct usb_device *hub; usb_error_t err; uint8_t device_index; uint8_t config_index; uint8_t config_quirk; uint8_t set_config_failed; DPRINTF("parent_dev=%p, bus=%p, parent_hub=%p, depth=%u, " "port_index=%u, port_no=%u, speed=%u, usb_mode=%u\n", parent_dev, bus, parent_hub, depth, port_index, port_no, speed, mode); /* * Find an unused device index. In USB Host mode this is the * same as the device address. * * Device index zero is not used and device index 1 should * always be the root hub. 
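 *
 * The search below therefore starts at USB_ROOT_HUB_ADDR and walks
 * "bus->devices[]" upwards until a free slot is found.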
*/ for (device_index = USB_ROOT_HUB_ADDR; (device_index != bus->devices_max) && (bus->devices[device_index] != NULL); device_index++) /* nop */; if (device_index == bus->devices_max) { device_printf(bus->bdev, "No free USB device index for new device\n"); return (NULL); } if (depth > 0x10) { device_printf(bus->bdev, "Invalid device depth\n"); return (NULL); } udev = malloc(sizeof(*udev), M_USB, M_WAITOK | M_ZERO); #if (USB_HAVE_MALLOC_WAITOK == 0) if (udev == NULL) { return (NULL); } #endif /* initialise our SX-lock */ sx_init_flags(&udev->enum_sx, "USB config SX lock", SX_DUPOK); sx_init_flags(&udev->sr_sx, "USB suspend and resume SX lock", SX_NOWITNESS); sx_init_flags(&udev->ctrl_sx, "USB control transfer SX lock", SX_DUPOK); cv_init(&udev->ctrlreq_cv, "WCTRL"); cv_init(&udev->ref_cv, "UGONE"); /* initialise our mutex */ mtx_init(&udev->device_mtx, "USB device mutex", NULL, MTX_DEF); /* initialise generic clear stall */ udev->cs_msg[0].hdr.pm_callback = &usbd_clear_stall_proc; udev->cs_msg[0].udev = udev; udev->cs_msg[1].hdr.pm_callback = &usbd_clear_stall_proc; udev->cs_msg[1].udev = udev; /* initialise some USB device fields */ udev->parent_hub = parent_hub; udev->parent_dev = parent_dev; udev->port_index = port_index; udev->port_no = port_no; udev->depth = depth; udev->bus = bus; udev->address = USB_START_ADDR; /* default value */ udev->plugtime = (usb_ticks_t)ticks; /* * We need to force the power mode to "on" because there are plenty * of USB devices out there that do not work very well with * automatic suspend and resume! */ udev->power_mode = usbd_filter_power_mode(udev, USB_POWER_MODE_ON); udev->pwr_save.last_xfer_time = ticks; /* we are not ready yet */ udev->refcount = 1; /* set up default endpoint descriptor */ udev->ctrl_ep_desc.bLength = sizeof(udev->ctrl_ep_desc); udev->ctrl_ep_desc.bDescriptorType = UDESC_ENDPOINT; udev->ctrl_ep_desc.bEndpointAddress = USB_CONTROL_ENDPOINT; udev->ctrl_ep_desc.bmAttributes = UE_CONTROL; udev->ctrl_ep_desc.wMaxPacketSize[0] = USB_MAX_IPACKET; udev->ctrl_ep_desc.wMaxPacketSize[1] = 0; udev->ctrl_ep_desc.bInterval = 0; /* set up default endpoint companion descriptor */ udev->ctrl_ep_comp_desc.bLength = sizeof(udev->ctrl_ep_comp_desc); udev->ctrl_ep_comp_desc.bDescriptorType = UDESC_ENDPOINT_SS_COMP; udev->ddesc.bMaxPacketSize = USB_MAX_IPACKET; udev->speed = speed; udev->flags.usb_mode = mode; /* search for our High Speed USB HUB, if any */ adev = udev; hub = udev->parent_hub; while (hub) { if (hub->speed == USB_SPEED_HIGH) { udev->hs_hub_addr = hub->address; udev->parent_hs_hub = hub; udev->hs_port_no = adev->port_no; break; } adev = hub; hub = hub->parent_hub; } /* init the default endpoint */ usb_init_endpoint(udev, 0, &udev->ctrl_ep_desc, &udev->ctrl_ep_comp_desc, &udev->ctrl_ep); /* set device index */ udev->device_index = device_index; #if USB_HAVE_UGEN /* Create ugen name */ snprintf(udev->ugen_name, sizeof(udev->ugen_name), USB_GENERIC_NAME "%u.%u", device_get_unit(bus->bdev), device_index); LIST_INIT(&udev->pd_list); /* Create the control endpoint device */ udev->ctrl_dev = usb_make_dev(udev, NULL, 0, 0, FREAD|FWRITE, UID_ROOT, GID_OPERATOR, 0600); /* Create a link from /dev/ugenX.X to the default endpoint */ if (udev->ctrl_dev != NULL) make_dev_alias(udev->ctrl_dev->cdev, "%s", udev->ugen_name); #endif /* Initialise device */ if (bus->methods->device_init != NULL) { err = (bus->methods->device_init) (udev); if (err != 0) { DPRINTFN(0, "device init %d failed " "(%s, ignored)\n", device_index, usbd_errstr(err)); goto done; } } /* set 
powered device state after device init is complete */ usb_set_device_state(udev, USB_STATE_POWERED); if (udev->flags.usb_mode == USB_MODE_HOST) { err = usbd_req_set_address(udev, NULL, device_index); /* * This is the new USB device address from now on, if * the set address request didn't set it already. */ if (udev->address == USB_START_ADDR) udev->address = device_index; /* * We ignore any set-address errors, because there are * buggy USB devices out there that actually receive * the SETUP PID, but manage to set the address before * the STATUS stage is ACK'ed. If the device responds * to the subsequent get-descriptor at the new * address, then we know that the set-address command * was successful. */ if (err) { DPRINTFN(0, "set address %d failed " "(%s, ignored)\n", udev->address, usbd_errstr(err)); } } else { /* We are not self powered */ udev->flags.self_powered = 0; /* Set unconfigured state */ udev->curr_config_no = USB_UNCONFIG_NO; udev->curr_config_index = USB_UNCONFIG_INDEX; /* Setup USB descriptors */ err = (usb_temp_setup_by_index_p) (udev, usb_template); if (err) { DPRINTFN(0, "setting up USB template failed - " "usb_template(4) not loaded?\n"); goto done; } } usb_set_device_state(udev, USB_STATE_ADDRESSED); /* setup the device descriptor and the initial "wMaxPacketSize" */ err = usbd_setup_device_desc(udev, NULL); if (err != 0) { /* try to enumerate two more times */ err = usbd_req_re_enumerate(udev, NULL); if (err != 0) { err = usbd_req_re_enumerate(udev, NULL); if (err != 0) { goto done; } } } /* * Setup temporary USB attach args so that we can figure out some * basic quirks for this device. */ usb_init_attach_arg(udev, &uaa); if (usb_test_quirk(&uaa, UQ_BUS_POWERED)) { udev->flags.uq_bus_powered = 1; } if (usb_test_quirk(&uaa, UQ_NO_STRINGS)) { udev->flags.no_strings = 1; } usb_get_langid(udev); /* assume 100mA bus powered for now. Changed when configured. */ udev->power = USB_MIN_POWER; /* fetch the vendor and product strings from the device */ usb_set_device_strings(udev); if (udev->flags.usb_mode == USB_MODE_DEVICE) { /* USB device mode setup is complete */ err = 0; goto config_done; } /* * Most USB devices should attach to config index 0 by * default */ if (usb_test_quirk(&uaa, UQ_CFG_INDEX_0)) { config_index = 0; config_quirk = 1; } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_1)) { config_index = 1; config_quirk = 1; } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_2)) { config_index = 2; config_quirk = 1; } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_3)) { config_index = 3; config_quirk = 1; } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_4)) { config_index = 4; config_quirk = 1; } else { config_index = 0; config_quirk = 0; } set_config_failed = 0; repeat_set_config: DPRINTF("setting config %u\n", config_index); /* get the USB device configured */ err = usbd_set_config_index(udev, config_index); if (err) { if (udev->ddesc.bNumConfigurations != 0) { if (!set_config_failed) { set_config_failed = 1; /* XXX try to re-enumerate the device */ err = usbd_req_re_enumerate(udev, NULL); if (err == 0) goto repeat_set_config; } DPRINTFN(0, "Failure selecting configuration index %u:" "%s, port %u, addr %u (ignored)\n", config_index, usbd_errstr(err), udev->port_no, udev->address); } /* * Some USB devices do not have any configurations. Ignore any * set config failures!
*/ err = 0; goto config_done; } if (!config_quirk && config_index + 1 < udev->ddesc.bNumConfigurations) { if ((udev->cdesc->bNumInterface < 2) && usbd_get_no_descriptors(udev->cdesc, UDESC_ENDPOINT) == 0) { DPRINTFN(0, "Found no endpoints, trying next config\n"); config_index++; goto repeat_set_config; } #if USB_HAVE_MSCTEST if (config_index == 0) { /* * Try to figure out if we have an * auto-install disk there: */ if (usb_iface_is_cdrom(udev, 0)) { DPRINTFN(0, "Found possible auto-install " "disk (trying next config)\n"); config_index++; goto repeat_set_config; } } #endif } #if USB_HAVE_MSCTEST if (set_config_failed == 0 && config_index == 0 && usb_test_quirk(&uaa, UQ_MSC_NO_START_STOP) == 0 && usb_test_quirk(&uaa, UQ_MSC_NO_PREVENT_ALLOW) == 0 && usb_test_quirk(&uaa, UQ_MSC_NO_SYNC_CACHE) == 0 && usb_test_quirk(&uaa, UQ_MSC_NO_TEST_UNIT_READY) == 0 && usb_test_quirk(&uaa, UQ_MSC_NO_GETMAXLUN) == 0) { /* * Try to figure out if there are any MSC quirks we * should apply automatically: */ err = usb_msc_auto_quirk(udev, 0, &uaa); if (err != 0) { set_config_failed = 1; goto repeat_set_config; } } #endif config_done: DPRINTF("new dev (addr %d), udev=%p, parent_hub=%p\n", udev->address, udev, udev->parent_hub); /* register our device - we are ready */ usb_bus_port_set_device(bus, parent_hub ? parent_hub->hub->ports + port_index : NULL, udev, device_index); #if USB_HAVE_UGEN /* Symlink the ugen device name */ udev->ugen_symlink = usb_alloc_symlink(udev->ugen_name); /* Announce device */ printf("%s: <%s %s> at %s\n", udev->ugen_name, usb_get_manufacturer(udev), usb_get_product(udev), device_get_nameunit(udev->bus->bdev)); #endif #if USB_HAVE_DEVCTL usb_notify_addq("ATTACH", udev); #endif done: if (err) { /* * Free USB device and all subdevices, if any. */ usb_free_device(udev, 0); udev = NULL; } return (udev); } #if USB_HAVE_UGEN struct usb_fs_privdata * usb_make_dev(struct usb_device *udev, const char *devname, int ep, int fi, int rwmode, uid_t uid, gid_t gid, int mode) { struct usb_fs_privdata* pd; struct make_dev_args args; char buffer[32]; /* Store information to locate ourselves again later */ pd = malloc(sizeof(struct usb_fs_privdata), M_USBDEV, M_WAITOK | M_ZERO); pd->bus_index = device_get_unit(udev->bus->bdev); pd->dev_index = udev->device_index; pd->ep_addr = ep; pd->fifo_index = fi; pd->mode = rwmode; /* Now, create the device itself */ if (devname == NULL) { devname = buffer; snprintf(buffer, sizeof(buffer), USB_DEVICE_DIR "/%u.%u.%u", pd->bus_index, pd->dev_index, pd->ep_addr); } /* Setup arguments for make_dev_s() */ make_dev_args_init(&args); args.mda_devsw = &usb_devsw; args.mda_uid = uid; args.mda_gid = gid; args.mda_mode = mode; args.mda_si_drv1 = pd; if (make_dev_s(&args, &pd->cdev, "%s", devname) != 0) { DPRINTFN(0, "Failed to create device %s\n", devname); free(pd, M_USBDEV); return (NULL); } return (pd); } void usb_destroy_dev_sync(struct usb_fs_privdata *pd) { DPRINTFN(1, "Destroying device at ugen%d.%d\n", pd->bus_index, pd->dev_index); /* * Destroy character device synchronously. After this * all system calls are returned. Can block. 
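 * (In other words: once destroy_dev() has returned, no thread is
 * still executing inside this character device's methods.)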
*/ destroy_dev(pd->cdev); free(pd, M_USBDEV); } void usb_destroy_dev(struct usb_fs_privdata *pd) { struct usb_bus *bus; if (pd == NULL) return; mtx_lock(&usb_ref_lock); bus = devclass_get_softc(usb_devclass_ptr, pd->bus_index); mtx_unlock(&usb_ref_lock); if (bus == NULL) { usb_destroy_dev_sync(pd); return; } /* make sure we can re-use the device name */ delist_dev(pd->cdev); USB_BUS_LOCK(bus); LIST_INSERT_HEAD(&bus->pd_cleanup_list, pd, pd_next); /* get cleanup going */ usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->cleanup_msg[0], &bus->cleanup_msg[1]); USB_BUS_UNLOCK(bus); } static void usb_cdev_create(struct usb_device *udev) { struct usb_config_descriptor *cd; struct usb_endpoint_descriptor *ed; struct usb_descriptor *desc; struct usb_fs_privdata* pd; int inmode, outmode, inmask, outmask, mode; uint8_t ep; KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("stale cdev entries")); DPRINTFN(2, "Creating device nodes\n"); if (usbd_get_mode(udev) == USB_MODE_DEVICE) { inmode = FWRITE; outmode = FREAD; } else { /* USB_MODE_HOST */ inmode = FREAD; outmode = FWRITE; } inmask = 0; outmask = 0; desc = NULL; /* * Collect all used endpoint numbers instead of just * generating 16 static endpoints. */ cd = usbd_get_config_descriptor(udev); while ((desc = usb_desc_foreach(cd, desc))) { /* filter out all endpoint descriptors */ if ((desc->bDescriptorType == UDESC_ENDPOINT) && (desc->bLength >= sizeof(*ed))) { ed = (struct usb_endpoint_descriptor *)desc; /* update masks */ ep = ed->bEndpointAddress; if (UE_GET_DIR(ep) == UE_DIR_OUT) outmask |= 1 << UE_GET_ADDR(ep); else inmask |= 1 << UE_GET_ADDR(ep); } } /* Create all available endpoints except EP0 */ for (ep = 1; ep < 16; ep++) { mode = (inmask & (1 << ep)) ? inmode : 0; mode |= (outmask & (1 << ep)) ? outmode : 0; if (mode == 0) continue; /* no IN or OUT endpoint */ pd = usb_make_dev(udev, NULL, ep, 0, mode, UID_ROOT, GID_OPERATOR, 0600); if (pd != NULL) LIST_INSERT_HEAD(&udev->pd_list, pd, pd_next); } } static void usb_cdev_free(struct usb_device *udev) { struct usb_fs_privdata* pd; DPRINTFN(2, "Freeing device nodes\n"); while ((pd = LIST_FIRST(&udev->pd_list)) != NULL) { KASSERT(pd->cdev->si_drv1 == pd, ("privdata corrupt")); LIST_REMOVE(pd, pd_next); usb_destroy_dev(pd); } } #endif /*------------------------------------------------------------------------* * usb_free_device * * This function is NULL safe and will free an USB device and its * children devices, if any. * * Flag values: Reserved, set to zero. 
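 *
 * Typical call, mirroring the error path of usb_alloc_device():
 *
 *	usb_free_device(udev, 0);
 *	udev = NULL;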
*------------------------------------------------------------------------*/ void usb_free_device(struct usb_device *udev, uint8_t flag) { struct usb_bus *bus; if (udev == NULL) return; /* already freed */ DPRINTFN(4, "udev=%p port=%d\n", udev, udev->port_no); bus = udev->bus; /* set DETACHED state to prevent any further references */ usb_set_device_state(udev, USB_STATE_DETACHED); #if USB_HAVE_DEVCTL usb_notify_addq("DETACH", udev); #endif #if USB_HAVE_UGEN if (!rebooting) { printf("%s: <%s %s> at %s (disconnected)\n", udev->ugen_name, usb_get_manufacturer(udev), usb_get_product(udev), device_get_nameunit(bus->bdev)); } /* Destroy UGEN symlink, if any */ if (udev->ugen_symlink) { usb_free_symlink(udev->ugen_symlink); udev->ugen_symlink = NULL; } usb_destroy_dev(udev->ctrl_dev); #endif if (udev->flags.usb_mode == USB_MODE_DEVICE) { /* stop receiving any control transfers (Device Side Mode) */ usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX); } /* the following will get the device unconfigured in software */ usb_unconfigure(udev, USB_UNCFG_FLAG_FREE_EP0); /* final device unregister after all character devices are closed */ usb_bus_port_set_device(bus, udev->parent_hub ? udev->parent_hub->hub->ports + udev->port_index : NULL, NULL, USB_ROOT_HUB_ADDR); /* unsetup any leftover default USB transfers */ usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX); /* template unsetup, if any */ (usb_temp_unsetup_p) (udev); /* * Make sure that our clear-stall messages are not queued * anywhere: */ USB_BUS_LOCK(udev->bus); usb_proc_mwait(USB_BUS_CS_PROC(udev->bus), &udev->cs_msg[0], &udev->cs_msg[1]); USB_BUS_UNLOCK(udev->bus); /* wait for all references to go away */ usb_wait_pending_refs(udev); sx_destroy(&udev->enum_sx); sx_destroy(&udev->sr_sx); sx_destroy(&udev->ctrl_sx); cv_destroy(&udev->ctrlreq_cv); cv_destroy(&udev->ref_cv); mtx_destroy(&udev->device_mtx); #if USB_HAVE_UGEN KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("leaked cdev entries")); #endif /* Uninitialise device */ if (bus->methods->device_uninit != NULL) (bus->methods->device_uninit) (udev); /* free device */ free(udev->serial, M_USB); free(udev->manufacturer, M_USB); free(udev->product, M_USB); free(udev, M_USB); } /*------------------------------------------------------------------------* * usbd_get_iface * * This function is the safe way to get the USB interface structure * pointer by interface index. * * Return values: * NULL: Interface not present. * Else: Pointer to USB interface structure. *------------------------------------------------------------------------*/ struct usb_interface * usbd_get_iface(struct usb_device *udev, uint8_t iface_index) { struct usb_interface *iface = udev->ifaces + iface_index; if (iface_index >= udev->ifaces_max) return (NULL); return (iface); } /*------------------------------------------------------------------------* * usbd_find_descriptor * * This function will lookup the first descriptor that matches the * criteria given by the arguments "type" and "subtype". Descriptors * will only be searched within the interface having the index * "iface_index". If the "id" argument points to an USB descriptor, * it will be skipped before the search is started. This allows * searching for multiple descriptors using the same criteria. Else * the search is started after the interface descriptor. 
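 *
 * Illustrative search loop ("my_type" and "my_subtype" are
 * placeholder values):
 *
 *	void *id = NULL;
 *
 *	while ((id = usbd_find_descriptor(udev, id, iface_index,
 *	    my_type, 0xFF, my_subtype, 0xFF)) != NULL) {
 *		handle the descriptor pointed to by "id"
 *	}
 *
 * Feeding the previous return value back in via "id" continues the
 * search after that descriptor.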
* * Return values: * NULL: End of descriptors * Else: A descriptor matching the criteria *------------------------------------------------------------------------*/ void * usbd_find_descriptor(struct usb_device *udev, void *id, uint8_t iface_index, uint8_t type, uint8_t type_mask, uint8_t subtype, uint8_t subtype_mask) { struct usb_descriptor *desc; struct usb_config_descriptor *cd; struct usb_interface *iface; cd = usbd_get_config_descriptor(udev); if (cd == NULL) { return (NULL); } if (id == NULL) { iface = usbd_get_iface(udev, iface_index); if (iface == NULL) { return (NULL); } id = usbd_get_interface_descriptor(iface); if (id == NULL) { return (NULL); } } desc = (void *)id; while ((desc = usb_desc_foreach(cd, desc))) { if (desc->bDescriptorType == UDESC_INTERFACE) { break; } if (((desc->bDescriptorType & type_mask) == type) && ((desc->bDescriptorSubtype & subtype_mask) == subtype)) { return (desc); } } return (NULL); } /*------------------------------------------------------------------------* * usb_devinfo * * This function will dump information from the device descriptor * belonging to the USB device pointed to by "udev", to the string * pointed to by "dst_ptr" having a maximum length of "dst_len" bytes * including the terminating zero. *------------------------------------------------------------------------*/ void usb_devinfo(struct usb_device *udev, char *dst_ptr, uint16_t dst_len) { struct usb_device_descriptor *udd = &udev->ddesc; uint16_t bcdDevice; uint16_t bcdUSB; bcdUSB = UGETW(udd->bcdUSB); bcdDevice = UGETW(udd->bcdDevice); if (udd->bDeviceClass != 0xFF) { snprintf(dst_ptr, dst_len, "%s %s, class %d/%d, rev %x.%02x/" "%x.%02x, addr %d", usb_get_manufacturer(udev), usb_get_product(udev), udd->bDeviceClass, udd->bDeviceSubClass, (bcdUSB >> 8), bcdUSB & 0xFF, (bcdDevice >> 8), bcdDevice & 0xFF, udev->address); } else { snprintf(dst_ptr, dst_len, "%s %s, rev %x.%02x/" "%x.%02x, addr %d", usb_get_manufacturer(udev), usb_get_product(udev), (bcdUSB >> 8), bcdUSB & 0xFF, (bcdDevice >> 8), bcdDevice & 0xFF, udev->address); } } #ifdef USB_VERBOSE /* * Descriptions of known vendors and devices ("products").
*/ struct usb_knowndev { uint16_t vendor; uint16_t product; uint32_t flags; const char *vendorname; const char *productname; }; #define USB_KNOWNDEV_NOPROD 0x01 /* match on vendor only */ #include "usbdevs.h" #include "usbdevs_data.h" #endif /* USB_VERBOSE */ void usb_set_device_strings(struct usb_device *udev) { struct usb_device_descriptor *udd = &udev->ddesc; #ifdef USB_VERBOSE const struct usb_knowndev *kdp; #endif char *temp_ptr; size_t temp_size; uint16_t vendor_id; uint16_t product_id; uint8_t do_unlock; /* Protect scratch area */ do_unlock = usbd_ctrl_lock(udev); temp_ptr = (char *)udev->scratch.data; temp_size = sizeof(udev->scratch.data); vendor_id = UGETW(udd->idVendor); product_id = UGETW(udd->idProduct); /* cleanup old strings, if any */ free(udev->serial, M_USB); free(udev->manufacturer, M_USB); free(udev->product, M_USB); /* zero the string pointers */ udev->serial = NULL; udev->manufacturer = NULL; udev->product = NULL; /* get serial number string */ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size, udev->ddesc.iSerialNumber); udev->serial = strdup(temp_ptr, M_USB); /* get manufacturer string */ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size, udev->ddesc.iManufacturer); usb_trim_spaces(temp_ptr); if (temp_ptr[0] != '\0') udev->manufacturer = strdup(temp_ptr, M_USB); /* get product string */ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size, udev->ddesc.iProduct); usb_trim_spaces(temp_ptr); if (temp_ptr[0] != '\0') udev->product = strdup(temp_ptr, M_USB); #ifdef USB_VERBOSE if (udev->manufacturer == NULL || udev->product == NULL) { for (kdp = usb_knowndevs; kdp->vendorname != NULL; kdp++) { if (kdp->vendor == vendor_id && (kdp->product == product_id || (kdp->flags & USB_KNOWNDEV_NOPROD) != 0)) break; } if (kdp->vendorname != NULL) { /* XXX should use pointer to knowndevs string */ if (udev->manufacturer == NULL) { udev->manufacturer = strdup(kdp->vendorname, M_USB); } if (udev->product == NULL && (kdp->flags & USB_KNOWNDEV_NOPROD) == 0) { udev->product = strdup(kdp->productname, M_USB); } } } #endif /* Provide default strings if none were found */ if (udev->manufacturer == NULL) { snprintf(temp_ptr, temp_size, "vendor 0x%04x", vendor_id); udev->manufacturer = strdup(temp_ptr, M_USB); } if (udev->product == NULL) { snprintf(temp_ptr, temp_size, "product 0x%04x", product_id); udev->product = strdup(temp_ptr, M_USB); } if (do_unlock) usbd_ctrl_unlock(udev); } /* * Returns: * See: USB_MODE_XXX */ enum usb_hc_mode usbd_get_mode(struct usb_device *udev) { return (udev->flags.usb_mode); } /* * Returns: * See: USB_SPEED_XXX */ enum usb_dev_speed usbd_get_speed(struct usb_device *udev) { return (udev->speed); } uint32_t usbd_get_isoc_fps(struct usb_device *udev) { ; /* indent fix */ switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: return (1000); default: return (8000); } } struct usb_device_descriptor * usbd_get_device_descriptor(struct usb_device *udev) { if (udev == NULL) return (NULL); /* be NULL safe */ return (&udev->ddesc); } struct usb_config_descriptor * usbd_get_config_descriptor(struct usb_device *udev) { if (udev == NULL) return (NULL); /* be NULL safe */ return (udev->cdesc); } /*------------------------------------------------------------------------* * usb_test_quirk - test a device for a given quirk * * Return values: * 0: The USB device does not have the given quirk. * Else: The USB device has the given quirk. 
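 *
 * Illustrative check, using a quirk constant that appears elsewhere
 * in this file:
 *
 *	if (usb_test_quirk(uaa, UQ_MSC_NO_SYNC_CACHE))
 *		skip the SYNCHRONIZE CACHE command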
*------------------------------------------------------------------------*/ uint8_t usb_test_quirk(const struct usb_attach_arg *uaa, uint16_t quirk) { uint8_t found; uint8_t x; if (quirk == UQ_NONE) return (0); /* search the automatic per device quirks first */ for (x = 0; x != USB_MAX_AUTO_QUIRK; x++) { if (uaa->device->autoQuirk[x] == quirk) return (1); } /* search global quirk table, if any */ found = (usb_test_quirk_p) (&uaa->info, quirk); return (found); } struct usb_interface_descriptor * usbd_get_interface_descriptor(struct usb_interface *iface) { if (iface == NULL) return (NULL); /* be NULL safe */ return (iface->idesc); } uint8_t usbd_get_interface_altindex(struct usb_interface *iface) { return (iface->alt_index); } uint8_t usbd_get_bus_index(struct usb_device *udev) { return ((uint8_t)device_get_unit(udev->bus->bdev)); } uint8_t usbd_get_device_index(struct usb_device *udev) { return (udev->device_index); } #if USB_HAVE_DEVCTL static void usb_notify_addq(const char *type, struct usb_device *udev) { struct usb_interface *iface; struct sbuf *sb; int i; /* announce the device */ sb = sbuf_new_auto(); sbuf_printf(sb, #if USB_HAVE_UGEN "ugen=%s " "cdev=%s " #endif "vendor=0x%04x " "product=0x%04x " "devclass=0x%02x " "devsubclass=0x%02x " "sernum=\"%s\" " "release=0x%04x " "mode=%s " "port=%u " #if USB_HAVE_UGEN "parent=%s" #endif "", #if USB_HAVE_UGEN udev->ugen_name, udev->ugen_name, #endif UGETW(udev->ddesc.idVendor), UGETW(udev->ddesc.idProduct), udev->ddesc.bDeviceClass, udev->ddesc.bDeviceSubClass, usb_get_serial(udev), UGETW(udev->ddesc.bcdDevice), (udev->flags.usb_mode == USB_MODE_HOST) ? "host" : "device", udev->port_no #if USB_HAVE_UGEN , udev->parent_hub != NULL ? udev->parent_hub->ugen_name : device_get_nameunit(device_get_parent(udev->bus->bdev)) #endif ); sbuf_finish(sb); devctl_notify("USB", "DEVICE", type, sbuf_data(sb)); sbuf_delete(sb); /* announce each interface */ for (i = 0; i < USB_IFACE_MAX; i++) { iface = usbd_get_iface(udev, i); if (iface == NULL) break; /* end of interfaces */ if (iface->idesc == NULL) continue; /* no interface descriptor */ sb = sbuf_new_auto(); sbuf_printf(sb, #if USB_HAVE_UGEN "ugen=%s " "cdev=%s " #endif "vendor=0x%04x " "product=0x%04x " "devclass=0x%02x " "devsubclass=0x%02x " "sernum=\"%s\" " "release=0x%04x " "mode=%s " "interface=%d " "endpoints=%d " "intclass=0x%02x " "intsubclass=0x%02x " "intprotocol=0x%02x", #if USB_HAVE_UGEN udev->ugen_name, udev->ugen_name, #endif UGETW(udev->ddesc.idVendor), UGETW(udev->ddesc.idProduct), udev->ddesc.bDeviceClass, udev->ddesc.bDeviceSubClass, usb_get_serial(udev), UGETW(udev->ddesc.bcdDevice), (udev->flags.usb_mode == USB_MODE_HOST) ? "host" : "device", iface->idesc->bInterfaceNumber, iface->idesc->bNumEndpoints, iface->idesc->bInterfaceClass, iface->idesc->bInterfaceSubClass, iface->idesc->bInterfaceProtocol); sbuf_finish(sb); devctl_notify("USB", "INTERFACE", type, sbuf_data(sb)); sbuf_delete(sb); } } #endif #if USB_HAVE_UGEN /*------------------------------------------------------------------------* * usb_fifo_free_wrap * * This function will free the FIFOs. * * Description of "flag" argument: If the USB_UNCFG_FLAG_FREE_EP0 flag * is set and "iface_index" is set to "USB_IFACE_INDEX_ANY", we free * all FIFOs. If the USB_UNCFG_FLAG_FREE_EP0 flag is not set and * "iface_index" is set to "USB_IFACE_INDEX_ANY", we free all non * control endpoint FIFOs. If "iface_index" is not set to * "USB_IFACE_INDEX_ANY" the flag has no effect. 
*------------------------------------------------------------------------*/ static void usb_fifo_free_wrap(struct usb_device *udev, uint8_t iface_index, uint8_t flag) { struct usb_fifo *f; uint16_t i; /* * Free any USB FIFOs on the given interface: */ for (i = 0; i != USB_FIFO_MAX; i++) { f = udev->fifo[i]; if (f == NULL) { continue; } /* Check if the interface index matches */ if (iface_index == f->iface_index) { if (f->methods != &usb_ugen_methods) { /* * Don't free any non-generic FIFOs in * this case. */ continue; } if ((f->dev_ep_index == 0) && (f->fs_xfer == NULL)) { /* no need to free this FIFO */ continue; } } else if (iface_index == USB_IFACE_INDEX_ANY) { if ((f->methods == &usb_ugen_methods) && (f->dev_ep_index == 0) && (!(flag & USB_UNCFG_FLAG_FREE_EP0)) && (f->fs_xfer == NULL)) { /* no need to free this FIFO */ continue; } } else { /* no need to free this FIFO */ continue; } /* free this FIFO */ usb_fifo_free(f); } } #endif /*------------------------------------------------------------------------* * usb_peer_can_wakeup * * Return values: * 0: Peer cannot do resume signalling. * Else: Peer can do resume signalling. *------------------------------------------------------------------------*/ uint8_t usb_peer_can_wakeup(struct usb_device *udev) { const struct usb_config_descriptor *cdp; cdp = udev->cdesc; if ((cdp != NULL) && (udev->flags.usb_mode == USB_MODE_HOST)) { return (cdp->bmAttributes & UC_REMOTE_WAKEUP); } return (0); /* not supported */ } void usb_set_device_state(struct usb_device *udev, enum usb_dev_state state) { KASSERT(state < USB_STATE_MAX, ("invalid udev state")); DPRINTF("udev %p state %s -> %s\n", udev, usb_statestr(udev->state), usb_statestr(state)); #if USB_HAVE_UGEN mtx_lock(&usb_ref_lock); #endif udev->state = state; #if USB_HAVE_UGEN mtx_unlock(&usb_ref_lock); #endif if (udev->bus->methods->device_state_change != NULL) (udev->bus->methods->device_state_change) (udev); } enum usb_dev_state usb_get_device_state(struct usb_device *udev) { if (udev == NULL) return (USB_STATE_DETACHED); return (udev->state); } uint8_t usbd_device_attached(struct usb_device *udev) { return (udev->state > USB_STATE_DETACHED); } /* * The following function locks enumerating the given USB device. If * the lock is already grabbed this function returns zero. Else a * value of one is returned. */ uint8_t usbd_enum_lock(struct usb_device *udev) { if (sx_xlocked(&udev->enum_sx)) return (0); sx_xlock(&udev->enum_sx); sx_xlock(&udev->sr_sx); /* * NEWBUS LOCK NOTE: We should check if any parent SX locks * are locked before locking Giant. Else the lock can be * locked multiple times. */ - mtx_lock(&Giant); + bus_topo_lock(); return (1); } #if USB_HAVE_UGEN /* * This function is the same as usbd_enum_lock() except a value of * 255 is returned when a signal is pending: */ uint8_t usbd_enum_lock_sig(struct usb_device *udev) { if (sx_xlocked(&udev->enum_sx)) return (0); if (sx_xlock_sig(&udev->enum_sx)) return (255); if (sx_xlock_sig(&udev->sr_sx)) { sx_xunlock(&udev->enum_sx); return (255); } - mtx_lock(&Giant); + bus_topo_lock(); return (1); } #endif /* The following function unlocks enumerating the given USB device. */ void usbd_enum_unlock(struct usb_device *udev) { - mtx_unlock(&Giant); + bus_topo_unlock(); sx_xunlock(&udev->enum_sx); sx_xunlock(&udev->sr_sx); } /* The following function locks suspend and resume. */ void usbd_sr_lock(struct usb_device *udev) { sx_xlock(&udev->sr_sx); /* * NEWBUS LOCK NOTE: We should check if any parent SX locks * are locked before locking Giant.
Else the lock can be * locked multiple times. */ - mtx_lock(&Giant); + bus_topo_lock(); } /* The following function unlocks suspend and resume. */ void usbd_sr_unlock(struct usb_device *udev) { - mtx_unlock(&Giant); + bus_topo_unlock(); sx_xunlock(&udev->sr_sx); } /* * The following function checks the enumerating lock for the given * USB device. */ uint8_t usbd_enum_is_locked(struct usb_device *udev) { return (sx_xlocked(&udev->enum_sx)); } /* * The following function is used to serialize access to USB control * transfers and the USB scratch area. If the lock is already grabbed * this function returns zero. Else a value of one is returned. */ uint8_t usbd_ctrl_lock(struct usb_device *udev) { if (sx_xlocked(&udev->ctrl_sx)) return (0); sx_xlock(&udev->ctrl_sx); /* * We need to allow suspend and resume at this point, else the * control transfer will timeout if the device is suspended! */ if (usbd_enum_is_locked(udev)) usbd_sr_unlock(udev); return (1); } void usbd_ctrl_unlock(struct usb_device *udev) { sx_xunlock(&udev->ctrl_sx); /* * Restore the suspend and resume lock after we have unlocked * the USB control transfer lock to avoid LOR: */ if (usbd_enum_is_locked(udev)) usbd_sr_lock(udev); } /* * The following function is used to set the per-interface specific * plug and play information. The string referred to by the pnpinfo * argument can safely be freed after calling this function. The * pnpinfo of an interface will be reset at device detach or when * passing a NULL argument to this function. This function * returns zero on success, else a USB_ERR_XXX failure code. */ usb_error_t usbd_set_pnpinfo(struct usb_device *udev, uint8_t iface_index, const char *pnpinfo) { struct usb_interface *iface; iface = usbd_get_iface(udev, iface_index); if (iface == NULL) return (USB_ERR_INVAL); if (iface->pnpinfo != NULL) { free(iface->pnpinfo, M_USBDEV); iface->pnpinfo = NULL; } if (pnpinfo == NULL || pnpinfo[0] == 0) return (0); /* success */ iface->pnpinfo = strdup(pnpinfo, M_USBDEV); if (iface->pnpinfo == NULL) return (USB_ERR_NOMEM); return (0); /* success */ } usb_error_t usbd_add_dynamic_quirk(struct usb_device *udev, uint16_t quirk) { uint8_t x; for (x = 0; x != USB_MAX_AUTO_QUIRK; x++) { if (udev->autoQuirk[x] == 0 || udev->autoQuirk[x] == quirk) { udev->autoQuirk[x] = quirk; return (0); /* success */ } } return (USB_ERR_NOMEM); } /* * The following function is used to select the endpoint mode. It * should not be called outside enumeration context. */ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep, uint8_t ep_mode) { usb_error_t error; uint8_t do_unlock; /* Prevent re-enumeration */ do_unlock = usbd_enum_lock(udev); if (udev->bus->methods->set_endpoint_mode != NULL) { error = (udev->bus->methods->set_endpoint_mode) ( udev, ep, ep_mode); } else if (ep_mode != USB_EP_MODE_DEFAULT) { error = USB_ERR_INVAL; } else { error = 0; } /* only set new mode regardless of error */ ep->ep_mode = ep_mode; if (do_unlock) usbd_enum_unlock(udev); return (error); } uint8_t usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep) { return (ep->ep_mode); } diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c index a9738eeb7c2b..a2a50fec48a3 100644 --- a/sys/dev/xen/control/control.c +++ b/sys/dev/xen/control/control.c @@ -1,487 +1,486 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause * * Copyright (c) 2010 Justin T. Gibbs, Spectra Logic Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ /*- * PV suspend/resume support: * * Copyright (c) 2004 Christian Limpach. * Copyright (c) 2004-2006,2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christian Limpach. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * HVM suspend/resume support: * * Copyright (c) 2008 Citrix Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /** * \file control.c * * \brief Device driver to respond to control domain events that impact * this VM. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include bool xen_suspend_cancelled; /*--------------------------- Forward Declarations --------------------------*/ /** Function signature for shutdown event handlers. */ typedef void (xctrl_shutdown_handler_t)(void); static xctrl_shutdown_handler_t xctrl_poweroff; static xctrl_shutdown_handler_t xctrl_reboot; static xctrl_shutdown_handler_t xctrl_suspend; static xctrl_shutdown_handler_t xctrl_crash; /*-------------------------- Private Data Structures -------------------------*/ /** Element type for lookup table of event name to handler. */ struct xctrl_shutdown_reason { const char *name; xctrl_shutdown_handler_t *handler; }; /** Lookup table for shutdown event name to handler. */ static const struct xctrl_shutdown_reason xctrl_shutdown_reasons[] = { { "poweroff", xctrl_poweroff }, { "reboot", xctrl_reboot }, { "suspend", xctrl_suspend }, { "crash", xctrl_crash }, { "halt", xctrl_poweroff }, }; struct xctrl_softc { struct xs_watch xctrl_watch; }; /*------------------------------ Event Handlers ------------------------------*/ static void xctrl_poweroff() { shutdown_nice(RB_POWEROFF|RB_HALT); } static void xctrl_reboot() { shutdown_nice(0); } static void xctrl_suspend() { #ifdef SMP cpuset_t cpu_suspend_map; #endif EVENTHANDLER_INVOKE(power_suspend_early); xs_lock(); stop_all_proc(); xs_unlock(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0")); /* - * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE - * drivers need this. + * Be sure to hold Giant across DEVICE_SUSPEND/RESUME. */ - mtx_lock(&Giant); + bus_topo_lock(); if (DEVICE_SUSPEND(root_bus) != 0) { - mtx_unlock(&Giant); + bus_topo_unlock(); printf("%s: device_suspend failed\n", __func__); return; } #ifdef SMP #ifdef EARLY_AP_STARTUP /* * Suspend other CPUs.
This prevents IPIs while we * are resuming, and will allow us to reset per-cpu * vcpu_info on resume. */ cpu_suspend_map = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map); if (!CPU_EMPTY(&cpu_suspend_map)) suspend_cpus(cpu_suspend_map); #else CPU_ZERO(&cpu_suspend_map); /* silence gcc */ if (smp_started) { /* * Suspend other CPUs. This prevents IPIs while we * are resuming, and will allow us to reset per-cpu * vcpu_info on resume. */ cpu_suspend_map = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map); if (!CPU_EMPTY(&cpu_suspend_map)) suspend_cpus(cpu_suspend_map); } #endif #endif /* * Prevent any races with evtchn_interrupt() handler. */ disable_intr(); intr_suspend(); xen_hvm_suspend(); xen_suspend_cancelled = !!HYPERVISOR_suspend(0); if (!xen_suspend_cancelled) { xen_hvm_resume(false); } intr_resume(xen_suspend_cancelled != 0); enable_intr(); /* * Reset grant table info. */ if (!xen_suspend_cancelled) { gnttab_resume(NULL); } #ifdef SMP if (!CPU_EMPTY(&cpu_suspend_map)) { /* * Now that event channels have been initialized, * resume CPUs. */ resume_cpus(cpu_suspend_map); /* Send an IPI_BITMAP in case there are pending bitmap IPIs. */ lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL); } #endif /* * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or * similar. */ DEVICE_RESUME(root_bus); - mtx_unlock(&Giant); + bus_topo_unlock(); /* * Warm up timecounter again and reset system clock. */ timecounter->tc_get_timecount(timecounter); inittodr(time_second); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); if (bootverbose) printf("System resumed after suspension\n"); } static void xctrl_crash() { panic("Xen directed crash"); } static void xen_pv_shutdown_final(void *arg, int howto) { /* * Inform the hypervisor that shutdown is complete. * This is not necessary in HVM domains since Xen * emulates ACPI in that mode and FreeBSD's ACPI * support will request this transition. */ if (howto & (RB_HALT | RB_POWEROFF)) HYPERVISOR_shutdown(SHUTDOWN_poweroff); else HYPERVISOR_shutdown(SHUTDOWN_reboot); } /*------------------------------ Event Reception -----------------------------*/ static void xctrl_on_watch_event(struct xs_watch *watch, const char **vec, unsigned int len) { const struct xctrl_shutdown_reason *reason; const struct xctrl_shutdown_reason *last_reason; char *result; int error; int result_len; error = xs_read(XST_NIL, "control", "shutdown", &result_len, (void **)&result); if (error != 0 || result_len == 0) return; /* Acknowledge the request by writing back an empty string. */ error = xs_write(XST_NIL, "control", "shutdown", ""); if (error != 0) printf("unable to ack shutdown request, proceeding anyway\n"); reason = xctrl_shutdown_reasons; last_reason = reason + nitems(xctrl_shutdown_reasons); while (reason < last_reason) { if (!strcmp(result, reason->name)) { reason->handler(); break; } reason++; } free(result, M_XENSTORE); } /*------------------ Private Device Attachment Functions --------------------*/ /** * \brief Identify instances of this device type in the system. * * \param driver The driver performing this identify action. * \param parent The NewBus parent device for any devices this method adds. 
*/ static void xctrl_identify(driver_t *driver __unused, device_t parent) { /* * A single device instance for our driver is always present * in a system operating under Xen. */ BUS_ADD_CHILD(parent, 0, driver->name, 0); } /** * \brief Probe for the existence of the Xen Control device * * \param dev NewBus device_t for this Xen control instance. * * \return Always returns BUS_PROBE_NOWILDCARD, indicating success. */ static int xctrl_probe(device_t dev) { device_set_desc(dev, "Xen Control Device"); return (BUS_PROBE_NOWILDCARD); } /** * \brief Attach the Xen control device. * * \param dev NewBus device_t for this Xen control instance. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xctrl_attach(device_t dev) { struct xctrl_softc *xctrl; xctrl = device_get_softc(dev); /* Activate watch */ xctrl->xctrl_watch.node = "control/shutdown"; xctrl->xctrl_watch.callback = xctrl_on_watch_event; xctrl->xctrl_watch.callback_data = (uintptr_t)xctrl; /* * We don't care about which path was updated, just about value * changes on that single node, hence there's no need to queue more * than one event. */ xctrl->xctrl_watch.max_pending = 1; xs_register_watch(&xctrl->xctrl_watch); if (xen_pv_domain()) EVENTHANDLER_REGISTER(shutdown_final, xen_pv_shutdown_final, NULL, SHUTDOWN_PRI_LAST); return (0); } /** * \brief Detach the Xen control device. * * \param dev NewBus device_t for this Xen control device instance. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xctrl_detach(device_t dev) { struct xctrl_softc *xctrl; xctrl = device_get_softc(dev); /* Release watch */ xs_unregister_watch(&xctrl->xctrl_watch); return (0); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t xctrl_methods[] = { /* Device interface */ DEVMETHOD(device_identify, xctrl_identify), DEVMETHOD(device_probe, xctrl_probe), DEVMETHOD(device_attach, xctrl_attach), DEVMETHOD(device_detach, xctrl_detach), DEVMETHOD_END }; DEFINE_CLASS_0(xctrl, xctrl_driver, xctrl_methods, sizeof(struct xctrl_softc)); devclass_t xctrl_devclass; DRIVER_MODULE(xctrl, xenstore, xctrl_driver, xctrl_devclass, NULL, NULL); diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 8b7bcbf4e3e0..f7e55e7f48d8 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6068 +1,6089 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(dev_lookup); static int bus_child_location_sb(device_t child, struct sbuf *sb); static int bus_child_pnpinfo_sb(device_t child, struct sbuf *sb); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJ<indent; iJ++) printf("  "); printf p ; } while (0) static void print_device_short(device_t dev, int indent); static void print_device(device_t dev, int indent); void print_device_tree_short(device_t dev, int indent); void print_device_tree(device_t dev, int indent); static void print_driver_short(driver_t *driver, int indent); static void print_driver(driver_t *driver, int indent); static void print_driver_list(driver_list_t drivers, int indent); static void print_devclass_short(devclass_t dc, int indent); static void print_devclass(devclass_t dc, int indent); void print_devclass_list_short(void); void print_devclass_list(void); #else /* Make the compiler ignore the function calls */ #define PDEBUG(a) /* nop */ #define DEVICENAME(d) /* nop */ #define print_device_short(d,i) /* nop */ #define print_device(d,i) /* nop */ #define print_device_tree_short(d,i) /* nop */ #define print_device_tree(d,i) /* nop */ #define print_driver_short(d,i) /* nop */ #define print_driver(d,i) /* nop */ #define print_driver_list(d,i) /* nop */ #define print_devclass_short(d,i) /* nop */ #define print_devclass(d,i) /* nop */ #define print_devclass_list_short() /* nop */ #define print_devclass_list() /* nop */ #endif /* * dev sysctl tree */ enum { DEVCLASS_SYSCTL_PARENT, }; static int devclass_sysctl_handler(SYSCTL_HANDLER_ARGS) { devclass_t dc = (devclass_t)arg1; const char *value; switch (arg2) { case DEVCLASS_SYSCTL_PARENT: value = dc->parent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: bus_child_location_sb(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: bus_child_pnpinfo_sb(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ?
dev->parent->nameunit : ""); break; default: sbuf_delete(&sb); return (EINVAL); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } /* * /dev/devctl implementation */ /* * This design allows only one reader for /dev/devctl. This is not desirable * in the long run, but will get a lot of hair out of this implementation. * Maybe we should make this device a clonable device. * * Also note: we specifically do not attach a device to the device_t tree * to avoid potential chicken and egg problems. One could argue that all * of this belongs to the root node. 
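 *
 * Illustrative note (an editorial addition, not part of the original
 * source): the single reader is normally devd(8); a minimal userland
 * consumer receives one complete event per read(2) call, e.g.:
 *
 *	int fd = open("/dev/devctl", O_RDONLY);
 *	char ev[1024];
 *	ssize_t n = read(fd, ev, sizeof(ev));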
*/ #define DEVCTL_DEFAULT_QUEUE_LEN 1000 static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS); static int devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN; SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_queue, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_queue, "I", "devctl queue length"); static d_open_t devopen; static d_close_t devclose; static d_read_t devread; static d_ioctl_t devioctl; static d_poll_t devpoll; static d_kqfilter_t devkqfilter; static struct cdevsw dev_cdevsw = { .d_version = D_VERSION, .d_open = devopen, .d_close = devclose, .d_read = devread, .d_ioctl = devioctl, .d_poll = devpoll, .d_kqfilter = devkqfilter, .d_name = "devctl", }; #define DEVCTL_BUFFER (1024 - sizeof(void *)) struct dev_event_info { STAILQ_ENTRY(dev_event_info) dei_link; char dei_data[DEVCTL_BUFFER]; }; STAILQ_HEAD(devq, dev_event_info); static struct dev_softc { int inuse; int nonblock; int queued; int async; struct mtx mtx; struct cv cv; struct selinfo sel; struct devq devq; struct sigio *sigio; uma_zone_t zone; } devsoftc; static void filt_devctl_detach(struct knote *kn); static int filt_devctl_read(struct knote *kn, long hint); struct filterops devctl_rfiltops = { .f_isfd = 1, .f_detach = filt_devctl_detach, .f_event = filt_devctl_read, }; static struct cdev *devctl_dev; static void devinit(void) { int reserve; uma_zone_t z; devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl"); mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF); cv_init(&devsoftc.cv, "dev cv"); STAILQ_INIT(&devsoftc.devq); knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx); if (devctl_queue_length > 0) { /* * Allocate a zone for the messages. Preallocate 2% of these for * a reserve. Allow only devctl_queue_length slabs to cap memory * usage. The reserve usually allows coverage of surges of * events during memory shortages. Normally we won't have to * re-use events from the queue, but will in extreme shortages. */ z = devsoftc.zone = uma_zcreate("DEVCTL", sizeof(struct dev_event_info), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); reserve = max(devctl_queue_length / 50, 100); /* 2% reserve */ uma_zone_set_max(z, devctl_queue_length); uma_zone_set_maxcache(z, 0); uma_zone_reserve(z, reserve); uma_prealloc(z, reserve); } devctl2_init(); } static int devopen(struct cdev *dev, int oflags, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); if (devsoftc.inuse) { mtx_unlock(&devsoftc.mtx); return (EBUSY); } /* move to init */ devsoftc.inuse = 1; mtx_unlock(&devsoftc.mtx); return (0); } static int devclose(struct cdev *dev, int fflag, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); devsoftc.inuse = 0; devsoftc.nonblock = 0; devsoftc.async = 0; cv_broadcast(&devsoftc.cv); funsetown(&devsoftc.sigio); mtx_unlock(&devsoftc.mtx); return (0); } /* * The read channel for this device is used to report changes to * userland in realtime. We are required to free the data as well as * the n1 object because we allocate them separately. Also note that * we return one record at a time. If you try to read this device a * character at a time, you will lose the rest of the data. Listening * programs are expected to cope. */ static int devread(struct cdev *dev, struct uio *uio, int ioflag) { struct dev_event_info *n1; int rv; mtx_lock(&devsoftc.mtx); while (STAILQ_EMPTY(&devsoftc.devq)) { if (devsoftc.nonblock) { mtx_unlock(&devsoftc.mtx); return (EAGAIN); } rv = cv_wait_sig(&devsoftc.cv, &devsoftc.mtx); if (rv) { /* * Need to translate ERESTART to EINTR here? 
-- jake */ mtx_unlock(&devsoftc.mtx); return (rv); } } n1 = STAILQ_FIRST(&devsoftc.devq); STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); devsoftc.queued--; mtx_unlock(&devsoftc.mtx); rv = uiomove(n1->dei_data, strlen(n1->dei_data), uio); uma_zfree(devsoftc.zone, n1); return (rv); } static int devioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { switch (cmd) { case FIONBIO: if (*(int*)data) devsoftc.nonblock = 1; else devsoftc.nonblock = 0; return (0); case FIOASYNC: if (*(int*)data) devsoftc.async = 1; else devsoftc.async = 0; return (0); case FIOSETOWN: return fsetown(*(int *)data, &devsoftc.sigio); case FIOGETOWN: *(int *)data = fgetown(&devsoftc.sigio); return (0); /* (un)Support for other fcntl() calls. */ case FIOCLEX: case FIONCLEX: case FIONREAD: default: break; } return (ENOTTY); } static int devpoll(struct cdev *dev, int events, struct thread *td) { int revents = 0; mtx_lock(&devsoftc.mtx); if (events & (POLLIN | POLLRDNORM)) { if (!STAILQ_EMPTY(&devsoftc.devq)) revents = events & (POLLIN | POLLRDNORM); else selrecord(td, &devsoftc.sel); } mtx_unlock(&devsoftc.mtx); return (revents); } static int devkqfilter(struct cdev *dev, struct knote *kn) { int error; if (kn->kn_filter == EVFILT_READ) { kn->kn_fop = &devctl_rfiltops; knlist_add(&devsoftc.sel.si_note, kn, 0); error = 0; } else error = EINVAL; return (error); } static void filt_devctl_detach(struct knote *kn) { knlist_remove(&devsoftc.sel.si_note, kn, 0); } static int filt_devctl_read(struct knote *kn, long hint) { kn->kn_data = devsoftc.queued; return (kn->kn_data != 0); } /** * @brief Return whether the userland process is running */ bool devctl_process_running(void) { return (devsoftc.inuse == 1); } static struct dev_event_info * devctl_alloc_dei(void) { struct dev_event_info *dei = NULL; mtx_lock(&devsoftc.mtx); if (devctl_queue_length == 0) goto out; dei = uma_zalloc(devsoftc.zone, M_NOWAIT); if (dei == NULL) dei = uma_zalloc(devsoftc.zone, M_NOWAIT | M_USE_RESERVE); if (dei == NULL) { /* * Guard against no items in the queue. Normally this won't * happen, but if lots of events arrive at once we may run out * of allocated space before any of those events have been * queued, leaving nothing here to steal. This can also happen * with error injection. Fail safe by returning * NULL in that case.
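 *
 * (Editorial note, not part of the original source: the two
 * uma_zalloc() calls above first try a plain M_NOWAIT allocation and
 * only then dip into the preallocated reserve via M_USE_RESERVE;
 * recycling the oldest queued event below is the last resort once
 * both allocations fail.)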
*/ if (devsoftc.queued == 0) goto out; dei = STAILQ_FIRST(&devsoftc.devq); STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); devsoftc.queued--; } MPASS(dei != NULL); *dei->dei_data = '\0'; out: mtx_unlock(&devsoftc.mtx); return (dei); } static struct dev_event_info * devctl_alloc_dei_sb(struct sbuf *sb) { struct dev_event_info *dei; dei = devctl_alloc_dei(); if (dei != NULL) sbuf_new(sb, dei->dei_data, sizeof(dei->dei_data), SBUF_FIXEDLEN); return (dei); } static void devctl_free_dei(struct dev_event_info *dei) { uma_zfree(devsoftc.zone, dei); } static void devctl_queue(struct dev_event_info *dei) { mtx_lock(&devsoftc.mtx); STAILQ_INSERT_TAIL(&devsoftc.devq, dei, dei_link); devsoftc.queued++; cv_broadcast(&devsoftc.cv); KNOTE_LOCKED(&devsoftc.sel.si_note, 0); mtx_unlock(&devsoftc.mtx); selwakeup(&devsoftc.sel); if (devsoftc.async && devsoftc.sigio != NULL) pgsigio(&devsoftc.sigio, SIGIO, 0); } /** * @brief Send a 'notification' to userland, using standard ways */ void devctl_notify(const char *system, const char *subsystem, const char *type, const char *data) { struct dev_event_info *dei; struct sbuf sb; if (system == NULL || subsystem == NULL || type == NULL) return; dei = devctl_alloc_dei_sb(&sb); if (dei == NULL) return; sbuf_cpy(&sb, "!system="); sbuf_cat(&sb, system); sbuf_cat(&sb, " subsystem="); sbuf_cat(&sb, subsystem); sbuf_cat(&sb, " type="); sbuf_cat(&sb, type); if (data != NULL) { sbuf_putc(&sb, ' '); sbuf_cat(&sb, data); } sbuf_putc(&sb, '\n'); if (sbuf_finish(&sb) != 0) devctl_free_dei(dei); /* overflow -> drop it */ else devctl_queue(dei); } /* * Common routine that tries to make sending messages as easy as possible. * We allocate memory for the data, copy strings into that, but do not * free it unless there's an error. The dequeue part of the driver should * free the data. We don't send data when the device is disabled. We do * send data, even when we have no listeners, because we wish to avoid * races relating to startup and restart of listening applications. * * devaddq is designed to string together the type of event, with the * object of that event, plus the plug and play info and location info * for that event. This is likely most useful for devices, but less * useful for other consumers of this interface. Those should use * the devctl_notify() interface instead. * * Output: * ${type}${what} at $(location dev) $(pnp-info dev) on $(parent dev) */ static void devaddq(const char *type, const char *what, device_t dev) { struct dev_event_info *dei; const char *parstr; struct sbuf sb; dei = devctl_alloc_dei_sb(&sb); if (dei == NULL) return; sbuf_cpy(&sb, type); sbuf_cat(&sb, what); sbuf_cat(&sb, " at "); /* Add in the location */ bus_child_location_sb(dev, &sb); sbuf_putc(&sb, ' '); /* Add in pnpinfo */ bus_child_pnpinfo_sb(dev, &sb); /* Get the parent of this device, or / if high enough in the tree. */ if (device_get_parent(dev) == NULL) parstr = "."; /* Or '/' ? */ else parstr = device_get_nameunit(device_get_parent(dev)); sbuf_cat(&sb, " on "); sbuf_cat(&sb, parstr); sbuf_putc(&sb, '\n'); if (sbuf_finish(&sb) != 0) goto bad; devctl_queue(dei); return; bad: devctl_free_dei(dei); } /* * A device was added to the tree. We are called just after it successfully * attaches (that is, probe and attach success for this device). No call * is made if a device is merely parented into the tree. See devnomatch * if probe fails. If attach fails, no notification is sent (but maybe * we should have a different message for this). 
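 *
 * Illustrative example (an editorial addition with made-up values):
 * per the devaddq() format above, a successful attach of a
 * hypothetical uart1 would queue a line shaped like
 *
 *	+uart1 at port=0x2f8 irq=3 on isa0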
*/ static void devadded(device_t dev) { devaddq("+", device_get_nameunit(dev), dev); } /* * A device was removed from the tree. We are called just before this * happens. */ static void devremoved(device_t dev) { devaddq("-", device_get_nameunit(dev), dev); } /* * Called when there's no match for this device. This is only called * the first time that no match happens, so we don't keep getting this * message. Should that prove to be undesirable, we can change it. * This is called when all drivers that can attach to a given bus * decline to accept this device. Other errors may not be detected. */ static void devnomatch(device_t dev) { devaddq("?", "", dev); } static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS) { int q, error; q = devctl_queue_length; error = sysctl_handle_int(oidp, &q, 0, req); if (error || !req->newptr) return (error); if (q < 0) return (EINVAL); /* * When set as a tunable, we've not yet initialized the mutex. * It is safe to just assign to devctl_queue_length and return * as we're racing no one. We'll use whatever value set in * devinit. */ if (!mtx_initialized(&devsoftc.mtx)) { devctl_queue_length = q; return (0); } /* * XXX It's hard to grow or shrink the UMA zone. Only allow * disabling the queue size for the moment until underlying * UMA issues can be sorted out. */ if (q != 0) return (EINVAL); if (q == devctl_queue_length) return (0); mtx_lock(&devsoftc.mtx); devctl_queue_length = 0; uma_zdestroy(devsoftc.zone); devsoftc.zone = 0; mtx_unlock(&devsoftc.mtx); return (0); } /** * @brief safely quotes strings that might have double quotes in them. * * The devctl protocol relies on quoted strings having matching quotes. * This routine quotes any internal quotes so the resulting string * is safe to pass to snprintf to construct, for example pnp info strings. * * @param sb sbuf to place the characters into * @param src Original buffer. */ void devctl_safe_quote_sb(struct sbuf *sb, const char *src) { while (*src != '\0') { if (*src == '"' || *src == '\\') sbuf_putc(sb, '\\'); sbuf_putc(sb, *src++); } } /* End of /dev/devctl code */ static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); +struct mtx * +bus_topo_mtx(void) +{ + + return (&Giant); +} + +void +bus_topo_lock(void) +{ + + mtx_lock(bus_topo_mtx()); +} + +void +bus_topo_unlock(void) +{ + + mtx_unlock(bus_topo_mtx()); +} + /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. */ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. 
Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. * * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. 
* Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; devclass_t child_dc; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. */ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; child_dc = devclass_find_internal(driver->name, parentname, TRUE); if (dcp != NULL) *dcp = child_dc; dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. 
If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param busclass the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesce a device driver in a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesce_driver() will first attempt to quiesce each * device.
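 *
 * (Editorial note, not part of the original source: this is the
 * MOD_QUIESCE path, so an unforced kldunload(8) of a driver module
 * normally arrives here first, and a device that refuses to quiesce
 * causes the unload to fail before any detach is attempted.)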
* * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0. 
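 *
 * Illustrative usage sketch (an editorial addition, not from the
 * original source):
 * @code
 *	device_t *devs;
 *	int ndevs, i;
 *
 *	if (devclass_get_devices(dc, &devs, &ndevs) == 0) {
 *		for (i = 0; i < ndevs; i++)
 *			device_printf(devs[i], "in this devclass\n");
 *		free(devs, M_TEMP);
 *	}
 * @endcode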
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp.
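 *
 * Illustrative sketch (an editorial addition, not from the original
 * source): a caller with no preference passes -1 and reads the result
 * back:
 * @code
 *	int unit = -1;
 *
 *	if (devclass_alloc_unit(dc, dev, &unit) == 0)
 *		printf("allocated %s%d\n", dc->name, unit);
 * @endcode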
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MAX(1, MINALLOCSIZE / sizeof(device_t))); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
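 *
 * Illustrative usage sketch (an editorial addition, not from the
 * original source; "foo" is a hypothetical devclass name):
 * @code
 *	device_t child;
 *
 *	child = device_add_child_ordered(bus, 10, "foo", -1);
 *	if (child == NULL)
 *		return (ENXIO);
 * @endcode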
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unit error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* detach parent before deleting children, if any */ if ((error = device_detach(child)) != 0) return (error); /* remove children second */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.) 
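 *
 * Illustrative usage sketch (an editorial addition, not from the
 * original source; "foo" is a hypothetical devclass name):
 * @code
 *	device_t child = device_find_child(bus, "foo", -1);
 *
 *	if (child != NULL)
 *		device_printf(child, "lowest-numbered foo child\n");
 * @endcode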
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; /* We should preserve the devclass (or lack of) set by the bus. */ int hasclass = (child->devclass != NULL); GIANT_REQUIRED; dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. */ if (child->state == DS_ALIVE) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. 
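 *
 * (Editorial note, not part of the original source: probe results
 * are negative priorities, so e.g. BUS_PROBE_SPECIFIC (0) beats
 * BUS_PROBE_VENDOR (-10), which beats BUS_PROBE_DEFAULT (-20); the
 * "result > pri" test above keeps the result closest to zero.)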
*/ if (best && pri == 0) break; } if (best == NULL) return (ENXIO); /* * If we found a driver, change state and initialise the devclass. */ if (pri < 0) { /* Set the winning driver, devclass, and flags. */ result = device_set_driver(child, best->driver); if (result != 0) return (result); if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) { (void)device_set_driver(child, NULL); return (result); } } resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); /* * A bit bogus. Call the probe method again to make sure * that we have the right description. */ result = DEVICE_PROBE(child); if (result > 0) { if (!hasclass) (void)device_set_devclass(child, NULL); (void)device_set_driver(child, NULL); return (result); } } child->state = DS_ALIVE; bus_data_generation_update(); return (0); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char* desc, int copy) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (copy && desc) { dev->desc = malloc(strlen(desc) + 1, M_BUS, M_NOWAIT); if (dev->desc) { strcpy(dev->desc, desc); dev->flags |= DF_DESCMALLOCED; } } else { /* Avoid a -Wcast-qual warning */ dev->desc = (char *)(uintptr_t) desc; } bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, FALSE); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). */ void device_set_desc_copy(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, TRUE); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. 
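 *
 * Illustrative sketch (an editorial addition, not from the original
 * source; sc and its refs counter are hypothetical): a refcounted
 * softc is claimed at attach time and freed on the last release:
 * @code
 *	device_claim_softc(dev);
 *	...
 *	if (refcount_release(&sc->refs))
 *		device_free_softc(sc);
 * @endcode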
*/ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { if (dev->state < DS_ATTACHING) panic("device_busy: called for unattached device"); if (dev->busy == 0 && dev->parent) device_busy(dev->parent); dev->busy++; if (dev->state == DS_ATTACHED) dev->state = DS_BUSY; } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { if (dev->busy != 0 && dev->state != DS_BUSY && dev->state != DS_ATTACHING) panic("device_unbusy: called for non-busy device %s", device_get_nameunit(dev)); dev->busy--; if (dev->busy == 0) { if (dev->parent) device_unbusy(dev->parent); if (dev->state == DS_BUSY) dev->state = DS_ATTACHED; } } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type) { device_t bus = device_get_parent(dev); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: break; case DEVICE_PROP_UINT32: if (sz % 4 != 0) return (-1); break; case DEVICE_PROP_UINT64: if (sz % 8 != 0) return (-1); break; default: return (-1); } return (BUS_GET_PROPERTY(bus, dev, prop, val, sz, type)); } bool device_has_property(device_t dev, const char *prop) { return (device_get_property(dev, prop, NULL, 0, DEVICE_PROP_ANY) >= 0); } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state
>= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return this status. * * This function is the core of the device autoconfiguration * system. Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. 
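 *
 * For illustration only (the "foo" driver and FOO_DEVID constant are
 * hypothetical, not part of this file), a typical DEVICE_PROBE() method
 * checks the hardware identity and returns a probe priority:
 *
 *	static int
 *	foo_probe(device_t dev)
 *	{
 *		if (pci_get_devid(dev) != FOO_DEVID)	// not our hardware
 *			return (ENXIO);
 *		device_set_desc(dev, "Foo controller");
 *		return (BUS_PROBE_DEFAULT);
 *	}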
* * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; GIANT_REQUIRED; if (dev->state >= DS_ALIVE) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * Calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; GIANT_REQUIRED; error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); CURVNET_SET_QUIET(vnet0); error = device_attach(dev); CURVNET_RESTORE(); return (error); } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { device_disable(dev); if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; return (error); } dev->flags |= DF_ATTACHED_ONCE; /* We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); if (dev->busy) dev->state = DS_BUSY; else dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); devadded(dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. 
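 *
 * A matching DEVICE_DETACH() method, sketched here with hypothetical
 * "foo" names and softc fields, typically detaches children and
 * releases its resources before returning:
 *
 *	static int
 *	foo_detach(device_t dev)
 *	{
 *		struct foo_softc *sc = device_get_softc(dev);
 *		int error;
 *
 *		error = bus_generic_detach(dev);	// detach children first
 *		if (error != 0)
 *			return (error);
 *		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
 *		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
 *		return (0);
 *	}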
* * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; GIANT_REQUIRED; PDEBUG(("%s", DEVICENAME(dev))); if (dev->state == DS_BUSY) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } devremoved(dev); if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method. * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->state == DS_BUSY) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number). */ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; if (unit == dev->unit) return (0); dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. 
A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count the size of the range (end - start + 1) */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an entry already exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count the size of the range (end - start + 1) */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy, meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved, meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. 
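 *
 * Illustrative sketch (the addresses are made up, not from this file):
 * a bus driver adding a child's memory window with
 * resource_list_add_next() and looking the entry up again later:
 *
 *	rid = resource_list_add_next(rl, SYS_RES_MEMORY,
 *	    0xd0000000, 0xd0000fff, 0x1000);
 *	...
 *	rle = resource_list_find(rl, SYS_RES_MEMORY, rid);
 *	if (rle != NULL)
 *		printf("window at %#jx\n", (uintmax_t)rle->start);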
*/ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the RLE_ALLOCATED flag is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. 
* * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation. * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device which is requesting an allocation * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); } rle = resource_list_find(rl, type, *rid); if (!rle) return (NULL); /* no resource of that type/rid */ if (rle->res) { if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) return (NULL); if ((flags & RF_ACTIVE) && bus_activate_resource(child, type, *rid, rle->res) != 0) return (NULL); rle->flags |= RLE_ALLOCATED; return (rle->res); } device_printf(bus, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * Record the new range. */ if (rle->res) { rle->start = rman_get_start(rle->res); rle->end = rman_get_end(rle->res); rle->count = count; } return (rle->res); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE() * * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally * used with resource_list_alloc(). 
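 *
 * As a sketch (the "mybus" names are hypothetical; compare
 * bus_generic_rl_release_resource() later in this file), a bus method
 * built on this helper might look like:
 *
 *	static int
 *	mybus_release_resource(device_t bus, device_t child, int type,
 *	    int rid, struct resource *r)
 *	{
 *		struct mybus_devinfo *dinfo = device_get_ivars(child);
 *
 *		return (resource_list_release(&dinfo->resources, bus,
 *		    child, type, rid, r));
 *	}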
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param type the type of resource to release * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res)); } rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to clean up resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, type, rman_get_rid(rle->res), rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
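 *
 * Illustrative flow (hypothetical variables, not from this file): a bus
 * reserves a decoded window when a child is added and hands it back
 * when the child is deleted:
 *
 *	rid = 0;
 *	r = resource_list_reserve(rl, bus, child, SYS_RES_MEMORY, &rid,
 *	    start, end, count, 0);
 *	...
 *	resource_list_unreserve(rl, bus, child, SYS_RES_MEMORY, rid);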
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose reserved resource is being released * @param type the type of resource to release * @param rid the resource identifier * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); if (passthrough) panic( "resource_list_unreserve() should only be called for direct children"); rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_unreserve: can't find resource"); if (!(rle->flags & RLE_RESERVED)) return (EINVAL); if (rle->flags & RLE_ALLOCATED) return (EBUSY); rle->flags &= ~RLE_RESERVED; return (resource_list_release(rl, bus, child, type, rid, rle->res)); } /** * @brief Print a description of resources in a resource list * * Print all resources of a specified type, for use in BUS_PRINT_CHILD(). * The name is printed if at least one resource of the given type is available. * The format is used to print resource start and end. * * @param rl the resource list to print * @param name the name of @p type, e.g. @c "memory" * @param type the type of resource entry to print * @param format printf(9) format string to print resource * start and end values * * @returns the number of characters printed */ int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; /* Yes, this is kinda cheating */ STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); retval += printf(format, rle->start + rle->count - 1); } } } return (retval); } /** * @brief Releases all the resources in a list. * * @param rl The resource list to purge. * * @returns nothing */ void resource_list_purge(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) bus_release_resource(rman_get_device(rle->res), rle->type, rle->rid, rle->res); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit) { return (device_add_child_ordered(dev, order, name, unit)); } /** * @brief Helper function for implementing DEVICE_PROBE() * * This function can be used to help implement the DEVICE_PROBE() for * a bus (i.e. a device which has other devices attached to it). It * calls the DEVICE_IDENTIFY() method of each driver in the device's * devclass. */ int bus_generic_probe(device_t dev) { devclass_t dc = dev->devclass; driverlink_t dl; TAILQ_FOREACH(dl, &dc->drivers, link) { /* * If this driver's pass is too high, then ignore it. * For most drivers in the default pass, this will * never be true. For early-pass buses, this routine * only calls the identify routines of eligible * drivers. Drivers for later * passes should have their identify routines called * on early-pass buses during BUS_NEW_PASS(). 
*/ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } return (0); } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } return (0); } /** * @brief Helper function for delaying attaching children * * Many buses cannot run the bus transactions which children need for * probe and attach until interrupts and/or timers are running. This function * delays their attach until interrupts and timers are enabled. */ int bus_delayed_attach_children(device_t dev) { /* Probe and attach the bus children when interrupts are available */ config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for * a bus. It calls device_detach() for each of the device's * children. */ int bus_generic_detach(device_t dev) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); /* * Detach children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; /* * Shut down children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) child->flags |= DF_SUSPENDED; return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child; /* * Suspend children in the reverse order. * For most buses all children are equal, so the order does not matter. * Other buses, such as acpi, carefully order their child devices to * express implicit dependencies between them. For such buses it is * safer to bring down devices in the reverse order. 
*/ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error != 0) { child = TAILQ_NEXT(child, link); if (child != NULL) { TAILQ_FOREACH_FROM(child, &dev->children, link) BUS_RESUME_CHILD(dev, child); } return (error); } } return (0); } /** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. */ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); } /** * @brief Helper function for implementing BUS_RESET_POST * * A bus can use this function to implement common operations of * re-attaching or resuming the children after the bus itself was * reset, and after restoring bus-unique state of children. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_post(device_t dev, int flags) { device_t child; int error, error1; error = 0; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESET_POST(dev, child); error1 = (flags & DEVF_RESET_DETACH) != 0 ? device_probe_and_attach(child) : BUS_RESUME_CHILD(dev, child); if (error == 0 && error1 != 0) error = error1; } return (error); } static void bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags) { child = TAILQ_NEXT(child, link); if (child == NULL) return; TAILQ_FOREACH_FROM(child, &dev->children, link) { BUS_RESET_POST(dev, child); if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } /** * @brief Helper function for implementing BUS_RESET_PREPARE * * A bus can use this function to implement common operations of * detaching or suspending the children before the bus itself is * reset, and then save bus-unique state of children that must * persist across reset. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_prepare(device_t dev, int flags) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { error = BUS_RESET_PREPARE(dev, child); if (error != 0) { if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } if (error != 0) { bus_helper_reset_prepare_rollback(dev, child, flags); return (error); } } return (0); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the last part of the ascii representation of * @p child, which consists of the string @c " on " followed by the * name and unit of the @p dev. 
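 *
 * For illustration (the "mybus" name is hypothetical, not from this
 * file), a bus that wants to report child resources can print them
 * between the header and the footer using resource_list_print_type():
 *
 *	static int
 *	mybus_print_child(device_t dev, device_t child)
 *	{
 *		struct resource_list *rl = BUS_GET_RESOURCE_LIST(dev, child);
 *		int retval = 0;
 *
 *		retval += bus_print_child_header(dev, child);
 *		retval += resource_list_print_type(rl, "mem",
 *		    SYS_RES_MEMORY, "%#jx");
 *		retval += bus_print_child_footer(dev, child);
 *		return (retval);
 *	}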
* * @returns the number of characters printed */ int bus_print_child_footer(device_t dev, device_t child) { return (printf(" on %s\n", device_get_nameunit(dev))); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints out the VM domain for the given device. * * @returns the number of characters printed */ int bus_print_child_domain(device_t dev, device_t child) { int domain; /* No domain? Don't print anything */ if (BUS_GET_DOMAIN(dev, child, &domain) != 0) return (0); return (printf(" numa-domain %d", domain)); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function simply calls bus_print_child_header() followed by * bus_print_child_footer(). * * @returns the number of characters printed */ int bus_generic_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * @brief Stub function for implementing BUS_READ_IVAR(). * * @returns ENOENT */ int bus_generic_read_ivar(device_t dev, device_t child, int index, uintptr_t * result) { return (ENOENT); } /** * @brief Stub function for implementing BUS_WRITE_IVAR(). * * @returns ENOENT */ int bus_generic_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * @brief Helper function for implementing BUS_GET_PROPERTY(). * * This simply calls the BUS_GET_PROPERTY() method of the parent of dev, * until a non-default implementation is found. */ ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { if (device_get_parent(dev) != NULL) return (BUS_GET_PROPERTY(device_get_parent(dev), child, propname, propvalue, size, type)); return (-1); } /** * @brief Stub function for implementing BUS_GET_RESOURCE_LIST(). * * @returns NULL */ struct resource_list * bus_generic_get_resource_list(device_t dev, device_t child) { return (NULL); } /** * @brief Helper function for implementing BUS_DRIVER_ADDED(). * * This implementation of BUS_DRIVER_ADDED() simply calls the driver's * DEVICE_IDENTIFY() method to allow it to add new children to the bus * and then calls device_probe_and_attach() for each unattached child. */ void bus_generic_driver_added(device_t dev, driver_t *driver) { device_t child; DEVICE_IDENTIFY(driver, dev); TAILQ_FOREACH(child, &dev->children, link) { if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_NEW_PASS(). * * This implementation of BUS_NEW_PASS() first calls the identify * routines for any drivers that probe at the current pass. Then it * walks the list of devices for this bus. If a device is already * attached, then it calls BUS_NEW_PASS() on that device. If the * device is not already attached, it attempts to attach a driver to * it. */ void bus_generic_new_pass(device_t dev) { driverlink_t dl; devclass_t dc; device_t child; dc = dev->devclass; TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->pass == bus_current_pass) DEVICE_IDENTIFY(dl->driver, dev); } TAILQ_FOREACH(child, &dev->children, link) { if (child->state >= DS_ATTACHED) BUS_NEW_PASS(child); else if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev. 
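 *
 * For context (the "foo" names are hypothetical, not from this file),
 * the request that ends up here usually originates in a leaf driver:
 *
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
 *	    &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE);
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, foo_intr, sc,
 *	    &sc->intrhand);
 *
 * Each generic bus method then forwards the request one level up until
 * a bus that actually programs interrupt hardware handles it.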
*/ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); } /** * @brief Helper function for implementing BUS_TEARDOWN_INTR(). * * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, type, r, start, end)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. 
*/ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. */ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). 
* * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, type, rid, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. 
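 *
 * Taken together, the bus_generic_rl_*() helpers let a simple bus
 * implement its resource methods with very little code. An
 * illustrative method table (the "mybus" driver is hypothetical)
 * might contain:
 *
 *	DEVMETHOD(bus_get_resource_list, mybus_get_resource_list),
 *	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
 *	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
 *	DEVMETHOD(bus_delete_resource,	bus_generic_rl_delete_resource),
 *	DEVMETHOD(bus_alloc_resource,	bus_generic_rl_alloc_resource),
 *	DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),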
*/ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENXIO); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, int type, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, type, r, start, end)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_activate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). * * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. 
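 *
 * Sketch of the usual driver-side life cycle (hypothetical variables,
 * not from this file); allocating with RF_ACTIVE instead would make
 * the explicit activate step unnecessary:
 *
 *	r = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
 *	error = bus_activate_resource(dev, SYS_RES_MEMORY, rid, r);
 *	...
 *	bus_deactivate_resource(dev, SYS_RES_MEMORY, rid, r);
 *	bus_release_resource(dev, SYS_RES_MEMORY, rid, r);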
*/ int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, int type, int rid, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); return (rv); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. */ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) 
{ va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. */ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO_STR(). * * This function simply calls the BUS_CHILD_PNPINFO_STR() method of the * parent of @p dev. */ int bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen) { device_t parent; parent = device_get_parent(child); if (parent == NULL) { *buf = '\0'; return (0); } return (BUS_CHILD_PNPINFO_STR(parent, child, buf, buflen)); } /** * @brief Wrapper function for BUS_CHILD_LOCATION_STR(). * * This function simply calls the BUS_CHILD_LOCATION_STR() method of the * parent of @p dev. */ int bus_child_location_str(device_t child, char *buf, size_t buflen) { device_t parent; parent = device_get_parent(child); if (parent == NULL) { *buf = '\0'; return (0); } return (BUS_CHILD_LOCATION_STR(parent, child, buf, buflen)); } /** * @brief Wrapper function for bus_child_pnpinfo_str using sbuf * * A convenient wrapper function for bus_child_pnpinfo_str that allows * us to splat that into an sbuf. It uses unholy knowledge of sbuf to * accomplish this, however. It is an interim function until we can convert * this interface more fully. 
*/ /* Note: we reach inside of sbuf because its API isn't rich enough to do this */ #define SPACE(s) ((s)->s_size - (s)->s_len) #define EOB(s) ((s)->s_buf + (s)->s_len) static int bus_child_pnpinfo_sb(device_t dev, struct sbuf *sb) { char *p; ssize_t space; MPASS((sb->s_flags & SBUF_INCLUDENUL) == 0); MPASS(sb->s_size >= sb->s_len); if (sb->s_error != 0) return (-1); space = SPACE(sb); if (space <= 1) { sb->s_error = ENOMEM; return (-1); } p = EOB(sb); *p = '\0'; /* sbuf buffer isn't NUL terminated until sbuf_finish() */ bus_child_pnpinfo_str(dev, p, space); sb->s_len += strlen(p); return (0); } /** * @brief Wrapper function for bus_child_location_str using sbuf * * A convenient wrapper function for bus_child_location_str that allows * us to splat that into an sbuf. It uses unholy knowledge of sbuf to * accomplish this, however. It is an interim function until we can convert * this interface more fully. */ static int bus_child_location_sb(device_t dev, struct sbuf *sb) { char *p; ssize_t space; MPASS((sb->s_flags & SBUF_INCLUDENUL) == 0); MPASS(sb->s_size >= sb->s_len); if (sb->s_error != 0) return (-1); space = SPACE(sb); if (space <= 1) { sb->s_error = ENOMEM; return (-1); } p = EOB(sb); *p = '\0'; /* sbuf buffer isn't NUL terminated until sbuf_finish() */ bus_child_location_str(dev, p, space); sb->s_len += strlen(p); return (0); } #undef SPACE #undef EOB /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kern", "power", "resume", NULL); /* Deprecated; gone in 14 */ devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. 
Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. */ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devinit(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. 
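 * Drivers normally reach this handler via the DRIVER_MODULE() macro
 * family from sys/bus.h rather than by calling it directly; an
 * illustrative declaration (the "foo" driver name and foo_driver are
 * hypothetical) would be:
 *
 *	DRIVER_MODULE(foo, pci, foo_driver, NULL, NULL);
 *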
If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg. */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); } /** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds. It searches first for the specific * bus that's being probed for hinted children (e.g. isa0), and then for * generic children (e.g. isa). * * @param bus the bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); } #ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_SUSPENDED? "suspended,":""), (dev->ivars? "":"no "), (dev->softc?
"":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. * * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. 
*/ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); if (udev == NULL) return (ENOMEM); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); bus_child_pnpinfo_sb(dev, &sb); sbuf_putc(&sb, '\0'); bus_child_location_sb(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. */ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } } static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. Walk the tree and * generate all the pent-up events that are still relevant. 
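 *
 * For example (illustrative), the sequence "devctl freeze; kldload
 * if_foo; devctl thaw" holds back both the driver-added probes and the
 * resulting no-match events until the thaw runs the two steps in this
 * function; if_foo is a hypothetical module name.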
*/ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ - mtx_lock(&Giant); + bus_topo_lock(); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; default: error = ENOTTY; break; } if (error) { - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev)) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. */ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. 
*/ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. */ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; } - mtx_unlock(&Giant); + bus_topo_unlock(); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl2"); } /* * APIs to manage deprecation and obsolescence. */ static int obsolete_panic = 0; SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0, "Panic when obsolete features are used (0 = never, 1 = if obsolete, " "2 = if deprecated)"); static void gone_panic(int major, int running, const char *msg) { switch (obsolete_panic) { case 0: return; case 1: if (running < major) return; /* FALLTHROUGH */ default: panic("%s", msg); } } void _gone_in(int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) printf("Obsolete code will be removed soon: %s\n", msg); else printf("Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } void _gone_in_dev(device_t dev, int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) device_printf(dev, "Obsolete code will be removed soon: %s\n", msg); else device_printf(dev, "Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } #ifdef DDB DB_SHOW_COMMAND(device, db_show_device) { device_t dev; if (!have_addr) return; dev = (device_t)addr; db_printf("name: %s\n", device_get_nameunit(dev)); db_printf(" driver: %s\n", DRIVERNAME(dev->driver)); db_printf(" class: %s\n", DEVCLANAME(dev->devclass)); db_printf(" addr: %p\n", dev); db_printf(" parent: %p\n", dev->parent); db_printf(" softc: %p\n", dev->softc); db_printf(" ivars: %p\n", dev->ivars); } DB_SHOW_ALL_COMMAND(devices, db_show_all_devices) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { db_show_device((db_expr_t)dev, true, count, modif); } } #endif diff --git a/sys/net/iflib_clone.c b/sys/net/iflib_clone.c index 975873c4a19c..8638491a17d3 100644 --- a/sys/net/iflib_clone.c +++ b/sys/net/iflib_clone.c @@ -1,301 
+1,301 @@ /*- * Copyright (c) 2014-2018, Matthew Macy * Copyright (C) 2017-2018 Joyent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" int noop_attach(device_t dev) { return (0); } int iflib_pseudo_detach(device_t dev) { if_ctx_t ctx; ctx = device_get_softc(dev); if ((iflib_get_flags(ctx) & IFC_IN_DETACH) == 0) return (EBUSY); return (0); } static device_t iflib_pseudodev; static struct mtx pseudoif_mtx; MTX_SYSINIT(pseudoif_mtx, &pseudoif_mtx, "pseudoif_mtx", MTX_DEF); #define PSEUDO_LOCK() mtx_lock(&pseudoif_mtx); #define PSEUDO_UNLOCK() mtx_unlock(&pseudoif_mtx); struct if_pseudo { eventhandler_tag ip_detach_tag; eventhandler_tag ip_lladdr_tag; struct if_clone *ip_ifc; if_shared_ctx_t ip_sctx; devclass_t ip_dc; LIST_ENTRY(if_pseudo) ip_list; int ip_on_list; }; static LIST_HEAD(, if_pseudo) iflib_pseudos = LIST_HEAD_INITIALIZER(iflib_pseudos); /* * XXX this assumes that the rest of the * code won't hang on to it after it's * removed / unloaded */ static if_pseudo_t iflib_ip_lookup(const char *name) { if_pseudo_t ip = NULL; PSEUDO_LOCK(); LIST_FOREACH(ip, &iflib_pseudos, ip_list) { if (!strcmp(ip->ip_sctx->isc_name, name)) break; } PSEUDO_UNLOCK(); return (ip); } static void iflib_ip_delete(if_pseudo_t ip) { PSEUDO_LOCK(); if (ip->ip_on_list) { LIST_REMOVE(ip, ip_list); ip->ip_on_list = 0; } PSEUDO_UNLOCK(); } static void iflib_ip_insert(if_pseudo_t ip) { PSEUDO_LOCK(); if (!ip->ip_on_list) { LIST_INSERT_HEAD(&iflib_pseudos, ip, ip_list); ip->ip_on_list = 1; } PSEUDO_UNLOCK(); } static void iflib_ifdetach(void *arg __unused, if_t ifp) { /* If the ifnet is just being renamed, don't do anything. 
*/ if (ifp->if_flags & IFF_RENAMING) return; } static void iflib_iflladdr(void *arg __unused, if_t ifp __unused) { } static int iflib_clone_create(struct if_clone *ifc, int unit, caddr_t params) { const char *name = ifc_name(ifc); struct iflib_cloneattach_ctx clctx; if_ctx_t ctx; if_pseudo_t ip; device_t dev; int rc; clctx.cc_ifc = ifc; clctx.cc_len = 0; clctx.cc_params = params; clctx.cc_name = name; if (__predict_false(iflib_pseudodev == NULL)) { /* SYSINIT initialization would panic !?! */ - mtx_lock(&Giant); + bus_topo_lock(); iflib_pseudodev = device_add_child(root_bus, "ifpseudo", 0); - mtx_unlock(&Giant); + bus_topo_unlock(); MPASS(iflib_pseudodev != NULL); } ip = iflib_ip_lookup(name); if (ip == NULL) { printf("no ip found for %s\n", name); return (ENOENT); } if ((dev = devclass_get_device(ip->ip_dc, unit)) != NULL) { printf("unit %d allocated\n", unit); bus_generic_print_child(iflib_pseudodev, dev); return (EBUSY); } PSEUDO_LOCK(); dev = device_add_child(iflib_pseudodev, name, unit); device_set_driver(dev, &iflib_pseudodriver); PSEUDO_UNLOCK(); device_quiet(dev); rc = device_attach(dev); MPASS(rc == 0); MPASS(dev != NULL); MPASS(devclass_get_device(ip->ip_dc, unit) == dev); rc = iflib_pseudo_register(dev, ip->ip_sctx, &ctx, &clctx); if (rc) { - mtx_lock(&Giant); + bus_topo_lock(); device_delete_child(iflib_pseudodev, dev); - mtx_unlock(&Giant); + bus_topo_unlock(); } else device_set_softc(dev, ctx); return (rc); } static void iflib_clone_destroy(if_t ifp) { if_ctx_t ctx; device_t dev; struct sx *ctx_lock; int rc; /* * Detach device / free / free unit */ ctx = if_getsoftc(ifp); dev = iflib_get_dev(ctx); ctx_lock = iflib_ctx_lock_get(ctx); sx_xlock(ctx_lock); iflib_set_detach(ctx); iflib_stop(ctx); sx_xunlock(ctx_lock); - mtx_lock(&Giant); + bus_topo_lock(); rc = device_delete_child(iflib_pseudodev, dev); - mtx_unlock(&Giant); + bus_topo_unlock(); if (rc == 0) iflib_pseudo_deregister(ctx); } if_pseudo_t iflib_clone_register(if_shared_ctx_t sctx) { if_pseudo_t ip; if (sctx->isc_name == NULL) { printf("iflib_clone_register failed - shared_ctx needs to have a device name\n"); return (NULL); } if (iflib_ip_lookup(sctx->isc_name) != NULL) { printf("iflib_clone_register failed - shared_ctx %s already registered\n", sctx->isc_name); return (NULL); } ip = malloc(sizeof(*ip), M_IFLIB, M_WAITOK|M_ZERO); ip->ip_sctx = sctx; ip->ip_dc = devclass_create(sctx->isc_name); if (ip->ip_dc == NULL) goto fail_clone; /* XXX --- we can handle clone_advanced later */ ip->ip_ifc = if_clone_simple(sctx->isc_name, iflib_clone_create, iflib_clone_destroy, 0); if (ip->ip_ifc == NULL) { printf("clone_simple failed -- cloned %s devices will not be available\n", sctx->isc_name); goto fail_clone; } ip->ip_lladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, iflib_iflladdr, NULL, EVENTHANDLER_PRI_ANY); if (ip->ip_lladdr_tag == NULL) goto fail_addr; ip->ip_detach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, iflib_ifdetach, NULL, EVENTHANDLER_PRI_ANY); if (ip->ip_detach_tag == NULL) goto fail_depart; iflib_ip_insert(ip); return (ip); fail_depart: EVENTHANDLER_DEREGISTER(iflladdr_event, ip->ip_lladdr_tag); fail_addr: if_clone_detach(ip->ip_ifc); fail_clone: free(ip, M_IFLIB); return (NULL); } void iflib_clone_deregister(if_pseudo_t ip) { /* XXX check that it is not still in use */ iflib_ip_delete(ip); EVENTHANDLER_DEREGISTER(ifnet_departure_event, ip->ip_detach_tag); EVENTHANDLER_DEREGISTER(iflladdr_event, ip->ip_lladdr_tag); if_clone_detach(ip->ip_ifc); /* XXX free devclass */ free(ip, M_IFLIB); } diff --git
a/sys/sys/bus.h b/sys/sys/bus.h index af2be5c5583b..430beaecc6d0 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -1,1036 +1,1044 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_BUS_H_ #define _SYS_BUS_H_ #include #include #include #include /** * @defgroup NEWBUS newbus - a generic framework for managing devices * @{ */ /** * @brief Interface information structure. */ struct u_businfo { int ub_version; /**< @brief interface version */ #define BUS_USER_VERSION 2 int ub_generation; /**< @brief generation count */ }; /** * @brief State of the device. */ typedef enum device_state { DS_NOTPRESENT = 10, /**< @brief not probed or probe failed */ DS_ALIVE = 20, /**< @brief probe succeeded */ DS_ATTACHING = 25, /**< @brief currently attaching */ DS_ATTACHED = 30, /**< @brief attach method called */ DS_BUSY = 40 /**< @brief device is open */ } device_state_t; /** * @brief Device proprty types. * * Those are used by bus logic to encode requested properties, * e.g. in DT all properties are stored as BE and need to be converted * to host endianness. */ typedef enum device_property_type { DEVICE_PROP_ANY = 0, DEVICE_PROP_BUFFER = 1, DEVICE_PROP_UINT32 = 2, DEVICE_PROP_UINT64 = 3, } device_property_type_t; /** * @brief Device information exported to userspace. * The strings are placed one after the other, separated by NUL characters. * Fields should be added after the last one and order maintained for compatibility */ #define BUS_USER_BUFFER (3*1024) struct u_device { uintptr_t dv_handle; uintptr_t dv_parent; uint32_t dv_devflags; /**< @brief API Flags for device */ uint16_t dv_flags; /**< @brief flags for dev state */ device_state_t dv_state; /**< @brief State of attachment */ char dv_fields[BUS_USER_BUFFER]; /**< @brief NUL terminated fields */ /* name (name of the device in tree) */ /* desc (driver description) */ /* drivername (Name of driver without unit number) */ /* pnpinfo (Plug and play information from bus) */ /* location (Location of device on parent */ /* NUL */ }; /* Flags exported via dv_flags. 
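 * A userland consumer of hw.bus.devices can test these directly; an
 * illustrative sketch ("name" comes from dv_fields):
 *
 *	if ((udev->dv_flags & DF_ENABLED) == 0)
 *		printf("%s is disabled\n", name);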
*/ #define DF_ENABLED 0x01 /* device should be probed/attached */ #define DF_FIXEDCLASS 0x02 /* devclass specified at create time */ #define DF_WILDCARD 0x04 /* unit was originally wildcard */ #define DF_DESCMALLOCED 0x08 /* description was malloced */ #define DF_QUIET 0x10 /* don't print verbose attach message */ #define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */ #define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */ #define DF_SUSPENDED 0x100 /* Device is suspended. */ #define DF_QUIET_CHILDREN 0x200 /* Default to quiet for all my children */ #define DF_ATTACHED_ONCE 0x400 /* Has been attached at least once */ #define DF_NEEDNOMATCH 0x800 /* Has a pending NOMATCH event */ /** * @brief Device request structure used for ioctl's. * * Used for ioctl's on /dev/devctl2. All device ioctl's * must have parameter definitions which begin with dr_name. */ struct devreq_buffer { void *buffer; size_t length; }; struct devreq { char dr_name[128]; int dr_flags; /* request-specific flags */ union { struct devreq_buffer dru_buffer; void *dru_data; } dr_dru; #define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */ #define dr_data dr_dru.dru_data /* fixed-size buffer */ }; #define DEV_ATTACH _IOW('D', 1, struct devreq) #define DEV_DETACH _IOW('D', 2, struct devreq) #define DEV_ENABLE _IOW('D', 3, struct devreq) #define DEV_DISABLE _IOW('D', 4, struct devreq) #define DEV_SUSPEND _IOW('D', 5, struct devreq) #define DEV_RESUME _IOW('D', 6, struct devreq) #define DEV_SET_DRIVER _IOW('D', 7, struct devreq) #define DEV_CLEAR_DRIVER _IOW('D', 8, struct devreq) #define DEV_RESCAN _IOW('D', 9, struct devreq) #define DEV_DELETE _IOW('D', 10, struct devreq) #define DEV_FREEZE _IOW('D', 11, struct devreq) #define DEV_THAW _IOW('D', 12, struct devreq) #define DEV_RESET _IOW('D', 13, struct devreq) /* Flags for DEV_DETACH and DEV_DISABLE. */ #define DEVF_FORCE_DETACH 0x0000001 /* Flags for DEV_SET_DRIVER. */ #define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_CLEAR_DRIVER. */ #define DEVF_CLEAR_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_DELETE. */ #define DEVF_FORCE_DELETE 0x0000001 /* Flags for DEV_RESET */ #define DEVF_RESET_DETACH 0x0000001 /* Detach drivers vs suspend device */ #ifdef _KERNEL #include #include #include #include /** * Device name parsers. Hook to allow device enumerators to map * scheme-specific names to a device. */ typedef void (*dev_lookup_fn)(void *arg, const char *name, device_t *result); EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn); /** * @brief A device driver (included mainly for compatibility with * FreeBSD 4.x). */ typedef struct kobj_class driver_t; /** * @brief A device class * * The devclass object has two main functions in the system. The first * is to manage the allocation of unit numbers for device instances * and the second is to hold the list of device drivers for a * particular bus type. Each devclass has a name and there cannot be * two devclasses with the same name. This ensures that unique unit * numbers are allocated to device instances. * * Drivers that support several different bus attachments (e.g. isa, * pci, pccard) should all use the same devclass to ensure that unit * numbers do not conflict. * * Each devclass may also have a parent devclass. This is used when * searching for device drivers to allow a form of inheritance. When * matching drivers with devices, first the driver list of the parent * device's devclass is searched. 
If no driver is found in that list, * the search continues in the parent devclass (if any). */ typedef struct devclass *devclass_t; /** * @brief A device method */ #define device_method_t kobj_method_t /** * @brief Driver interrupt filter return values * * If a driver provides an interrupt filter routine it must return an * integer formed by OR'ing together zero or more of the following * flags: * * FILTER_STRAY - this device did not trigger the interrupt * FILTER_HANDLED - the interrupt has been fully handled and can be EOId * FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be * scheduled to execute * * If the driver does not provide a filter, then the interrupt code will * act as if the filter had returned FILTER_SCHEDULE_THREAD. Note that it * is illegal to specify any other flag with FILTER_STRAY and that it is * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD * if FILTER_STRAY is not specified. */ #define FILTER_STRAY 0x01 #define FILTER_HANDLED 0x02 #define FILTER_SCHEDULE_THREAD 0x04 /** * @brief Driver interrupt service routines * * The filter routine is run in primary interrupt context and may not * block or use regular mutexes. It may only use spin mutexes for * synchronization. The filter may either completely handle the * interrupt or it may perform some of the work and defer more * expensive work to the regular interrupt handler. If a filter * routine is not registered by the driver, then the regular interrupt * handler is always used to handle interrupts from this device. * * The regular interrupt handler executes in its own thread context * and may use regular mutexes. However, it is prohibited from * sleeping on a sleep queue. */ typedef int driver_filter_t(void*); typedef void driver_intr_t(void*); /** * @brief Interrupt type bits. * * These flags are used both by newbus interrupt * registration (nexus.c) and also in struct intrec, which defines * interrupt properties. * * XXX We should probably revisit this and remove the vestiges of the * spls implicit in names like INTR_TYPE_TTY. In the meantime, don't * confuse things by renaming them (Grog, 18 July 2000). * * Buses which do interrupt remapping will want to change their type * to reflect what sort of devices are underneath. */ enum intr_type { INTR_TYPE_TTY = 1, INTR_TYPE_BIO = 2, INTR_TYPE_NET = 4, INTR_TYPE_CAM = 8, INTR_TYPE_MISC = 16, INTR_TYPE_CLK = 32, INTR_TYPE_AV = 64, INTR_EXCL = 256, /* exclusive interrupt */ INTR_MPSAFE = 512, /* this interrupt is SMP safe */ INTR_ENTROPY = 1024, /* this interrupt provides entropy */ INTR_MD1 = 4096, /* flag reserved for MD use */ INTR_MD2 = 8192, /* flag reserved for MD use */ INTR_MD3 = 16384, /* flag reserved for MD use */ INTR_MD4 = 32768 /* flag reserved for MD use */ }; enum intr_trigger { INTR_TRIGGER_INVALID = -1, INTR_TRIGGER_CONFORM = 0, INTR_TRIGGER_EDGE = 1, INTR_TRIGGER_LEVEL = 2 }; enum intr_polarity { INTR_POLARITY_CONFORM = 0, INTR_POLARITY_HIGH = 1, INTR_POLARITY_LOW = 2 }; /** * CPU sets supported by bus_get_cpus(). Note that not all sets may be * supported for a given device. If a request is not supported by a * device (or its parents), then bus_get_cpus() will fail with EINVAL. */ enum cpu_sets { LOCAL_CPUS = 0, INTR_CPUS }; typedef int (*devop_t)(void); /** * @brief This structure is deprecated. * * Use the kobj(9) macro DEFINE_CLASS to * declare classes which implement device drivers. */ struct driver { KOBJ_CLASS_FIELDS; }; /** * @brief A resource mapping.
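 *
 * Filled in by bus_map_resource(); an illustrative use, mapping the
 * first page of an already-allocated SYS_RES_MEMORY resource ("res",
 * "dev", and "error" are assumed to exist in the caller):
 *
 *	struct resource_map_request req;
 *	struct resource_map map;
 *
 *	resource_init_map_request(&req);
 *	req.offset = 0;
 *	req.length = PAGE_SIZE;
 *	error = bus_map_resource(dev, SYS_RES_MEMORY, res, &req, &map);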
*/ struct resource_map { bus_space_tag_t r_bustag; bus_space_handle_t r_bushandle; bus_size_t r_size; void *r_vaddr; }; /** * @brief Optional properties of a resource mapping request. */ struct resource_map_request { size_t size; rman_res_t offset; rman_res_t length; vm_memattr_t memattr; }; void resource_init_map_request_impl(struct resource_map_request *_args, size_t _sz); #define resource_init_map_request(rmr) \ resource_init_map_request_impl((rmr), sizeof(*(rmr))) /* * Definitions for drivers which need to keep simple lists of resources * for their child devices. */ struct resource; /** * @brief An entry for a single resource in a resource list. */ struct resource_list_entry { STAILQ_ENTRY(resource_list_entry) link; int type; /**< @brief type argument to alloc_resource */ int rid; /**< @brief resource identifier */ int flags; /**< @brief resource flags */ struct resource *res; /**< @brief the real resource when allocated */ rman_res_t start; /**< @brief start of resource range */ rman_res_t end; /**< @brief end of resource range */ rman_res_t count; /**< @brief count within range */ }; STAILQ_HEAD(resource_list, resource_list_entry); #define RLE_RESERVED 0x0001 /* Reserved by the parent bus. */ #define RLE_ALLOCATED 0x0002 /* Reserved resource is allocated. */ #define RLE_PREFETCH 0x0004 /* Resource is a prefetch range. */ void resource_list_init(struct resource_list *rl); void resource_list_free(struct resource_list *rl); struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_busy(struct resource_list *rl, int type, int rid); int resource_list_reserved(struct resource_list *rl, int type, int rid); struct resource_list_entry* resource_list_find(struct resource_list *rl, int type, int rid); void resource_list_delete(struct resource_list *rl, int type, int rid); struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res); int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type); struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid); void resource_list_purge(struct resource_list *rl); int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format); /* * The root bus, to which all top-level buses are attached. */ extern device_t root_bus; extern devclass_t root_devclass; void root_bus_configure(void); /* * Useful functions for implementing buses. 
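 * A minimal bus driver can often point its method table straight at
 * these; a sketch (the mybus_* names are hypothetical):
 *
 *	static device_method_t mybus_methods[] = {
 *		DEVMETHOD(device_probe,		mybus_probe),
 *		DEVMETHOD(device_attach,	bus_generic_attach),
 *		DEVMETHOD(bus_read_ivar,	bus_generic_read_ivar),
 *		DEVMETHOD(bus_print_child,	bus_generic_print_child),
 *		DEVMETHOD_END
 *	};
 *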
*/ struct _cpuset; int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit); int bus_generic_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); struct resource * bus_generic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart); int bus_generic_attach(device_t dev); int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu); int bus_generic_child_present(device_t dev, device_t child); int bus_generic_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int bus_generic_detach(device_t dev); void bus_generic_driver_added(device_t dev, driver_t *driver); int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child); bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child); int bus_generic_get_domain(device_t dev, device_t child, int *domain); ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); struct resource_list * bus_generic_get_resource_list (device_t, device_t); int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); void bus_generic_new_pass(device_t dev); int bus_print_child_header(device_t dev, device_t child); int bus_print_child_domain(device_t dev, device_t child); int bus_print_child_footer(device_t dev, device_t child); int bus_generic_print_child(device_t dev, device_t child); int bus_generic_probe(device_t dev); int bus_generic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int bus_generic_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); int bus_generic_resume(device_t dev); int bus_generic_resume_child(device_t dev, device_t child); int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); struct resource * bus_generic_rl_alloc_resource (device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); void bus_generic_rl_delete_resource (device_t, device_t, int, int); int bus_generic_rl_get_resource (device_t, device_t, int, int, rman_res_t *, rman_res_t *); int bus_generic_rl_set_resource (device_t, device_t, int, int, rman_res_t, rman_res_t); int bus_generic_rl_release_resource (device_t, device_t, int, int, struct resource *); int bus_generic_shutdown(device_t dev); int bus_generic_suspend(device_t dev); int bus_generic_suspend_child(device_t dev, device_t child); int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_unmap_resource(device_t dev, 
device_t child, int type, struct resource *r, struct resource_map *map); int bus_generic_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int bus_helper_reset_post(device_t dev, int flags); int bus_helper_reset_prepare(device_t dev, int flags); int bus_null_rescan(device_t dev); /* * Wrapper functions for the BUS_*_RESOURCE methods to make client code * a little simpler. */ struct resource_spec { int type; int rid; int flags; }; #define RESOURCE_SPEC_END {-1, 0, 0} int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res); void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res); int bus_adjust_resource(device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); struct resource *bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_activate_resource(device_t dev, int type, int rid, struct resource *r); int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r); int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map); int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_get_dma_tag(device_t dev); bus_space_tag_t bus_get_bus_tag(device_t dev); int bus_get_domain(device_t dev, int *domain); int bus_release_resource(device_t dev, int type, int rid, struct resource *r); int bus_free_resource(device_t dev, int type, struct resource *r); int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep); int bus_teardown_intr(device_t dev, struct resource *r, void *cookie); int bus_suspend_intr(device_t dev, struct resource *r); int bus_resume_intr(device_t dev, struct resource *r); int bus_bind_intr(device_t dev, struct resource *r, int cpu); int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) __printflike(4, 5); int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count); int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp); rman_res_t bus_get_resource_start(device_t dev, int type, int rid); rman_res_t bus_get_resource_count(device_t dev, int type, int rid); void bus_delete_resource(device_t dev, int type, int rid); int bus_child_present(device_t child); int bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen); int bus_child_location_str(device_t child, char *buf, size_t buflen); void bus_enumerate_hinted_children(device_t bus); int bus_delayed_attach_children(device_t bus); static __inline struct resource * bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, 1, flags)); } static __inline struct resource * bus_alloc_resource_anywhere(device_t dev, int type, int *rid, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, count, flags)); } /* * Access functions for device. 
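 * Typical use from a driver's attach routine (an illustrative sketch;
 * struct foo_softc is hypothetical):
 *
 *	struct foo_softc *sc = device_get_softc(dev);
 *	device_printf(dev, "attaching %s\n", device_get_nameunit(dev));
 *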
*/ device_t device_add_child(device_t dev, const char *name, int unit); device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit); void device_busy(device_t dev); int device_delete_child(device_t dev, device_t child); int device_delete_children(device_t dev); int device_attach(device_t dev); int device_detach(device_t dev); void device_disable(device_t dev); void device_enable(device_t dev); device_t device_find_child(device_t dev, const char *classname, int unit); const char *device_get_desc(device_t dev); devclass_t device_get_devclass(device_t dev); driver_t *device_get_driver(device_t dev); u_int32_t device_get_flags(device_t dev); device_t device_get_parent(device_t dev); int device_get_children(device_t dev, device_t **listp, int *countp); void *device_get_ivars(device_t dev); void device_set_ivars(device_t dev, void *ivars); const char *device_get_name(device_t dev); const char *device_get_nameunit(device_t dev); void *device_get_softc(device_t dev); device_state_t device_get_state(device_t dev); int device_get_unit(device_t dev); struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev); struct sysctl_oid *device_get_sysctl_tree(device_t dev); int device_has_quiet_children(device_t dev); int device_is_alive(device_t dev); /* did probe succeed? */ int device_is_attached(device_t dev); /* did attach succeed? */ int device_is_enabled(device_t dev); int device_is_suspended(device_t dev); int device_is_quiet(device_t dev); device_t device_lookup_by_name(const char *name); int device_print_prettyname(device_t dev); int device_printf(device_t dev, const char *, ...) __printflike(2, 3); int device_probe(device_t dev); int device_probe_and_attach(device_t dev); int device_probe_child(device_t bus, device_t dev); int device_quiesce(device_t dev); void device_quiet(device_t dev); void device_quiet_children(device_t dev); void device_set_desc(device_t dev, const char* desc); void device_set_desc_copy(device_t dev, const char* desc); int device_set_devclass(device_t dev, const char *classname); int device_set_devclass_fixed(device_t dev, const char *classname); bool device_is_devclass_fixed(device_t dev); int device_set_driver(device_t dev, driver_t *driver); void device_set_flags(device_t dev, u_int32_t flags); void device_set_softc(device_t dev, void *softc); void device_free_softc(void *softc); void device_claim_softc(device_t dev); int device_set_unit(device_t dev, int unit); /* XXX DONT USE XXX */ int device_shutdown(device_t dev); void device_unbusy(device_t dev); void device_verbose(device_t dev); ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type); bool device_has_property(device_t dev, const char *prop); /* * Access functions for devclass. 
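 * Illustrative lookup of unit 0 of a hypothetical "foo" devclass:
 *
 *	devclass_t dc = devclass_find("foo");
 *	device_t dev = (dc != NULL) ? devclass_get_device(dc, 0) : NULL;
 *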
int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp); devclass_t devclass_create(const char *classname); int devclass_delete_driver(devclass_t busclass, driver_t *driver); devclass_t devclass_find(const char *classname); const char *devclass_get_name(devclass_t dc); device_t devclass_get_device(devclass_t dc, int unit); void *devclass_get_softc(devclass_t dc, int unit); int devclass_get_devices(devclass_t dc, device_t **listp, int *countp); int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp); int devclass_get_count(devclass_t dc); int devclass_get_maxunit(devclass_t dc); int devclass_find_free_unit(devclass_t dc, int unit); void devclass_set_parent(devclass_t dc, devclass_t pdc); devclass_t devclass_get_parent(devclass_t dc); struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc); struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc); /* * Access functions for device resources. */ int resource_int_value(const char *name, int unit, const char *resname, int *result); int resource_long_value(const char *name, int unit, const char *resname, long *result); int resource_string_value(const char *name, int unit, const char *resname, const char **result); int resource_disabled(const char *name, int unit); int resource_find_match(int *anchor, const char **name, int *unit, const char *resname, const char *value); int resource_find_dev(int *anchor, const char *name, int *unit, const char *resname, const char *value); int resource_unset_value(const char *name, int unit, const char *resname); /* * Functions for maintaining and checking consistency of * bus information exported to userspace. */ int bus_data_generation_check(int generation); void bus_data_generation_update(void); /** * Some convenience defines for probe routines to return. These are just * suggested values, and there's nothing magical about them. * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and that no * possible other driver may exist (typically legacy drivers that don't follow * all the rules, or special needs drivers). BUS_PROBE_VENDOR is the * suggested value that vendor supplied drivers use. This is for source or * binary drivers that are not yet integrated into the FreeBSD tree. Its use * in the base OS is prohibited. BUS_PROBE_DEFAULT is the normal return value * for drivers to use. It is intended that nearly all of the drivers in the * tree should return this value. BUS_PROBE_LOW_PRIORITY is for drivers that * have special requirements like when there are two drivers that support * overlapping series of hardware devices. In this case the one that supports * the older part of the line would return this value, while the one that * supports the newer ones would return BUS_PROBE_DEFAULT. BUS_PROBE_GENERIC * is for drivers that wish to have a generic form and a specialized form, * as is done with the pci bus and the acpi pci bus. BUS_PROBE_HOOVER is * for those buses that implement a generic device placeholder for devices on * the bus that have no more specific driver for them (aka ugen). * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding * for a device node, but accepts only devices that its parent has told it to * use this driver.
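 *
 * A typical probe routine returns one of these values; an illustrative
 * sketch (foo_probe and FOO_DEVID are hypothetical):
 *
 *	static int
 *	foo_probe(device_t dev)
 *	{
 *		if (pci_get_devid(dev) != FOO_DEVID)
 *			return (ENXIO);
 *		device_set_desc(dev, "Foo controller");
 *		return (BUS_PROBE_DEFAULT);
 *	}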
*/ #define BUS_PROBE_SPECIFIC 0 /* Only I can use this device */ #define BUS_PROBE_VENDOR (-10) /* Vendor supplied driver */ #define BUS_PROBE_DEFAULT (-20) /* Base OS default driver */ #define BUS_PROBE_LOW_PRIORITY (-40) /* Older, less desirable drivers */ #define BUS_PROBE_GENERIC (-100) /* generic driver for dev */ #define BUS_PROBE_HOOVER (-1000000) /* Driver for any dev on bus */ #define BUS_PROBE_NOWILDCARD (-2000000000) /* No wildcard device matches */ /** * During boot, the device tree is scanned multiple times. Each scan, * or pass, drivers may be attached to devices. Each driver * attachment is assigned a pass number. Drivers may only probe and * attach to devices if their pass number is less than or equal to the * current system-wide pass number. The default pass is the last pass * and is used by most drivers. Drivers needed by the scheduler are * probed in earlier passes. */ #define BUS_PASS_ROOT 0 /* Used to attach root0. */ #define BUS_PASS_BUS 10 /* Buses and bridges. */ #define BUS_PASS_CPU 20 /* CPU devices. */ #define BUS_PASS_RESOURCE 30 /* Resource discovery. */ #define BUS_PASS_INTERRUPT 40 /* Interrupt controllers. */ #define BUS_PASS_TIMER 50 /* Timers and clocks. */ #define BUS_PASS_SCHEDULER 60 /* Start scheduler. */ #define BUS_PASS_SUPPORTDEV 100000 /* Drivers which support DEFAULT drivers. */ #define BUS_PASS_DEFAULT __INT_MAX /* Everything else. */ #define BUS_PASS_ORDER_FIRST 0 #define BUS_PASS_ORDER_EARLY 2 #define BUS_PASS_ORDER_MIDDLE 5 #define BUS_PASS_ORDER_LATE 7 #define BUS_PASS_ORDER_LAST 9 extern int bus_current_pass; void bus_set_pass(int pass); +/** + * Routines to lock / unlock the newbus lock. + * Must be taken out to interact with newbus. + */ +void bus_topo_lock(void); +void bus_topo_unlock(void); +struct mtx * bus_topo_mtx(void); + /** * Shorthands for constructing method tables. */ #define DEVMETHOD KOBJMETHOD #define DEVMETHOD_END KOBJMETHOD_END /* * Some common device interfaces. */ #include "device_if.h" #include "bus_if.h" struct module; int driver_module_handler(struct module *, int, void *); /** * Module support for automatically adding drivers to buses. */ struct driver_module_data { int (*dmd_chainevh)(struct module *, int, void *); void *dmd_chainarg; const char *dmd_busname; kobj_class_t dmd_driver; devclass_t *dmd_devclass; int dmd_pass; }; #define _DRIVER_MODULE_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, NAME, ...) \ NAME #define _EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, devclass, \ evh, arg, order, pass) \ - \ + \ static struct driver_module_data name##_##busname##_driver_mod = { \ evh, arg, \ #busname, \ (kobj_class_t) &driver, \ devclass, \ pass \ }; \ \ static moduledata_t name##_##busname##_mod = { \ #busname "/" #name, \ driver_module_handler, \ &name##_##busname##_driver_mod \ }; \ DECLARE_MODULE(name##_##busname, name##_##busname##_mod, \ SI_SUB_DRIVERS, order) #define EARLY_DRIVER_MODULE_ORDERED7(name, busname, driver, evh, arg, \ order, pass) \ _EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, NULL, evh, \ arg, order, pass) #define EARLY_DRIVER_MODULE_ORDERED8(name, busname, driver, devclass, \ evh, arg, order, pass) \ _EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, &devclass, \ evh, arg, order, pass) #define EARLY_DRIVER_MODULE_ORDERED(...) 
\ _DRIVER_MODULE_MACRO(__VA_ARGS__, EARLY_DRIVER_MODULE_ORDERED8, \ EARLY_DRIVER_MODULE_ORDERED7)(__VA_ARGS__) #define EARLY_DRIVER_MODULE7(name, busname, driver, devclass, evh, arg, pass) \ EARLY_DRIVER_MODULE_ORDERED8(name, busname, driver, devclass, \ evh, arg, SI_ORDER_MIDDLE, pass) #define EARLY_DRIVER_MODULE6(name, busname, driver, evh, arg, pass) \ EARLY_DRIVER_MODULE_ORDERED7(name, busname, driver, evh, arg, \ SI_ORDER_MIDDLE, pass) #define EARLY_DRIVER_MODULE(...) \ _DRIVER_MODULE_MACRO(__VA_ARGS__, INVALID, \ EARLY_DRIVER_MODULE7, EARLY_DRIVER_MODULE6)(__VA_ARGS__) #define DRIVER_MODULE_ORDERED7(name, busname, driver, devclass, evh, arg,\ order) \ EARLY_DRIVER_MODULE_ORDERED8(name, busname, driver, devclass, \ evh, arg, order, BUS_PASS_DEFAULT) #define DRIVER_MODULE_ORDERED6(name, busname, driver, evh, arg, order) \ EARLY_DRIVER_MODULE_ORDERED7(name, busname, driver, evh, arg, \ order, BUS_PASS_DEFAULT) #define DRIVER_MODULE_ORDERED(...) \ _DRIVER_MODULE_MACRO(__VA_ARGS__, INVALID, \ DRIVER_MODULE_ORDERED7, DRIVER_MODULE_ORDERED6)(__VA_ARGS__) #define DRIVER_MODULE6(name, busname, driver, devclass, evh, arg) \ EARLY_DRIVER_MODULE7(name, busname, driver, devclass, evh, arg, \ BUS_PASS_DEFAULT) #define DRIVER_MODULE5(name, busname, driver, evh, arg) \ EARLY_DRIVER_MODULE6(name, busname, driver, evh, arg, \ BUS_PASS_DEFAULT) #define DRIVER_MODULE(...) \ _DRIVER_MODULE_MACRO(__VA_ARGS__, INVALID, INVALID, \ DRIVER_MODULE6, DRIVER_MODULE5)(__VA_ARGS__) /** * Generic ivar accessor generation macros for bus drivers */ #define __BUS_ACCESSOR(varp, var, ivarp, ivar, type) \ \ static __inline type varp ## _get_ ## var(device_t dev) \ { \ uintptr_t v; \ int e __diagused; \ e = BUS_READ_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, &v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ return ((type) v); \ } \ \ static __inline void varp ## _set_ ## var(device_t dev, type t) \ { \ uintptr_t v = (uintptr_t) t; \ int e __diagused; \ e = BUS_WRITE_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, v); \ KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d", \ __func__, device_get_nameunit(dev), \ device_get_nameunit(device_get_parent(dev)), e)); \ } /** * Shorthand macros, taking resource argument * Generated with sys/tools/bus_macro.sh */ #define bus_barrier(r, o, l, f) \ bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f)) #define bus_poke_1(r, o, v) \ bus_space_poke_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_1(r, o, vp) \ bus_space_peek_1((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_1(r, o) \ bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_1(r, o, d, c) \ bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_1(r, o, d, c) \ bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_1(r, o, v, c) \ bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_1(r, o, v, c) \ bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_1(r, o, v) \ bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_1(r, o, d, c) \ bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_1(r, o, d, c) \ bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define 
bus_read_stream_1(r, o) \ bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_1(r, o, d, c) \ bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_1(r, o, d, c) \ bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_1(r, o, v, c) \ bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_1(r, o, v, c) \ bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_1(r, o, v) \ bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_1(r, o, d, c) \ bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_1(r, o, d, c) \ bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_2(r, o, v) \ bus_space_poke_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_2(r, o, vp) \ bus_space_peek_2((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_2(r, o, d, c) \ bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_2(r, o, d, c) \ bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_2(r, o, v, c) \ bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_2(r, o, v, c) \ bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_2(r, o, d, c) \ bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_2(r, o, d, c) \ bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_2(r, o) \ bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_2(r, o, d, c) \ bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_2(r, o, d, c) \ bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_2(r, o, v, c) \ bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_2(r, o, v, c) \ bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_2(r, o, v) \ bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_2(r, o, d, c) \ bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_2(r, o, d, c) \ bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_4(r, o, v) \ bus_space_poke_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_4(r, o, vp) \ bus_space_peek_4((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_4(r, o, d, c) \ bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_4(r, o, d, c) \ bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_4(r, o, v, c) \ bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_4(r, o, v, c) \ 
bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_4(r, o, v) \ bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_4(r, o, d, c) \ bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_4(r, o, d, c) \ bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_4(r, o) \ bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_4(r, o, d, c) \ bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_4(r, o, d, c) \ bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_4(r, o, v, c) \ bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_4(r, o, v, c) \ bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_4(r, o, v) \ bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_4(r, o, d, c) \ bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_4(r, o, d, c) \ bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_poke_8(r, o, v) \ bus_space_poke_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_peek_8(r, o, vp) \ bus_space_peek_8((r)->r_bustag, (r)->r_bushandle, (o), (vp)) #define bus_read_8(r, o) \ bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_8(r, o, d, c) \ bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_8(r, o, d, c) \ bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_8(r, o, v, c) \ bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_8(r, o, v, c) \ bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_8(r, o, v) \ bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_8(r, o, d, c) \ bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_8(r, o, d, c) \ bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_stream_8(r, o) \ bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o)) #define bus_read_multi_stream_8(r, o, d, c) \ bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_read_region_stream_8(r, o, d, c) \ bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_set_multi_stream_8(r, o, v, c) \ bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_set_region_stream_8(r, o, v, c) \ bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c)) #define bus_write_stream_8(r, o, v) \ bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v)) #define bus_write_multi_stream_8(r, o, d, c) \ bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #define bus_write_region_stream_8(r, o, d, c) \ bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c)) #endif /* _KERNEL */ #endif /* !_SYS_BUS_H_ */ diff --git a/sys/xen/xenbus/xenbusb.c b/sys/xen/xenbus/xenbusb.c index 457d396c8f98..561f19f2522d 100644 --- a/sys/xen/xenbus/xenbusb.c +++ b/sys/xen/xenbus/xenbusb.c @@ -1,991 +1,988 @@ 
/****************************************************************************** * Copyright (C) 2010 Spectra Logic Corporation * Copyright (C) 2008 Doug Rabson * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005 XenSource Ltd * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /** * \file xenbusb.c * * \brief Shared support functions for managing the NewBus buses that contain * Xen front and back end device instances. * * The NewBus implementation of XenBus attaches a xenbusb_front and xenbusb_back * child bus to the xenstore device. This strategy allows the small differences * in the handling of XenBus operations for front and back devices to be handled * as overrides in xenbusb_front/back.c. Front and back specific device * classes are also provided so device drivers can register for the devices they * can handle without the need to filter within their probe routines. The * net result is a device hierarchy that might look like this: * * xenstore0/ * xenbusb_front0/ * xn0 * xbd0 * xbd1 * xenbusb_back0/ * xbbd0 * xnb0 * xnb1 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*------------------------- Private Functions --------------------------------*/ /** * \brief Deallocate XenBus device instance variables. * * \param ivars The instance variable block to free. */ static void xenbusb_free_child_ivars(struct xenbus_device_ivars *ivars) { if (ivars->xd_otherend_watch.node != NULL) { xs_unregister_watch(&ivars->xd_otherend_watch); free(ivars->xd_otherend_watch.node, M_XENBUS); ivars->xd_otherend_watch.node = NULL; } if (ivars->xd_local_watch.node != NULL) { xs_unregister_watch(&ivars->xd_local_watch); ivars->xd_local_watch.node = NULL; } if (ivars->xd_node != NULL) { free(ivars->xd_node, M_XENBUS); ivars->xd_node = NULL; } ivars->xd_node_len = 0; if (ivars->xd_type != NULL) { free(ivars->xd_type, M_XENBUS); ivars->xd_type = NULL; } if (ivars->xd_otherend_path != NULL) { free(ivars->xd_otherend_path, M_XENBUS); ivars->xd_otherend_path = NULL; } ivars->xd_otherend_path_len = 0; free(ivars, M_XENBUS); } /** * XenBus watch callback registered against the "state" XenStore * node of the other-end of a split device connection. 
 * * This callback is invoked whenever the state of a device instance's * peer changes. * * \param watch The xs_watch object used to register this callback * function. * \param vec An array of pointers to NUL terminated strings containing * watch event data. The vector should be indexed via the * xs_watch_type enum in xs_wire.h. * \param vec_size The number of elements in vec. */ static void xenbusb_otherend_watch_cb(struct xs_watch *watch, const char **vec, unsigned int vec_size __unused) { struct xenbus_device_ivars *ivars; device_t child; device_t bus; const char *path; enum xenbus_state newstate; ivars = (struct xenbus_device_ivars *)watch->callback_data; child = ivars->xd_dev; bus = device_get_parent(child); path = vec[XS_WATCH_PATH]; if (ivars->xd_otherend_path == NULL || strncmp(ivars->xd_otherend_path, path, ivars->xd_otherend_path_len)) return; newstate = xenbus_read_driver_state(ivars->xd_otherend_path); XENBUSB_OTHEREND_CHANGED(bus, child, newstate); } /** * XenBus watch callback registered against the XenStore sub-tree * representing the local half of a split device connection. * * This callback is invoked whenever any XenStore data in the subtree * is modified, either by us or another privileged domain. * * \param watch The xs_watch object used to register this callback * function. * \param vec An array of pointers to NUL terminated strings containing * watch event data. The vector should be indexed via the * xs_watch_type enum in xs_wire.h. * \param vec_size The number of elements in vec. * */ static void xenbusb_local_watch_cb(struct xs_watch *watch, const char **vec, unsigned int vec_size __unused) { struct xenbus_device_ivars *ivars; device_t child; device_t bus; const char *path; ivars = (struct xenbus_device_ivars *)watch->callback_data; child = ivars->xd_dev; bus = device_get_parent(child); path = vec[XS_WATCH_PATH]; if (ivars->xd_node == NULL || strncmp(ivars->xd_node, path, ivars->xd_node_len)) return; XENBUSB_LOCALEND_CHANGED(bus, child, &path[ivars->xd_node_len]); } /** * Search our internal record of configured devices (not the XenStore) * to determine if the XenBus device indicated by \a node is known to * the system. * * \param dev The XenBus bus instance to search for device children. * \param node The XenStore node path for the device to find. * * \return The device_t of the found device if any, or NULL. * * \note device_t is a pointer type, so it can be compared against * NULL for validity. */ static device_t xenbusb_device_exists(device_t dev, const char *node) { device_t *kids; device_t result; struct xenbus_device_ivars *ivars; int i, count; if (device_get_children(dev, &kids, &count)) return (NULL); result = NULL; for (i = 0; i < count; i++) { ivars = device_get_ivars(kids[i]); if (!strcmp(ivars->xd_node, node)) { result = kids[i]; break; } } free(kids, M_TEMP); return (result); } static void xenbusb_delete_child(device_t dev, device_t child) { struct xenbus_device_ivars *ivars; ivars = device_get_ivars(child); /* * We no longer care about the otherend of the * connection. Cancel the watches now so that we * don't try to handle an event for a partially * detached child. */ if (ivars->xd_otherend_watch.node != NULL) xs_unregister_watch(&ivars->xd_otherend_watch); if (ivars->xd_local_watch.node != NULL) xs_unregister_watch(&ivars->xd_local_watch); device_delete_child(dev, child); xenbusb_free_child_ivars(ivars); } 
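[Editor's note] Both watch callbacks above share one guard: an event is handled only when its path lies inside the watched subtree, via a length-bounded prefix comparison. A minimal sketch of that test as a standalone predicate (hypothetical helper, not part of this change; strncmp is available in the kernel via sys/systm.h):

#include <sys/param.h>
#include <sys/systm.h>

/*
 * Return true when "path" names the watched node "root" itself or any
 * node below it (e.g. "root/state"), mirroring the strncmp() guards in
 * xenbusb_otherend_watch_cb() and xenbusb_local_watch_cb().
 */
static inline bool
path_in_subtree(const char *root, size_t root_len, const char *path)
{
	return (root != NULL && strncmp(root, path, root_len) == 0);
}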
/** * \brief Verify that a child device still exists in the XenStore, * tearing it down if it does not. * * \param dev The NewBus device representing this XenBus bus. * \param child The NewBus device representing a child of dev%'s XenBus bus. */ static void xenbusb_verify_device(device_t dev, device_t child) { if (xs_exists(XST_NIL, xenbus_get_node(child), "") == 0) { /* * Device tree has been removed from XenBus. * Tear down the device. */ xenbusb_delete_child(dev, child); } } /** * \brief Enumerate the devices on a XenBus bus and register them with * the NewBus device tree. * * xenbusb_enumerate_bus() will create entries (in state DS_NOTPRESENT) * for nodes that appear in the XenStore, but will not invoke probe/attach * operations on drivers. Probe/Attach processing must be separately * performed via an invocation of xenbusb_probe_children(). This is usually * done via the xbs_probe_children task. * * \param xbs XenBus Bus device softc of the owner of the bus to enumerate. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xenbusb_enumerate_bus(struct xenbusb_softc *xbs) { const char **types; u_int type_idx; u_int type_count; int error; error = xs_directory(XST_NIL, xbs->xbs_node, "", &type_count, &types); if (error) return (error); for (type_idx = 0; type_idx < type_count; type_idx++) XENBUSB_ENUMERATE_TYPE(xbs->xbs_dev, types[type_idx]); free(types, M_XENSTORE); return (0); } /** * Handler for all generic XenBus device sysctl nodes. */ static int xenbusb_device_sysctl_handler(SYSCTL_HANDLER_ARGS) { device_t dev; const char *value; dev = (device_t)arg1; switch (arg2) { case XENBUS_IVAR_NODE: value = xenbus_get_node(dev); break; case XENBUS_IVAR_TYPE: value = xenbus_get_type(dev); break; case XENBUS_IVAR_STATE: value = xenbus_strstate(xenbus_get_state(dev)); break; case XENBUS_IVAR_OTHEREND_ID: return (sysctl_handle_int(oidp, NULL, xenbus_get_otherend_id(dev), req)); /* NOTREACHED */ case XENBUS_IVAR_OTHEREND_PATH: value = xenbus_get_otherend_path(dev); break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } /** * Create read-only sysctl nodes for xenbusb device ivar data. * * \param dev The XenBus device instance to register with sysctl. */ static void xenbusb_device_sysctl_init(device_t dev) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "xenstore_path", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, XENBUS_IVAR_NODE, xenbusb_device_sysctl_handler, "A", "XenStore path to device"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "xenbus_dev_type", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, XENBUS_IVAR_TYPE, xenbusb_device_sysctl_handler, "A", "XenBus device type"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "xenbus_connection_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, XENBUS_IVAR_STATE, xenbusb_device_sysctl_handler, "A", "XenBus state of peer connection"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "xenbus_peer_domid", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, XENBUS_IVAR_OTHEREND_ID, xenbusb_device_sysctl_handler, "I", "Xen domain ID of peer"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "xenstore_peer_path", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, XENBUS_IVAR_OTHEREND_PATH, xenbusb_device_sysctl_handler, "A", "XenStore path to peer device"); } 
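[Editor's note] The confighook logic that follows relies on FreeBSD's config_intrhook mechanism: a hook established before root mount stalls the boot until it is disestablished. A minimal sketch of the pattern (hypothetical names; the real wiring lives in xenbusb_attach() and xenbusb_release_confighook() below):

#include <sys/param.h>
#include <sys/kernel.h>

static struct intr_config_hook example_hook;	/* hypothetical */

static void
example_confighook_cb(void *arg __unused)
{
	/* Nothing to do; the hook exists only to hold up the boot. */
}

static void
example_hold_boot(void)
{
	example_hook.ich_func = example_confighook_cb;
	example_hook.ich_arg = NULL;
	(void)config_intrhook_establish(&example_hook);
}

static void
example_release_boot(void)
{
	/* Root mount may proceed once every hook has been released. */
	config_intrhook_disestablish(&example_hook);
}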
/** * \brief Decrement the number of XenBus child devices in the * connecting state by one and release the xbs_attach_ch * interrupt configuration hook if the connecting count * drops to zero. * * \param xbs XenBus Bus device softc of the owner of the bus. */ static void xenbusb_release_confighook(struct xenbusb_softc *xbs) { mtx_lock(&xbs->xbs_lock); KASSERT(xbs->xbs_connecting_children > 0, ("Connecting device count error\n")); xbs->xbs_connecting_children--; if (xbs->xbs_connecting_children == 0 && (xbs->xbs_flags & XBS_ATTACH_CH_ACTIVE) != 0) { xbs->xbs_flags &= ~XBS_ATTACH_CH_ACTIVE; mtx_unlock(&xbs->xbs_lock); config_intrhook_disestablish(&xbs->xbs_attach_ch); } else { mtx_unlock(&xbs->xbs_lock); } } /** * \brief Verify the existence of attached device instances and perform * probe/attach processing for newly arrived devices. * * \param dev The NewBus device representing this XenBus bus. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xenbusb_probe_children(device_t dev) { device_t *kids; struct xenbus_device_ivars *ivars; int i, count, error; if (device_get_children(dev, &kids, &count) == 0) { for (i = 0; i < count; i++) { if (device_get_state(kids[i]) != DS_NOTPRESENT) { /* * We already know about this one. * Make sure it's still here. */ xenbusb_verify_device(dev, kids[i]); continue; } error = device_probe_and_attach(kids[i]); if (error == ENXIO) { struct xenbusb_softc *xbs; /* * We don't have a PV driver for this device. * However, an emulated device we do support * may share this backend. Hide the node from * XenBus until the next rescan, but leave its * state unchanged so we don't inadvertently * prevent attachment of any emulated device. */ xenbusb_delete_child(dev, kids[i]); /* * Since the XenStore state of this device * still indicates a pending attach, manually * release its hold on the boot process. */ xbs = device_get_softc(dev); xenbusb_release_confighook(xbs); continue; } else if (error) { /* * Transition device to the closed state * so the world knows that attachment will * not occur. */ xenbus_set_state(kids[i], XenbusStateClosed); /* * Remove our record of this device. * So long as it remains in the closed * state in the XenStore, we will not find * it again. The state will only change * if the control domain actively reconfigures * this device. */ xenbusb_delete_child(dev, kids[i]); continue; } /* * Augment default newbus provided dynamic sysctl * variables with the standard ivar contents of * XenBus devices. */ xenbusb_device_sysctl_init(kids[i]); /* * Now that we have a driver managing this device * that can receive otherend state change events, * hook up a watch for them. */ ivars = device_get_ivars(kids[i]); xs_register_watch(&ivars->xd_otherend_watch); xs_register_watch(&ivars->xd_local_watch); } free(kids, M_TEMP); } return (0); } /** * \brief Task callback function to perform XenBus probe operations * from a known safe context. * * \param arg The NewBus device_t representing the bus instance * on which to perform probe processing. * \param pending The number of times this task was queued before it could * be run. */ static void xenbusb_probe_children_cb(void *arg, int pending __unused) { device_t dev = (device_t)arg; - /* - * Hold Giant until the Giant free newbus changes are committed. - */ - mtx_lock(&Giant); + bus_topo_lock(); xenbusb_probe_children(dev); - mtx_unlock(&Giant); + bus_topo_unlock(); } 
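[Editor's note] The task callback above is the consumer of the new API declared in sys/bus.h earlier in this patch: device-tree manipulation that previously hid under Giant now takes the dedicated newbus topology lock. A minimal sketch of the same pattern in a hypothetical driver task, with an assertion via bus_topo_mtx():

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical rescan task; arg is the bus whose children changed. */
static void
example_rescan_task(void *arg, int pending __unused)
{
	device_t bus = arg;

	bus_topo_lock();
	/* Adding, deleting, probing, or attaching children requires it. */
	mtx_assert(bus_topo_mtx(), MA_OWNED);
	(void)bus_generic_attach(bus);
	bus_topo_unlock();
}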
/** * \brief XenStore watch callback for the root node of the XenStore * subtree representing a XenBus. * * This callback performs, or delegates to the xbs_probe_children task, * all processing necessary to handle dynamic device arrival and departure * events from a XenBus. * * \param watch The XenStore watch object associated with this callback. * \param vec The XenStore watch event data. * \param len The number of fields in the event data stream. */ static void xenbusb_devices_changed(struct xs_watch *watch, const char **vec, unsigned int len) { struct xenbusb_softc *xbs; device_t dev; char *node; char *type; char *id; char *p; u_int component; xbs = (struct xenbusb_softc *)watch->callback_data; dev = xbs->xbs_dev; if (len <= XS_WATCH_PATH) { device_printf(dev, "xenbusb_devices_changed: " "Short Event Data.\n"); return; } node = strdup(vec[XS_WATCH_PATH], M_XENBUS); p = strchr(node, '/'); if (p == NULL) goto out; *p = 0; type = p + 1; p = strchr(type, '/'); if (p == NULL) goto out; *p++ = 0; /* * Extract the device ID. A device ID has one or more path * components separated by the '/' character. * * e.g. "<frontend vm id>/<frontend dev id>" for backend devices. */ id = p; for (component = 0; component < xbs->xbs_id_components; component++) { p = strchr(p, '/'); if (p == NULL) break; p++; } if (p != NULL) *p = 0; if (*id != 0 && component >= xbs->xbs_id_components - 1) { xenbusb_add_device(xbs->xbs_dev, type, id); taskqueue_enqueue(taskqueue_thread, &xbs->xbs_probe_children); } out: free(node, M_XENBUS); } 
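[Editor's note] To make the ID extraction in xenbusb_devices_changed() concrete, a worked example with hypothetical values:

/*
 * Hypothetical watch event (backend bus, xbs_id_components == 2):
 *
 *   vec[XS_WATCH_PATH] = "backend/vbd/2/51712"
 *
 * parses into:
 *
 *   node = "backend"     bus root
 *   type = "vbd"         device type
 *   id   = "2/51712"     <frontend vm id>/<frontend dev id>
 *
 * so xenbusb_add_device() is called with type "vbd" and id "2/51712",
 * and the xbs_probe_children task is queued to probe/attach it.
 */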
" "State %d\n", devpath, state); error = 0; goto out; } sx_init(&ivars->xd_lock, "xdlock"); ivars->xd_flags = XDF_CONNECTING; ivars->xd_node = strdup(devpath, M_XENBUS); ivars->xd_node_len = strlen(devpath); ivars->xd_type = strdup(type, M_XENBUS); ivars->xd_state = XenbusStateInitialising; error = XENBUSB_GET_OTHEREND_NODE(dev, ivars); if (error) { printf("xenbus_update_device: %s no otherend id\n", devpath); goto out; } statepath = malloc(ivars->xd_otherend_path_len + strlen("/state") + 1, M_XENBUS, M_WAITOK); sprintf(statepath, "%s/state", ivars->xd_otherend_path); ivars->xd_otherend_watch.node = statepath; ivars->xd_otherend_watch.callback = xenbusb_otherend_watch_cb; ivars->xd_otherend_watch.callback_data = (uintptr_t)ivars; /* * Other end state node watch, limit to one pending event * to prevent frontends from queuing too many events that * could cause resource starvation. */ ivars->xd_otherend_watch.max_pending = 1; ivars->xd_local_watch.node = ivars->xd_node; ivars->xd_local_watch.callback = xenbusb_local_watch_cb; ivars->xd_local_watch.callback_data = (uintptr_t)ivars; /* * Watch our local path, only writable by us or a privileged * domain, no need to limit. */ ivars->xd_local_watch.max_pending = 0; mtx_lock(&xbs->xbs_lock); xbs->xbs_connecting_children++; mtx_unlock(&xbs->xbs_lock); child = device_add_child(dev, NULL, -1); ivars->xd_dev = child; device_set_ivars(child, ivars); } out: sbuf_delete(devpath_sbuf); if (error != 0) xenbusb_free_child_ivars(ivars); return (error); } int xenbusb_attach(device_t dev, char *bus_node, u_int id_components) { struct xenbusb_softc *xbs; xbs = device_get_softc(dev); mtx_init(&xbs->xbs_lock, "xenbusb softc lock", NULL, MTX_DEF); xbs->xbs_node = bus_node; xbs->xbs_id_components = id_components; xbs->xbs_dev = dev; /* * Since XenBus buses are attached to the XenStore, and * the XenStore does not probe children until after interrupt * services are available, this config hook is used solely * to ensure that the remainder of the boot process (e.g. * mount root) is deferred until child devices are adequately * probed. We unblock the boot process as soon as the * connecting child count in our softc goes to 0. */ xbs->xbs_attach_ch.ich_func = xenbusb_nop_confighook_cb; xbs->xbs_attach_ch.ich_arg = dev; config_intrhook_establish(&xbs->xbs_attach_ch); xbs->xbs_flags |= XBS_ATTACH_CH_ACTIVE; xbs->xbs_connecting_children = 1; /* * The subtree for this bus type may not yet exist * causing initial enumeration to fail. We still * want to return success from our attach though * so that we are ready to handle devices for this * bus when they are dynamically attached to us * by a Xen management action. */ (void)xenbusb_enumerate_bus(xbs); xenbusb_probe_children(dev); xbs->xbs_device_watch.node = bus_node; xbs->xbs_device_watch.callback = xenbusb_devices_changed; xbs->xbs_device_watch.callback_data = (uintptr_t)xbs; /* * Allow for unlimited pending watches, as those are local paths * either controlled by the guest or only writable by privileged * domains. */ xbs->xbs_device_watch.max_pending = 0; TASK_INIT(&xbs->xbs_probe_children, 0, xenbusb_probe_children_cb, dev); xs_register_watch(&xbs->xbs_device_watch); xenbusb_release_confighook(xbs); return (0); } int xenbusb_resume(device_t dev) { device_t *kids; struct xenbus_device_ivars *ivars; int i, count, error; char *statepath; /* * We must re-examine each device and find the new path for * its backend. 
int xenbusb_resume(device_t dev) { device_t *kids; struct xenbus_device_ivars *ivars; int i, count, error; char *statepath; /* * We must re-examine each device and find the new path for * its backend. */ if (device_get_children(dev, &kids, &count) == 0) { for (i = 0; i < count; i++) { if (device_get_state(kids[i]) == DS_NOTPRESENT) continue; if (xen_suspend_cancelled) { DEVICE_RESUME(kids[i]); continue; } ivars = device_get_ivars(kids[i]); xs_unregister_watch(&ivars->xd_otherend_watch); xenbus_set_state(kids[i], XenbusStateInitialising); /* * Find the new backend details and * re-register our watch. */ error = XENBUSB_GET_OTHEREND_NODE(dev, ivars); if (error) return (error); statepath = malloc(ivars->xd_otherend_path_len + strlen("/state") + 1, M_XENBUS, M_WAITOK); sprintf(statepath, "%s/state", ivars->xd_otherend_path); free(ivars->xd_otherend_watch.node, M_XENBUS); ivars->xd_otherend_watch.node = statepath; DEVICE_RESUME(kids[i]); xs_register_watch(&ivars->xd_otherend_watch); #if 0 /* * Can't do this yet since we are running in * the xenwatch thread and if we sleep here, * we will stop delivering watch notifications * and the device will never come back online. */ sx_xlock(&ivars->xd_lock); while (ivars->xd_state != XenbusStateClosed && ivars->xd_state != XenbusStateConnected) sx_sleep(&ivars->xd_state, &ivars->xd_lock, 0, "xdresume", 0); sx_xunlock(&ivars->xd_lock); #endif } free(kids, M_TEMP); } return (0); } int xenbusb_print_child(device_t dev, device_t child) { struct xenbus_device_ivars *ivars = device_get_ivars(child); int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at %s", ivars->xd_node); retval += bus_print_child_footer(dev, child); return (retval); } int xenbusb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct xenbus_device_ivars *ivars = device_get_ivars(child); switch (index) { case XENBUS_IVAR_NODE: *result = (uintptr_t) ivars->xd_node; return (0); case XENBUS_IVAR_TYPE: *result = (uintptr_t) ivars->xd_type; return (0); case XENBUS_IVAR_STATE: *result = (uintptr_t) ivars->xd_state; return (0); case XENBUS_IVAR_OTHEREND_ID: *result = (uintptr_t) ivars->xd_otherend_id; return (0); case XENBUS_IVAR_OTHEREND_PATH: *result = (uintptr_t) ivars->xd_otherend_path; return (0); } return (ENOENT); } int xenbusb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct xenbus_device_ivars *ivars = device_get_ivars(child); enum xenbus_state newstate; int currstate; switch (index) { case XENBUS_IVAR_STATE: { int error; newstate = (enum xenbus_state)value; sx_xlock(&ivars->xd_lock); if (ivars->xd_state == newstate) { error = 0; goto out; } error = xs_scanf(XST_NIL, ivars->xd_node, "state", NULL, "%d", &currstate); if (error) goto out; do { error = xs_printf(XST_NIL, ivars->xd_node, "state", "%d", newstate); } while (error == EAGAIN); if (error) { /* * Avoid looping through xenbus_dev_fatal() * which calls xenbus_write_ivar to set the * state to closing. */ if (newstate != XenbusStateClosing) xenbus_dev_fatal(dev, error, "writing new state"); goto out; } ivars->xd_state = newstate; if ((ivars->xd_flags & XDF_CONNECTING) != 0 && (newstate == XenbusStateClosed || newstate == XenbusStateConnected)) { struct xenbusb_softc *xbs; ivars->xd_flags &= ~XDF_CONNECTING; xbs = device_get_softc(dev); xenbusb_release_confighook(xbs); } wakeup(&ivars->xd_state); out: sx_xunlock(&ivars->xd_lock); return (error); } case XENBUS_IVAR_NODE: case XENBUS_IVAR_TYPE: case XENBUS_IVAR_OTHEREND_ID: case XENBUS_IVAR_OTHEREND_PATH: /* * These variables are read-only. 
*/ return (EINVAL); } return (ENOENT); } void xenbusb_otherend_changed(device_t bus, device_t child, enum xenbus_state state) { XENBUS_OTHEREND_CHANGED(child, state); } void xenbusb_localend_changed(device_t bus, device_t child, const char *path) { if (strcmp(path, "/state") != 0) { struct xenbus_device_ivars *ivars; ivars = device_get_ivars(child); sx_xlock(&ivars->xd_lock); ivars->xd_state = xenbus_read_driver_state(ivars->xd_node); sx_xunlock(&ivars->xd_lock); } XENBUS_LOCALEND_CHANGED(child, path); }
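[Editor's note] For context on how the ivar plumbing above is consumed: a child driver reads its identity through the generated xenbus accessors and publishes state through xenbus_set_state(), which funnels into xenbusb_write_ivar(). A minimal sketch of a hypothetical frontend's reaction to peer state changes (the transitions shown are the conventional ones, not mandated by this file):

#include <sys/param.h>
#include <sys/bus.h>

#include <xen/xenbus/xenbusvar.h>

static void
example_front_otherend_changed(device_t dev, enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitWait:
		/* Peer is waiting: publish our setup, then advance. */
		xenbus_set_state(dev, XenbusStateInitialised);
		break;
	case XenbusStateConnected:
		xenbus_set_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
	case XenbusStateClosed:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	default:
		break;
	}
}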