diff --git a/sys/arm64/nvidia/tegra210/max77620_rtc.c b/sys/arm64/nvidia/tegra210/max77620_rtc.c index dc82d4b387b9..29808af27819 100644 --- a/sys/arm64/nvidia/tegra210/max77620_rtc.c +++ b/sys/arm64/nvidia/tegra210/max77620_rtc.c @@ -1,419 +1,419 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "ofw_iicbus_if.h" #include "max77620.h" #define MAX77620_RTC_INT 0x00 #define MAX77620_RTC_INTM 0x01 #define MAX77620_RTC_CONTROLM 0x02 #define MAX77620_RTC_CONTROL 0x03 #define RTC_CONTROL_MODE_24 (1 << 1) #define RTC_CONTROL_BCD_EN (1 << 0) #define MAX77620_RTC_UPDATE0 0x04 #define RTC_UPDATE0_RTC_RBUDR (1 << 4) #define RTC_UPDATE0_RTC_UDR (1 << 0) #define MAX77620_WTSR_SMPL_CNTL 0x06 #define MAX77620_RTC_SEC 0x07 #define MAX77620_RTC_MIN 0x08 #define MAX77620_RTC_HOUR 0x09 #define MAX77620_RTC_WEEKDAY 0x0A #define MAX77620_RTC_MONTH 0x0B #define MAX77620_RTC_YEAR 0x0C #define MAX77620_RTC_DATE 0x0D #define MAX77620_ALARM1_SEC 0x0E #define MAX77620_ALARM1_MIN 0x0F #define MAX77620_ALARM1_HOUR 0x10 #define MAX77620_ALARM1_WEEKDAY 0x11 #define MAX77620_ALARM1_MONTH 0x12 #define MAX77620_ALARM1_YEAR 0x13 #define MAX77620_ALARM1_DATE 0x14 #define MAX77620_ALARM2_SEC 0x15 #define MAX77620_ALARM2_MIN 0x16 #define MAX77620_ALARM2_HOUR 0x17 #define MAX77620_ALARM2_WEEKDAY 0x18 #define MAX77620_ALARM2_MONTH 0x19 #define MAX77620_ALARM2_YEAR 0x1A #define MAX77620_ALARM2_DATE 0x1B #define MAX77620_RTC_START_YEAR 2000 #define MAX77620_RTC_I2C_ADDR 0x68 #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define LOCK_INIT(_sc) sx_init(&(_sc)->lock, "max77620_rtc") #define LOCK_DESTROY(_sc) sx_destroy(&(_sc)->lock); struct max77620_rtc_softc { device_t dev; struct sx lock; int bus_addr; }; char max77620_rtc_compat[] = "maxim,max77620_rtc"; /* * Raw register access function. 
*/ static int max77620_rtc_read(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *val) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, 1, val}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_read_buf(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_write(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t val) { uint8_t data[2]; int rv; struct iic_msg msgs[1] = { {0, IIC_M_WR, 2, data}, }; msgs[0].slave = sc->bus_addr; data[0] = reg; data[1] = val; rv = iicbus_transfer(sc->dev, msgs, 1); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_write_buf(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t data[1]; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, data}, {0, IIC_M_WR | IIC_M_NOSTART, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; data[0] = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_modify(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t clear, uint8_t set) { uint8_t val; int rv; rv = max77620_rtc_read(sc, reg, &val); if (rv != 0) return (rv); val &= ~clear; val |= set; rv = max77620_rtc_write(sc, reg, val); if (rv != 0) return 
(rv); return (0); } static int max77620_rtc_update(struct max77620_rtc_softc *sc, bool for_read) { uint8_t reg; int rv; reg = for_read ? RTC_UPDATE0_RTC_RBUDR: RTC_UPDATE0_RTC_UDR; rv = max77620_rtc_modify(sc, MAX77620_RTC_UPDATE0, reg, reg); if (rv != 0) return (rv); DELAY(16000); return (rv); } static int max77620_rtc_gettime(device_t dev, struct timespec *ts) { struct max77620_rtc_softc *sc; struct clocktime ct; uint8_t buf[7]; int rv; sc = device_get_softc(dev); LOCK(sc); rv = max77620_rtc_update(sc, true); if (rv != 0) { UNLOCK(sc); device_printf(sc->dev, "Failed to strobe RTC data\n"); return (rv); } rv = max77620_rtc_read_buf(sc, MAX77620_RTC_SEC, buf, nitems(buf)); UNLOCK(sc); if (rv != 0) { device_printf(sc->dev, "Failed to read RTC data\n"); return (rv); } ct.nsec = 0; ct.sec = bcd2bin(buf[0] & 0x7F); ct.min = bcd2bin(buf[1] & 0x7F); ct.hour = bcd2bin(buf[2] & 0x3F); ct.dow = ffs(buf[3] & 07); ct.mon = bcd2bin(buf[4] & 0x1F); ct.year = bcd2bin(buf[5] & 0x7F) + MAX77620_RTC_START_YEAR; ct.day = bcd2bin(buf[6] & 0x3F); return (clock_ct_to_ts(&ct, ts)); } static int max77620_rtc_settime(device_t dev, struct timespec *ts) { struct max77620_rtc_softc *sc; struct clocktime ct; uint8_t buf[7]; int rv; sc = device_get_softc(dev); clock_ts_to_ct(ts, &ct); if (ct.year < MAX77620_RTC_START_YEAR) return (EINVAL); buf[0] = bin2bcd(ct.sec); buf[1] = bin2bcd(ct.min); buf[2] = bin2bcd(ct.hour); buf[3] = 1 << ct.dow; buf[4] = bin2bcd(ct.mon); buf[5] = bin2bcd(ct.year - MAX77620_RTC_START_YEAR); buf[6] = bin2bcd(ct.day); LOCK(sc); rv = max77620_rtc_write_buf(sc, MAX77620_RTC_SEC, buf, nitems(buf)); if (rv != 0) { UNLOCK(sc); device_printf(sc->dev, "Failed to write RTC data\n"); return (rv); } rv = max77620_rtc_update(sc, false); UNLOCK(sc); if (rv != 0) { device_printf(sc->dev, "Failed to update RTC data\n"); return (rv); } return (0); } static int max77620_rtc_probe(device_t dev) { const char *compat; /* * TODO: * ofw_bus_is_compatible() should use compat string from 
devinfo cache * maximum size of OFW property should be defined in public header */ if ((compat = ofw_bus_get_compat(dev)) == NULL) return (ENXIO); if (strncasecmp(compat, max77620_rtc_compat, 255) != 0) return (ENXIO); device_set_desc(dev, "MAX77620 RTC"); return (BUS_PROBE_DEFAULT); } static int max77620_rtc_attach(device_t dev) { struct max77620_rtc_softc *sc; uint8_t reg; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->bus_addr = iicbus_get_addr(dev); LOCK_INIT(sc); reg = RTC_CONTROL_MODE_24 | RTC_CONTROL_BCD_EN; rv = max77620_rtc_modify(sc, MAX77620_RTC_CONTROLM, reg, reg); if (rv != 0) { device_printf(sc->dev, "Failed to configure RTC\n"); goto fail; } rv = max77620_rtc_modify(sc, MAX77620_RTC_CONTROL, reg, reg); if (rv != 0) { device_printf(sc->dev, "Failed to configure RTC\n"); goto fail; } rv = max77620_rtc_update(sc, false); if (rv != 0) { device_printf(sc->dev, "Failed to update RTC data\n"); return (rv); } clock_register(sc->dev, 1000000); return (bus_generic_attach(dev)); fail: LOCK_DESTROY(sc); return (rv); } static int max77620_rtc_detach(device_t dev) { struct max77620_softc *sc; sc = device_get_softc(dev); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } /* * The secondary address of MAX77620 (RTC function) is not in DT, * add it manualy as subdevice */ int max77620_rtc_create(struct max77620_softc *sc, phandle_t node) { device_t parent, child; int rv; parent = device_get_parent(sc->dev); - child = BUS_ADD_CHILD(parent, 0, NULL, -1); + child = BUS_ADD_CHILD(parent, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "Cannot create MAX77620 RTC device.\n"); return (ENXIO); } rv = OFW_IICBUS_SET_DEVINFO(parent, child, -1, "rtc@68", max77620_rtc_compat, MAX77620_RTC_I2C_ADDR << 1); if (rv != 0) { device_printf(sc->dev, "Cannot setup MAX77620 RTC device.\n"); return (ENXIO); } return (0); } static device_method_t max77620_rtc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, max77620_rtc_probe), 
DEVMETHOD(device_attach, max77620_rtc_attach), DEVMETHOD(device_detach, max77620_rtc_detach), /* RTC interface */ DEVMETHOD(clock_gettime, max77620_rtc_gettime), DEVMETHOD(clock_settime, max77620_rtc_settime), DEVMETHOD_END }; static DEFINE_CLASS_0(rtc, max77620_rtc_driver, max77620_rtc_methods, sizeof(struct max77620_rtc_softc)); EARLY_DRIVER_MODULE(max77620rtc_, iicbus, max77620_rtc_driver, NULL, NULL, 74); diff --git a/sys/crypto/openssl/ossl.c b/sys/crypto/openssl/ossl.c index 031f447e45a5..c2ca28133a78 100644 --- a/sys/crypto/openssl/ossl.c +++ b/sys/crypto/openssl/ossl.c @@ -1,484 +1,484 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Netflix, Inc * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /* * A driver for the OpenCrypto framework which uses assembly routines * from OpenSSL. */ #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" static MALLOC_DEFINE(M_OSSL, "ossl", "OpenSSL crypto"); static void ossl_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "ossl", -1) == NULL) - BUS_ADD_CHILD(parent, 10, "ossl", -1); + BUS_ADD_CHILD(parent, 10, "ossl", DEVICE_UNIT_ANY); } static int ossl_probe(device_t dev) { device_set_desc(dev, "OpenSSL crypto"); return (BUS_PROBE_DEFAULT); } static int ossl_attach(device_t dev) { struct ossl_softc *sc; sc = device_get_softc(dev); sc->has_aes = sc->has_aes_gcm = false; ossl_cpuid(sc); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ossl_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->sc_cid < 0) { device_printf(dev, "failed to allocate crypto driver id\n"); return (ENXIO); } return (0); } static int ossl_detach(device_t dev) { struct ossl_softc *sc; sc = device_get_softc(dev); crypto_unregister_all(sc->sc_cid); return (0); } static struct auth_hash * ossl_lookup_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: return (&ossl_hash_sha1); case CRYPTO_SHA2_224: case CRYPTO_SHA2_224_HMAC: return (&ossl_hash_sha224); case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: return (&ossl_hash_sha256); case CRYPTO_SHA2_384: case 
CRYPTO_SHA2_384_HMAC: return (&ossl_hash_sha384); case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: return (&ossl_hash_sha512); case CRYPTO_POLY1305: return (&ossl_hash_poly1305); default: return (NULL); } } static struct ossl_cipher* ossl_lookup_cipher(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (NULL); } return (&ossl_cipher_aes_cbc); case CRYPTO_AES_NIST_GCM_16: switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (NULL); } return (&ossl_cipher_aes_gcm); case CRYPTO_CHACHA20: if (csp->csp_cipher_klen != CHACHA_KEY_SIZE) return (NULL); return (&ossl_cipher_chacha20); default: return (NULL); } } static int ossl_probesession(device_t dev, const struct crypto_session_params *csp) { struct ossl_softc *sc = device_get_softc(dev); if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (ossl_lookup_hash(csp) == NULL) return (EINVAL); break; case CSP_MODE_CIPHER: if (csp->csp_cipher_alg != CRYPTO_CHACHA20 && !sc->has_aes) return (EINVAL); if (ossl_lookup_cipher(csp) == NULL) return (EINVAL); break; case CSP_MODE_ETA: if (!sc->has_aes || csp->csp_cipher_alg == CRYPTO_CHACHA20 || ossl_lookup_hash(csp) == NULL || ossl_lookup_cipher(csp) == NULL) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_CHACHA20_POLY1305: break; case CRYPTO_AES_NIST_GCM_16: if (!sc->has_aes_gcm || ossl_lookup_cipher(csp) == NULL) return (EINVAL); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen != 0 && csp->csp_auth_mlen != GMAC_DIGEST_LEN) return (EINVAL); break; default: return (EINVAL); } break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void ossl_newsession_hash(struct ossl_session *s, const struct crypto_session_params *csp) { struct 
auth_hash *axf; axf = ossl_lookup_hash(csp); s->hash.axf = axf; if (csp->csp_auth_mlen == 0) s->hash.mlen = axf->hashsize; else s->hash.mlen = csp->csp_auth_mlen; if (csp->csp_auth_klen == 0) { axf->Init(&s->hash.ictx); } else { if (csp->csp_auth_key != NULL) { fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); if (axf->Setkey != NULL) { axf->Init(&s->hash.ictx); axf->Setkey(&s->hash.ictx, csp->csp_auth_key, csp->csp_auth_klen); } else { hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen, &s->hash.ictx); hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen, &s->hash.octx); } fpu_kern_leave(curthread, NULL); } } } static int ossl_newsession_cipher(struct ossl_session *s, const struct crypto_session_params *csp) { struct ossl_cipher *cipher; int error = 0; cipher = ossl_lookup_cipher(csp); if (cipher == NULL) return (EINVAL); s->cipher.cipher = cipher; if (csp->csp_cipher_key == NULL) return (0); fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); if (cipher->set_encrypt_key != NULL) { error = cipher->set_encrypt_key(csp->csp_cipher_key, 8 * csp->csp_cipher_klen, &s->cipher.enc_ctx); if (error != 0) { fpu_kern_leave(curthread, NULL); return (error); } } if (cipher->set_decrypt_key != NULL) error = cipher->set_decrypt_key(csp->csp_cipher_key, 8 * csp->csp_cipher_klen, &s->cipher.dec_ctx); fpu_kern_leave(curthread, NULL); return (error); } static int ossl_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ossl_session *s; int error = 0; s = crypto_get_driver_session(cses); switch (csp->csp_mode) { case CSP_MODE_DIGEST: ossl_newsession_hash(s, csp); break; case CSP_MODE_CIPHER: error = ossl_newsession_cipher(s, csp); break; case CSP_MODE_ETA: ossl_newsession_hash(s, csp); error = ossl_newsession_cipher(s, csp); break; case CSP_MODE_AEAD: if (csp->csp_cipher_alg != CRYPTO_CHACHA20_POLY1305) error = ossl_newsession_cipher(s, csp); break; default: __assert_unreachable(); } return (error); } static int 
ossl_process_hash(struct ossl_session *s, struct cryptop *crp, const struct crypto_session_params *csp) { struct ossl_hash_context ctx; char digest[HASH_MAX_LEN]; struct auth_hash *axf; int error; axf = s->hash.axf; if (crp->crp_auth_key == NULL) { ctx = s->hash.ictx; } else { if (axf->Setkey != NULL) { axf->Init(&ctx); axf->Setkey(&ctx, crp->crp_auth_key, csp->csp_auth_klen); } else { hmac_init_ipad(axf, crp->crp_auth_key, csp->csp_auth_klen, &ctx); } } if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error) goto out; error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error) goto out; axf->Final(digest, &ctx); if (csp->csp_auth_klen != 0 && axf->Setkey == NULL) { if (crp->crp_auth_key == NULL) ctx = s->hash.octx; else hmac_init_opad(axf, crp->crp_auth_key, csp->csp_auth_klen, &ctx); axf->Update(&ctx, digest, axf->hashsize); axf->Final(digest, &ctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { char digest2[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, s->hash.mlen, digest2); if (timingsafe_bcmp(digest, digest2, s->hash.mlen) != 0) error = EBADMSG; explicit_bzero(digest2, sizeof(digest2)); } else { crypto_copyback(crp, crp->crp_digest_start, s->hash.mlen, digest); } explicit_bzero(digest, sizeof(digest)); out: explicit_bzero(&ctx, sizeof(ctx)); return (error); } static int ossl_process_cipher(struct ossl_session *s, struct cryptop *crp, const struct crypto_session_params *csp) { return (s->cipher.cipher->process(&s->cipher, crp, csp)); } static int ossl_process_eta(struct ossl_session *s, struct cryptop *crp, const struct crypto_session_params *csp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = s->cipher.cipher->process(&s->cipher, crp, csp); if (error == 0) error = ossl_process_hash(s, crp, csp); } else { error = ossl_process_hash(s, crp, csp); if (error 
== 0) error = s->cipher.cipher->process(&s->cipher, crp, csp); } return (error); } static int ossl_process_aead(struct ossl_session *s, struct cryptop *crp, const struct crypto_session_params *csp) { if (csp->csp_cipher_alg == CRYPTO_CHACHA20_POLY1305) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) return (ossl_chacha20_poly1305_encrypt(crp, csp)); else return (ossl_chacha20_poly1305_decrypt(crp, csp)); } else { return (s->cipher.cipher->process(&s->cipher, crp, csp)); } } static int ossl_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ossl_session *s; int error; bool fpu_entered; s = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); if (is_fpu_kern_thread(0)) { fpu_entered = false; } else { fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); fpu_entered = true; } switch (csp->csp_mode) { case CSP_MODE_DIGEST: error = ossl_process_hash(s, crp, csp); break; case CSP_MODE_CIPHER: error = ossl_process_cipher(s, crp, csp); break; case CSP_MODE_ETA: error = ossl_process_eta(s, crp, csp); break; case CSP_MODE_AEAD: error = ossl_process_aead(s, crp, csp); break; default: __assert_unreachable(); } if (fpu_entered) fpu_kern_leave(curthread, NULL); crp->crp_etype = error; crypto_done(crp); return (0); } static device_method_t ossl_methods[] = { DEVMETHOD(device_identify, ossl_identify), DEVMETHOD(device_probe, ossl_probe), DEVMETHOD(device_attach, ossl_attach), DEVMETHOD(device_detach, ossl_detach), DEVMETHOD(cryptodev_probesession, ossl_probesession), DEVMETHOD(cryptodev_newsession, ossl_newsession), DEVMETHOD(cryptodev_process, ossl_process), DEVMETHOD_END }; static driver_t ossl_driver = { "ossl", ossl_methods, sizeof(struct ossl_softc) }; DRIVER_MODULE(ossl, nexus, ossl_driver, NULL, NULL); MODULE_VERSION(ossl, 1); MODULE_DEPEND(ossl, crypto, 1, 1, 1); diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 91a1636f2808..b7d7277eb310 100644 --- a/sys/dev/acpica/acpi.c +++ 
b/sys/dev/acpica/acpi.c @@ -1,4671 +1,4671 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. 
*/ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE 
h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, 
acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. 
*/ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. 
*/
TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);

/* When set, run the full suspend path but do not actually enter the state. */
int acpi_susp_bounce;
SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce,
    0, "Don't actually suspend, just test devices.");

#if defined(__amd64__) || defined(__i386__)
int acpi_override_isa_irq_polarity;
#endif

/*
 * ACPI standard UUID for Device Specific Data Package
 * "Device Properties UUID for _DSD" Rev. 2.0
 */
static const struct uuid acpi_dsd_uuid = {
	0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91,
	{ 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 }
};

/*
 * ACPI can only be loaded as a module by the loader; activating it after
 * system bootstrap time is not useful, and can be fatal to the system.
 * It also cannot be unloaded, since the entire system bus hierarchy hangs
 * off it.
 */
static int
acpi_modevent(struct module *mod, int event, void *junk)
{
	switch (event) {
	case MOD_LOAD:
		/* Only loadable while the system is still cold-booting. */
		if (!cold) {
			printf("The ACPI driver cannot be loaded after boot.\n");
			return (EPERM);
		}
		break;
	case MOD_UNLOAD:
		/* Refuse unload once ACPI is the active power-management type. */
		if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
			return (EBUSY);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * Perform early initialization: bring up the ACPICA subsystem, pre-allocate
 * table descriptors, and apply platform quirks.  Idempotent; returns AE_OK
 * on success or an ACPICA status code on failure.
 */
ACPI_STATUS
acpi_Startup(void)
{
	static int started = 0;
	ACPI_STATUS status;
	int val;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/* Only run the startup code once. The MADT driver also calls this. */
	if (started)
		return_VALUE (AE_OK);
	started = 1;

	/*
	 * Initialize the ACPICA subsystem.
	 */
	if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) {
		printf("ACPI: Could not initialize Subsystem: %s\n",
		    AcpiFormatException(status));
		return_VALUE (status);
	}

	/*
	 * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
	 * if more tables exist.
	 */
	if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
		printf("ACPI: Table initialisation failed: %s\n",
		    AcpiFormatException(status));
		return_VALUE (status);
	}

	/* Set up any quirks we have for this system. */
	if (acpi_quirks == ACPI_Q_OK)
		acpi_table_quirks(&acpi_quirks);

	/* If the user manually set the disabled hint to 0, force-enable ACPI. */
	if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
		acpi_quirks &= ~ACPI_Q_BROKEN;
	if (acpi_quirks & ACPI_Q_BROKEN) {
		printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n");
		status = AE_SUPPORT;
	}

	return_VALUE (status);
}

/*
 * Detect ACPI and perform early initialisation.
 * Returns 0 when ACPI is present and usable, ENXIO otherwise.
 */
int
acpi_identify(void)
{
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_HEADER *rsdt;
	ACPI_PHYSICAL_ADDRESS paddr;
	struct sbuf sb;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (!cold)
		return (ENXIO);

	/* Check that we haven't been disabled with a hint. */
	if (resource_disabled("acpi", 0))
		return (ENXIO);

	/* Check for other PM systems. */
	if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
	    power_pm_get_type() != POWER_PM_TYPE_ACPI) {
		printf("ACPI identify failed, other PM system enabled.\n");
		return (ENXIO);
	}

	/* Initialize root tables. */
	if (ACPI_FAILURE(acpi_Startup())) {
		printf("ACPI: Try disabling either ACPI or apic support.\n");
		return (ENXIO);
	}

	if ((paddr = AcpiOsGetRootPointer()) == 0 ||
	    (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
		return (ENXIO);
	/* Prefer the 64-bit XSDT pointer when the RSDP revision provides one. */
	if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
		paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
	else
		paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
	AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));

	if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
		return (ENXIO);
	/* Build the acpi0 device description from the OEM table headers. */
	sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
	sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
	sbuf_trim(&sb);
	sbuf_putc(&sb, ' ');
	sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sbuf_delete(&sb);
	AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));

	snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);

	return (0);
}

/*
 * Fetch some descriptive data from ACPI to put in our
attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. */ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. 
*/ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. 
*/ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when close the Lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); #if defined(__amd64__) || defined(__i386__) /* * Enable workaround for incorrect ISA IRQ polarity by default on * systems with Intel CPUs. */ if (cpu_vendor_id == CPU_VENDOR_INTEL) acpi_override_isa_irq_polarity = 1; SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "override_isa_irq_polarity", CTLFLAG_RDTUN, &acpi_override_isa_irq_polarity, 0, "Force active-hi polarity for edge-triggered ISA IRQs"); #endif /* * Default to 1 second before sleeping to give some machines time to * stabilize. 
*/ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable eg. via userland policy manager. 
*/ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states. */ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. 
*/
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dstate = state;
		/* Only set a D-state if the child is attached and _SxD maps one. */
		if (device_is_attached(child) &&
		    acpi_device_pwr_for_sleep(dev, child, &dstate) == 0)
			acpi_set_powerstate(child, dstate);
	}
	free(devlist, M_TEMP);
}

/*
 * Bus suspend method: suspend the children first, then power them down
 * (to D3) only if every suspend succeeded.
 */
static int
acpi_suspend(device_t dev)
{
	int error;

	bus_topo_assert();

	error = bus_generic_suspend(dev);
	if (error == 0)
		acpi_set_power_children(dev, ACPI_STATE_D3);

	return (error);
}

/*
 * Bus resume method: restore children to D0 before resuming them.
 */
static int
acpi_resume(device_t dev)
{
	bus_topo_assert();

	acpi_set_power_children(dev, ACPI_STATE_D0);

	return (bus_generic_resume(dev));
}

/*
 * Bus shutdown method.
 */
static int
acpi_shutdown(device_t dev)
{
	bus_topo_assert();

	/* Allow children to shutdown first. */
	bus_generic_shutdown(dev);

	/*
	 * Enable any GPEs that are able to power-on the system (i.e., RTC).
	 * Also, disable any that are not valid for this state (most).
	 */
	acpi_wake_prep_walk(ACPI_STATE_S5);

	return (0);
}

/*
 * Handle a new device being added
 */
static device_t
acpi_add_child(device_t bus, u_int order, const char *name, int unit)
{
	struct acpi_device *ad;
	device_t child;

	if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
		return (NULL);

	resource_list_init(&ad->ad_rl);

	child = device_add_child_ordered(bus, order, name, unit);
	/* The ivars are owned by the child; free them if it wasn't created. */
	if (child != NULL)
		device_set_ivars(child, ad);
	else
		free(ad, M_ACPIDEV);
	return (child);
}

/*
 * Print a child device line, including its resource list and flags.
 * Returns the number of characters printed.
 */
static int
acpi_print_child(device_t bus, device_t child)
{
	struct acpi_device *adev = device_get_ivars(child);
	struct resource_list *rl = &adev->ad_rl;
	int retval = 0;

	retval += bus_print_child_header(bus, child);
	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
	retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
	retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd");
	if (device_get_flags(child))
		retval += printf(" flags %#x", device_get_flags(child));
	retval += bus_print_child_domain(bus, child);
	retval += bus_print_child_footer(bus, child);

	return (retval);
}

/*
 * If this
device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. * * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? 
adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for ACPI locator may be redundant. However, this routine is * suitable for both busses whose only locator is ACPI and as a building block * for busses that have multiple locators to cope with. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long table * 98. This implements the relevant bits from that * table. Newer versions appear to have not required * anything new. The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. 
*/ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion. */ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int 
index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. */ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. 
*/ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return false; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return false; } /* * If either the I/O address and/or the memory address matched, then * assumed this devices matches and that any mismatch in other resources * will be resolved by siltently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return false; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return false; } return matches; } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. 
If any other error occurs, -1 is returned.
 */
static int
acpi_parse_pxm(device_t dev)
{
#ifdef NUMA
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	ACPI_HANDLE handle;
	ACPI_STATUS status;
	int pxm;

	handle = acpi_get_handle(dev);
	if (handle == NULL)
		return (-2);
	status = acpi_GetInteger(handle, "_PXM", &pxm);
	if (ACPI_SUCCESS(status))
		return (acpi_map_pxm_to_vm_domainid(pxm));
	if (status == AE_NOT_FOUND)
		return (-2);
#endif
#endif
	/* No NUMA support compiled in, or _PXM evaluation failed. */
	return (-1);
}

/*
 * Return the CPU set for a child device, restricted to the child's NUMA
 * domain when it has a usable _PXM; otherwise defer to the generic method.
 */
int
acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
    cpuset_t *cpuset)
{
	int d, error;

	d = acpi_parse_pxm(child);
	if (d < 0)
		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));

	switch (op) {
	case LOCAL_CPUS:
		if (setsize != sizeof(cpuset_t))
			return (EINVAL);
		*cpuset = cpuset_domain[d];
		return (0);
	case INTR_CPUS:
		error = bus_generic_get_cpus(dev, child, op, setsize, cpuset);
		if (error != 0)
			return (error);
		if (setsize != sizeof(cpuset_t))
			return (EINVAL);
		/* Intersect the parent's interrupt CPUs with our domain. */
		CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
		return (0);
	default:
		return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
	}
}

/*
 * Fetch the NUMA domain for the given device 'dev'.
 *
 * If a device has a _PXM method, map that to a NUMA domain.
 * Otherwise, pass the request up to the parent.
 * If there's no matching domain or the domain cannot be
 * determined, return ENOENT.
 */
int
acpi_get_domain(device_t dev, device_t child, int *domain)
{
	int d;

	d = acpi_parse_pxm(child);
	if (d >= 0) {
		*domain = d;
		return (0);
	}
	/* -1 means _PXM exists but the domain could not be mapped. */
	if (d == -1)
		return (ENOENT);

	/* No _PXM node; go up a level */
	return (bus_generic_get_domain(dev, child, domain));
}

/*
 * Map a resource type to the rman pool that manages it, or NULL when the
 * type is not managed by this bus.
 */
static struct rman *
acpi_get_rman(device_t bus, int type, u_int flags)
{
	/* Only memory and IO resources are managed. */
	switch (type) {
	case SYS_RES_IOPORT:
		return (&acpi_rman_io);
	case SYS_RES_MEMORY:
		return (&acpi_rman_mem);
	default:
		return (NULL);
	}
}

/*
 * Pre-allocate/manage all memory and IO resources.
Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching a PCI device_t enumerated via * PCI bus scanning, the device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. 
*/ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. 
*/ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved. */ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. 
*/ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. */ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != rman_get_type(r) || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. 
*/ if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_release_resource(bus, child, r)); return (bus_generic_rl_release_resource(bus, child, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int acpi_activate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_activate_resource(bus, child, r)); return (bus_generic_activate_resource(bus, child, r)); } static int acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_deactivate_resource(bus, child, r)); return (bus_generic_deactivate_resource(bus, child, r)); } static int acpi_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, r, argsp, map)); /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_map_resource(bus, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { struct resource *sysres; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_unmap_resource(bus, child, r, map)); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); return (bus_unmap_resource(bus, sysres, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. */ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. 
*/ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? 
BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t 
acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if (hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != 
NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. 
*/ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. */ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. 
*/ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. 
*/ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. 
(This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. 
*/ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); - child = BUS_ADD_CHILD(bus, order, NULL, -1); + child = BUS_ADD_CHILD(bus, order, NULL, DEVICE_UNIT_ANY); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. 
*/ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * RTC Device should be enabled for CMOS register space * unless FADT indicate it is not present. * (checked in RTC probe routine.) */ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources. */ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. 
*/ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. 
*/ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Treadripper boards always returns 0 for FreeBSD because it * only returns non-zero for the OS string "Windows 2015". Otherwise it * will return zero. Force them to always be treated as present. * Beata versions were worse: they always returned 0. */ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. 
*/ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle * returns ACPI_MATCHHID_HID if _HID match * ACPI_MATCHHID_CID if _CID match and not _HID match. * ACPI_MATCHHID_NOMATCH=0 if no match. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, ie. that of (parent) * or one if its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. 
*/
ACPI_BUFFER *
acpi_AllocBuffer(int size)
{
	ACPI_BUFFER *buf;

	/*
	 * Header and data share a single allocation: the data area starts
	 * immediately after the ACPI_BUFFER header (buf + 1).  The caller
	 * therefore frees the whole thing with one free(9) of the returned
	 * pointer -- presumably not AcpiOsFree(); TODO confirm callers.
	 */
	if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
		return (NULL);
	buf->Length = size;
	buf->Pointer = (void *)(buf + 1);
	return (buf);
}

/* Evaluate a path, passing a single integer argument; no return value. */
ACPI_STATUS
acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
{
	ACPI_OBJECT arg1;
	ACPI_OBJECT_LIST args;

	arg1.Type = ACPI_TYPE_INTEGER;
	arg1.Integer.Value = number;
	args.Count = 1;
	args.Pointer = &arg1;

	return (AcpiEvaluateObject(handle, path, &args, NULL));
}

/*
 * Evaluate a path that should return an integer.
 */
ACPI_STATUS
acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
{
	ACPI_STATUS status;
	ACPI_BUFFER buf;
	ACPI_OBJECT param;

	/* A NULL handle means "evaluate relative to the namespace root". */
	if (handle == NULL)
		handle = ACPI_ROOT_OBJECT;

	/*
	 * Assume that what we've been pointed at is an Integer object, or
	 * a method that will return an Integer.  Use a stack-resident
	 * ACPI_OBJECT as the result buffer to avoid an allocation in the
	 * common case.
	 */
	buf.Pointer = &param;
	buf.Length = sizeof(param);
	status = AcpiEvaluateObject(handle, path, NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		if (param.Type == ACPI_TYPE_INTEGER)
			*number = param.Integer.Value;
		else
			status = AE_TYPE;
	}

	/*
	 * In some applications, a method that's expected to return an Integer
	 * may instead return a Buffer (probably to simplify some internal
	 * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
	 * convert it into an Integer as best we can.
	 *
	 * This is a hack.
*/ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of an a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. 
If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator. * This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. 
*/ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
 */
ACPI_STATUS
acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision,
    UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf)
{
    /* Untyped convenience wrapper; caller must check the result type. */
    return (acpi_EvaluateDSMTyped(handle, uuid, revision, function,
	package, out_buf, ACPI_TYPE_ANY));
}

/*
 * Evaluate _DSM with the standard four-argument convention (UUID, revision,
 * function index, argument package).  On success the caller owns
 * out_buf->Pointer and must AcpiOsFree() it.
 */
ACPI_STATUS
acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision,
    UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf,
    ACPI_OBJECT_TYPE type)
{
    ACPI_OBJECT arg[4];
    ACPI_OBJECT_LIST arglist;
    ACPI_BUFFER buf;
    ACPI_STATUS status;

    if (out_buf == NULL)
	return (AE_NO_MEMORY);

    /* Arg0: UUID buffer, Arg1: revision, Arg2: function index. */
    arg[0].Type = ACPI_TYPE_BUFFER;
    arg[0].Buffer.Length = ACPI_UUID_LENGTH;
    arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid);
    arg[1].Type = ACPI_TYPE_INTEGER;
    arg[1].Integer.Value = revision;
    arg[2].Type = ACPI_TYPE_INTEGER;
    arg[2].Integer.Value = function;
    /* Arg3: caller's package, or an empty package when none given. */
    if (package) {
	arg[3] = *package;
    } else {
	arg[3].Type = ACPI_TYPE_PACKAGE;
	arg[3].Package.Count = 0;
	arg[3].Package.Elements = NULL;
    }

    arglist.Pointer = arg;
    arglist.Count = 4;
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type);
    if (ACPI_FAILURE(status))
	return (status);

    /* NOTE(review): unreachable-failure assertion; the early return above
     * already guarantees success here. */
    KASSERT(ACPI_SUCCESS(status), ("Unexpected status"));

    *out_buf = buf;
    return (status);
}

/*
 * Evaluate _OSC: pass the caps_in capability words to the platform and,
 * when caps_out is non-NULL, copy back the returned capability buffer.
 * caps_in[0] is overwritten with the Query Support flag.
 */
ACPI_STATUS
acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count,
    uint32_t *caps_in, uint32_t *caps_out, bool query)
{
    ACPI_OBJECT arg[4], *ret;
    ACPI_OBJECT_LIST arglist;
    ACPI_BUFFER buf;
    ACPI_STATUS status;

    arglist.Pointer = arg;
    arglist.Count = 4;
    arg[0].Type = ACPI_TYPE_BUFFER;
    arg[0].Buffer.Length = ACPI_UUID_LENGTH;
    arg[0].Buffer.Pointer = uuid;
    arg[1].Type = ACPI_TYPE_INTEGER;
    arg[1].Integer.Value = revision;
    arg[2].Type = ACPI_TYPE_INTEGER;
    arg[2].Integer.Value = count;
    arg[3].Type = ACPI_TYPE_BUFFER;
    arg[3].Buffer.Length = count * sizeof(*caps_in);
    arg[3].Buffer.Pointer = (uint8_t *)caps_in;
    /* NOTE(review): assumes count >= 1 and caps_in non-NULL — callers must
     * guarantee this; there is no check here. */
    caps_in[0] = query ? 1 : 0;
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf,
	ACPI_TYPE_BUFFER);
    if (ACPI_FAILURE(status))
	return (status);
    if (caps_out != NULL) {
	ret = buf.Pointer;
	/* Platform must return exactly the same number of DWORDs. */
	if (ret->Buffer.Length != count * sizeof(*caps_out)) {
	    AcpiOsFree(buf.Pointer);
	    return (AE_BUFFER_OVERFLOW);
	}
	bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length);
    }
    AcpiOsFree(buf.Pointer);
    return (status);
}

/*
 * Set interrupt model.
 */
ACPI_STATUS
acpi_SetIntrModel(int model)
{

    return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
}

/*
 * Walk subtables of a table and call a callback routine for each
 * subtable.  The caller should provide the first subtable and a
 * pointer to the end of the table.  This can be used to walk tables
 * such as MADT and SRAT that use subtable entries.
 */
void
acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
    void *arg)
{
    ACPI_SUBTABLE_HEADER *entry;

    for (entry = first; (void *)entry < end; ) {
	/* Avoid an infinite loop if we hit a bogus entry. */
	if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
	    return;
	handler(entry, arg);
	entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
    }
}

/*
 * DEPRECATED.  This interface has serious deficiencies and will be
 * removed.
 *
 * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
 */
ACPI_STATUS
acpi_SetSleepState(struct acpi_softc *sc, int state)
{
    static int once;

    /* Warn (once) and fall through to the modern entry point. */
    if (!once) {
	device_printf(sc->acpi_dev,
"warning: acpi_SetSleepState() deprecated, need to update your software\n");
	once = 1;
    }
    return (acpi_EnterSleepState(sc, state));
}

#if defined(__amd64__) || defined(__i386__)
/* Task-queue body for a forced suspend; runs in the acpi_task thread. */
static void
acpi_sleep_force_task(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
	device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
	    sc->acpi_next_sstate);
}

/* Callout handler: userland never acked the suspend request in time. */
static void
acpi_sleep_force(void *arg)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;

    device_printf(sc->acpi_dev,
	"suspend request timed out, forcing sleep now\n");
    /*
     * XXX Suspending from callout causes freezes in DEVICE_SUSPEND().
     * Suspend from acpi_task thread instead.
     */
    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_sleep_force_task, sc)))
	device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
}
#endif

/*
 * Request that the system enter the given suspend state.  All /dev/apm
 * devices and devd(8) will be notified.  Userland then has a chance to
 * save state and acknowledge the request.  The system sleeps once all
 * acks are in.
 */
int
acpi_ReqSleepState(struct acpi_softc *sc, int state)
{
#if defined(__amd64__) || defined(__i386__)
    struct apm_clone_data *clone;
    ACPI_STATUS status;

    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return (EINVAL);
    if (!acpi_sleep_states[state])
	return (EOPNOTSUPP);

    /*
     * If a reboot/shutdown/suspend request is already in progress or
     * suspend is blocked due to an upcoming shutdown, just return.
     */
    if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
	return (0);
    }

    /* Wait until sleep is enabled. */
    while (sc->acpi_sleep_disabled) {
	AcpiOsSleep(1000);
    }

    ACPI_LOCK(acpi);

    sc->acpi_next_sstate = state;

    /* S5 (soft-off) should be entered directly with no waiting.
 */
    if (state == ACPI_STATE_S5) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /* Record the pending state and notify all apm devices. */
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	clone->notify_status = APM_EV_NONE;
	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
	    selwakeuppri(&clone->sel_read, PZERO);
	    KNOTE_LOCKED(&clone->sel_read.si_note, 0);
	}
    }

    /* If devd(8) is not running, immediately enter the sleep state. */
    if (!devctl_process_running()) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /*
     * Set a timeout to fire if userland doesn't ack the suspend request
     * in time.  This way we still eventually go to sleep if we were
     * overheating or running low on battery, even if userland is hung.
     * We cancel this timeout once all userland acks are in or the
     * suspend request is aborted.
     */
    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
    ACPI_UNLOCK(acpi);

    /* Now notify devd(8) also. */
    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);

    return (0);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}

/*
 * Acknowledge (or reject) a pending sleep state.  The caller has
 * prepared for suspend and is now ready for it to proceed.  If the
 * error argument is non-zero, it indicates suspend should be cancelled
 * and gives an errno value describing why.  Once all votes are in,
 * we suspend the system.
 */
int
acpi_AckSleepState(struct apm_clone_data *clone, int error)
{
#if defined(__amd64__) || defined(__i386__)
    struct acpi_softc *sc;
    int ret, sleeping;

    /* If no pending sleep state, return an error. */
    ACPI_LOCK(acpi);
    sc = clone->acpi_sc;
    if (sc->acpi_next_sstate == 0) {
	ACPI_UNLOCK(acpi);
	return (ENXIO);
    }

    /* Caller wants to abort suspend process.
 */
    if (error) {
	sc->acpi_next_sstate = 0;
	callout_stop(&sc->susp_force_to);
	device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n",
	    devtoname(clone->cdev));
	ACPI_UNLOCK(acpi);
	return (0);
    }

    /*
     * Mark this device as acking the suspend request.  Then, walk through
     * all devices, seeing if they agree yet.  We only count devices that
     * are writable since read-only devices couldn't ack the request.
     */
    sleeping = TRUE;
    clone->notify_status = APM_EV_ACKED;
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
	    clone->notify_status != APM_EV_ACKED) {
	    sleeping = FALSE;
	    break;
	}
    }

    /* If all devices have voted "yes", we will suspend now. */
    if (sleeping)
	callout_stop(&sc->susp_force_to);
    ACPI_UNLOCK(acpi);
    ret = 0;
    if (sleeping) {
	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
	    ret = ENODEV;
    }
    return (ret);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}

/* Callout handler: re-enable sleep requests once the system is fully up. */
static void
acpi_sleep_enable(void *arg)
{
    struct acpi_softc	*sc = (struct acpi_softc *)arg;

    ACPI_LOCK_ASSERT(acpi);

    /* Reschedule if the system is not fully up and running. */
    if (!AcpiGbl_SystemAwakeAndRunning) {
	callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
	return;
    }

    sc->acpi_sleep_disabled = FALSE;
}

/*
 * Atomically claim the "suspending" flag; returns AE_ERROR if a suspend
 * is already in progress (or the system is not fully up yet).
 */
static ACPI_STATUS
acpi_sleep_disable(struct acpi_softc *sc)
{
    ACPI_STATUS		status;

    /* Fail if the system is not fully up and running. */
    if (!AcpiGbl_SystemAwakeAndRunning)
	return (AE_ERROR);

    ACPI_LOCK(acpi);
    status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
    sc->acpi_sleep_disabled = TRUE;
    ACPI_UNLOCK(acpi);

    return (status);
}

/* Progress markers for the suspend path; used to back out partial work. */
enum acpi_sleep_state {
    ACPI_SS_NONE,
    ACPI_SS_GPE_SET,
    ACPI_SS_DEV_SUSPEND,
    ACPI_SS_SLP_PREP,
    ACPI_SS_SLEPT,
};

/*
 * Enter the desired system sleep state.
 *
 * Currently we support S1-S5 but S4 is only S4BIOS
 */
static ACPI_STATUS
acpi_EnterSleepState(struct acpi_softc *sc, int state)
{
    register_t intr;
    ACPI_STATUS status;
    ACPI_EVENT_STATUS power_button_status;
    enum acpi_sleep_state slp_state;
    int sleep_result;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (!acpi_sleep_states[state]) {
	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
	    state);
	return (AE_SUPPORT);
    }

    /* Re-entry once we're suspending is not allowed. */
    status = acpi_sleep_disable(sc);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev,
	    "suspend request ignored (not ready yet)\n");
	return (status);
    }

    if (state == ACPI_STATE_S5) {
	/*
	 * Shut down cleanly and power off.  This will call us back through the
	 * shutdown handlers.
	 */
	shutdown_nice(RB_POWEROFF);
	return_ACPI_STATUS (AE_OK);
    }

    EVENTHANDLER_INVOKE(power_suspend_early);
    stop_all_proc();
    suspend_all_fs();
    EVENTHANDLER_INVOKE(power_suspend);

#ifdef EARLY_AP_STARTUP
    MPASS(mp_ncpus == 1 || smp_started);
    thread_lock(curthread);
    sched_bind(curthread, 0);
    thread_unlock(curthread);
#else
    if (smp_started) {
	thread_lock(curthread);
	sched_bind(curthread, 0);
	thread_unlock(curthread);
    }
#endif

    /*
     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
     */
    bus_topo_lock();

    slp_state = ACPI_SS_NONE;

    sc->acpi_sstate = state;

    /* Enable any GPEs as appropriate and requested by the user. */
    acpi_wake_prep_walk(state);
    slp_state = ACPI_SS_GPE_SET;

    /*
     * Inform all devices that we are going to sleep.  If at least one
     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
     *
     * XXX Note that a better two-pass approach with a 'veto' pass
     * followed by a "real thing" pass would be better, but the current
     * bus interface does not provide for this.
 */
    if (DEVICE_SUSPEND(root_bus) != 0) {
	device_printf(sc->acpi_dev, "device_suspend failed\n");
	goto backout;
    }
    slp_state = ACPI_SS_DEV_SUSPEND;

    status = AcpiEnterSleepStatePrep(state);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
	    AcpiFormatException(status));
	goto backout;
    }
    slp_state = ACPI_SS_SLP_PREP;

    if (sc->acpi_sleep_delay > 0)
	DELAY(sc->acpi_sleep_delay * 1000000);

    suspendclock();
    intr = intr_disable();
    if (state != ACPI_STATE_S1) {
	sleep_result = acpi_sleep_machdep(sc, state);
	acpi_wakeup_machdep(sc, state, sleep_result, 0);

	/*
	 * XXX According to ACPI specification SCI_EN bit should be restored
	 * by ACPI platform (BIOS, firmware) to its pre-sleep state.
	 * Unfortunately some BIOSes fail to do that and that leads to
	 * unexpected and serious consequences during wake up like a system
	 * getting stuck in SMI handlers.
	 * This hack is picked up from Linux, which claims that it follows
	 * Windows behavior.
	 */
	if (sleep_result == 1 && state != ACPI_STATE_S4)
	    AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);

	if (sleep_result == 1 && state == ACPI_STATE_S3) {
	    /*
	     * Prevent mis-interpretation of the wakeup by power button
	     * as a request for power off.
	     * Ideally we should post an appropriate wakeup event,
	     * perhaps using acpi_event_power_button_wake or alike.
	     *
	     * Clearing of power button status after wakeup is mandated
	     * by ACPI specification in section "Fixed Power Button".
	     *
	     * XXX As of ACPICA 20121114 AcpiGetEventStatus provides
	     * status as 0/1 corressponding to inactive/active despite
	     * its type being ACPI_EVENT_STATUS.  In other words,
	     * we should not test for ACPI_EVENT_FLAG_SET for time being.
	     */
	    if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
		&power_button_status)) && power_button_status != 0) {
		AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
		device_printf(sc->acpi_dev,
		    "cleared fixed power button status\n");
	    }
	}

	intr_restore(intr);

	/* call acpi_wakeup_machdep() again with interrupt enabled */
	acpi_wakeup_machdep(sc, state, sleep_result, 1);

	AcpiLeaveSleepStatePrep(state);

	if (sleep_result == -1)
	    goto backout;

	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
	if (state == ACPI_STATE_S4)
	    AcpiEnable();
    } else {
	status = AcpiEnterSleepState(state);
	intr_restore(intr);
	AcpiLeaveSleepStatePrep(state);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
		AcpiFormatException(status));
	    goto backout;
	}
    }
    slp_state = ACPI_SS_SLEPT;

    /*
     * Back out state according to how far along we got in the suspend
     * process.  This handles both the error and success cases.
     */
backout:
    if (slp_state >= ACPI_SS_SLP_PREP)
	resumeclock();
    if (slp_state >= ACPI_SS_GPE_SET) {
	acpi_wake_prep_walk(state);
	sc->acpi_sstate = ACPI_STATE_S0;
    }
    if (slp_state >= ACPI_SS_DEV_SUSPEND)
	DEVICE_RESUME(root_bus);
    if (slp_state >= ACPI_SS_SLP_PREP)
	AcpiLeaveSleepState(state);
    if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
	/* NB: we are still using ACPI timecounter at this point. */
	resume_TSC();
#endif
	acpi_resync_clock(sc);
	acpi_enable_fixed_events(sc);
    }
    sc->acpi_next_sstate = 0;

    bus_topo_unlock();

#ifdef EARLY_AP_STARTUP
    thread_lock(curthread);
    sched_unbind(curthread);
    thread_unlock(curthread);
#else
    if (smp_started) {
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
    }
#endif

    resume_all_fs();
    resume_all_proc();

    EVENTHANDLER_INVOKE(power_resume);

    /* Allow another sleep request after a while. */
    callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);

    /* Run /etc/rc.resume after we are back.
 */
    if (devctl_process_running())
	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);

    return_ACPI_STATUS (status);
}

/* Re-read the timecounter and reset the system clock after resume. */
static void
acpi_resync_clock(struct acpi_softc *sc)
{
    /*
     * Warm up timecounter again and reset system clock.
     */
    (void)timecounter->tc_get_timecount(timecounter);
    inittodr(time_second + sc->acpi_sleep_delay);
}

/* Enable or disable the device's wake GPE. */
int
acpi_wake_set_enable(device_t dev, int enable)
{
    struct acpi_prw_data prw;
    ACPI_STATUS status;
    int flags;

    /* Make sure the device supports waking the system and get the GPE. */
    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
	return (ENXIO);

    flags = acpi_get_flags(dev);
    if (enable) {
	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
	    ACPI_GPE_ENABLE);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "enable wake failed\n");
	    return (ENXIO);
	}
	acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
    } else {
	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
	    ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "disable wake failed\n");
	    return (ENXIO);
	}
	acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
    }

    return (0);
}

/* Pre-suspend: enable or disable a device's wake GPE per _PRW and user. */
static int
acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /* Check that this is a wake-capable device and get its GPE. */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);

    /*
     * The destination sleep state must be less than (i.e., higher power)
     * or equal to the value specified by _PRW.  If this GPE cannot be
     * enabled for the next sleep state, then disable it.  If it can and
     * the user requested it be enabled, turn on any required power resources
     * and set _PSW.
 */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
	if (bootverbose)
	    device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
		acpi_name(handle), sstate);
    } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
	acpi_pwr_wake_enable(handle, 1);
	acpi_SetInteger(handle, "_PSW", 1);
	if (bootverbose)
	    device_printf(dev, "wake_prep enabled for %s (S%d)\n",
		acpi_name(handle), sstate);
    }

    return (0);
}

/* Post-resume: undo whatever acpi_wake_sleep_prep() did for this device. */
static int
acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /*
     * Check that this is a wake-capable device and get its GPE.  Return
     * now if the user didn't enable this device for wake.
     */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);
    if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
	return (0);

    /*
     * If this GPE couldn't be enabled for the previous sleep state, it was
     * disabled before going to sleep so re-enable it.  If it was enabled,
     * clear _PSW and turn off any power resources it used.
     */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
	if (bootverbose)
	    device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
    } else {
	acpi_SetInteger(handle, "_PSW", 0);
	acpi_pwr_wake_enable(handle, 0);
	if (bootverbose)
	    device_printf(dev, "run_prep cleaned up for %s\n",
		acpi_name(handle));
    }

    return (0);
}

/* Namespace-walk callback: dispatch to sleep or run prep based on
 * whether we are on the way down or back up. */
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
    int sstate;

    /* If suspending, run the sleep prep function, otherwise wake. */
    sstate = *(int *)context;
    if (AcpiGbl_SystemAwakeAndRunning)
	acpi_wake_sleep_prep(handle, sstate);
    else
	acpi_wake_run_prep(handle, sstate);
    return (AE_OK);
}

/* Walk the tree rooted at acpi0 to prep devices for suspend/resume.
 */
static int
acpi_wake_prep_walk(int sstate)
{
    ACPI_HANDLE sb_handle;

    if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
	AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
	    acpi_wake_prep, NULL, &sstate, NULL);
    return (0);
}

/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
static int
acpi_wake_sysctl_walk(device_t dev)
{
    int error, i, numdevs;
    device_t *devlist;
    device_t child;
    ACPI_STATUS status;

    error = device_get_children(dev, &devlist, &numdevs);
    if (error != 0 || numdevs == 0) {
	if (numdevs == 0)
	    free(devlist, M_TEMP);
	return (error);
    }
    for (i = 0; i < numdevs; i++) {
	child = devlist[i];
	/* Recurse first, then only add a sysctl for attached _PRW owners. */
	acpi_wake_sysctl_walk(child);
	if (!device_is_attached(child))
	    continue;
	status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW",
	    NULL, NULL);
	if (ACPI_SUCCESS(status)) {
	    SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
		SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
		"wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0,
		acpi_wake_set_sysctl, "I", "Device set to wake the system");
	}
    }
    free(devlist, M_TEMP);
    return (0);
}

/* Enable or disable wake from userland. */
static int
acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
{
    int enable, error;
    device_t dev;

    dev = (device_t)arg1;
    enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;

    error = sysctl_handle_int(oidp, &enable, 0, req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (enable != 0 && enable != 1)
	return (EINVAL);
    return (acpi_wake_set_enable(dev, enable));
}

/* Parse a device's _PRW into a structure. */
int
acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
{
    ACPI_STATUS status;
    ACPI_BUFFER prw_buffer;
    ACPI_OBJECT *res, *res2;
    int error, i, power_count;

    if (h == NULL || prw == NULL)
	return (EINVAL);

    /*
     * The _PRW object (7.2.9) is only required for devices that have the
     * ability to wake the system from a sleeping state.
 */
    error = EINVAL;
    prw_buffer.Pointer = NULL;
    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
    if (ACPI_FAILURE(status))
	return (ENOENT);
    res = (ACPI_OBJECT *)prw_buffer.Pointer;
    if (res == NULL)
	return (ENOENT);
    if (!ACPI_PKG_VALID(res, 2))
	goto out;

    /*
     * Element 1 of the _PRW object:
     * The lowest power system sleeping state that can be entered while still
     * providing wake functionality.  The sleeping state being entered must
     * be less than (i.e., higher power) or equal to this value.
     */
    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
	goto out;

    /*
     * Element 0 of the _PRW object:
     */
    switch (res->Package.Elements[0].Type) {
    case ACPI_TYPE_INTEGER:
	/*
	 * If the data type of this package element is numeric, then this
	 * _PRW package element is the bit index in the GPEx_EN, in the
	 * GPE blocks described in the FADT, of the enable bit that is
	 * enabled for the wake event.
	 */
	prw->gpe_handle = NULL;
	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
	error = 0;
	break;
    case ACPI_TYPE_PACKAGE:
	/*
	 * If the data type of this package element is a package, then this
	 * _PRW package element is itself a package containing two
	 * elements.  The first is an object reference to the GPE Block
	 * device that contains the GPE that will be triggered by the wake
	 * event.  The second element is numeric and it contains the bit
	 * index in the GPEx_EN, in the GPE Block referenced by the
	 * first element in the package, of the enable bit that is enabled for
	 * the wake event.
	 *
	 * For example, if this field is a package then it is of the form:
	 * Package() {\_SB.PCI0.ISA.GPE, 2}
	 */
	res2 = &res->Package.Elements[0];
	if (!ACPI_PKG_VALID(res2, 2))
	    goto out;
	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
	if (prw->gpe_handle == NULL)
	    goto out;
	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
	    goto out;
	error = 0;
	break;
    default:
	goto out;
    }

    /* Elements 2 to N of the _PRW object are power resources.
 */
    power_count = res->Package.Count - 2;
    if (power_count > ACPI_PRW_MAX_POWERRES) {
	printf("ACPI device %s has too many power resources\n", acpi_name(h));
	power_count = 0;
    }
    prw->power_res_count = power_count;
    /*
     * NOTE(review): per the comment above, the power resources are package
     * elements 2..N, yet this copies Elements[i] starting at index 0 —
     * looks off by two.  Verify against the consumers of prw->power_res
     * before changing.
     */
    for (i = 0; i < power_count; i++)
	prw->power_res[i] = res->Package.Elements[i];

out:
    if (prw_buffer.Pointer != NULL)
	AcpiOsFree(prw_buffer.Pointer);
    return (error);
}

/*
 * ACPI Event Handlers
 */

/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
acpi_system_eventhandler_sleep(void *arg, int state)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;
    int ret;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Check if button action is disabled or unknown. */
    if (state == ACPI_STATE_UNKNOWN)
	return;

    /* Request that the system prepare to enter the given suspend state. */
    ret = acpi_ReqSleepState(sc, state);
    if (ret != 0)
	device_printf(sc->acpi_dev,
	    "request to enter state S%d failed (err %d)\n", state, ret);

    return_VOID;
}

static void
acpi_system_eventhandler_wakeup(void *arg, int state)
{

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Currently, nothing to do for wakeup.
 */
    return_VOID;
}

/*
 * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
 */
static void
acpi_invoke_sleep_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
}

static void
acpi_invoke_wake_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
}

/*
 * Fixed-event callbacks below queue the actual event-handler invocation
 * onto the acpi_task thread via AcpiOsExecute; the interrupt is reported
 * unhandled only if queueing fails.
 */
UINT32
acpi_event_power_button_sleep(void *context)
{
    struct acpi_softc	*sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_power_button_wake(void *context)
{
    struct acpi_softc	*sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_sleep_button_sleep(void *context)
{
    struct acpi_softc	*sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

UINT32
acpi_event_sleep_button_wake(void *context)
{
    struct acpi_softc	*sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}

/*
 * XXX This static buffer is suboptimal.  There is no locking so only
 * use this for single-threaded callers.
 */
char *
acpi_name(ACPI_HANDLE handle)
{
    ACPI_BUFFER buf;
    static char data[256];

    buf.Length = sizeof(data);
    buf.Pointer = data;

    if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
	return (data);
    return ("(unknown)");
}

/*
 * Debugging/bug-avoidance.  Avoid trying to fetch info on various
 * parts of the namespace.
 */
int
acpi_avoid(ACPI_HANDLE handle)
{
    char	*cp, *env, *np;
    int		len;

    np = acpi_name(handle);
    if (*np == '\\')
	np++;
    if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
	return (0);

    /* Scan the avoid list checking for a match */
    cp = env;
    for (;;) {
	/* Skip whitespace between entries. */
	while (*cp != 0 && isspace(*cp))
	    cp++;
	if (*cp == 0)
	    break;
	len = 0;
	while (cp[len] != 0 && !isspace(cp[len]))
	    len++;
	/*
	 * NOTE(review): strncmp with the entry's length makes each avoid
	 * entry a prefix match against the handle name, not an exact match.
	 */
	if (!strncmp(cp, np, len)) {
	    freeenv(env);
	    return(1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}

/*
 * Debugging/bug-avoidance.  Disable ACPI subsystem components.
 */
int
acpi_disabled(char *subsys)
{
    char	*cp, *env;
    int		len;

    if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
	return (0);
    /* "all" disables every subsystem. */
    if (strcmp(env, "all") == 0) {
	freeenv(env);
	return (1);
    }

    /* Scan the disable list, checking for a match. */
    cp = env;
    for (;;) {
	while (*cp != '\0' && isspace(*cp))
	    cp++;
	if (*cp == '\0')
	    break;
	len = 0;
	while (cp[len] != '\0' && !isspace(cp[len]))
	    len++;
	if (strncmp(cp, subsys, len) == 0) {
	    freeenv(env);
	    return (1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}

/* devclass lookup hook: map an absolute ACPI path name to its device_t. */
static void
acpi_lookup(void *arg, const char *name, device_t *dev)
{
    ACPI_HANDLE handle;

    if (*dev != NULL)
	return;

    /*
     * Allow any handle name that is specified as an absolute path and
     * starts with '\'.  We could restrict this to \_SB and friends,
     * but see acpi_probe_children() for notes on why we scan the entire
     * namespace for devices.
     *
     * XXX: The pathname argument to AcpiGetHandle() should be fixed to
     * be const.
     */
    if (name[0] != '\\')
	return;
    if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
	&handle)))
	return;
    *dev = acpi_get_device(handle);
}

/*
 * Control interface.
 *
 * We multiplex ioctls for all participating ACPI devices here.  Individual
 * drivers wanting to be accessible via /dev/acpi should use the
 * register/deregister interface to make their handlers visible.
 */
struct acpi_ioctl_hook {
    TAILQ_ENTRY(acpi_ioctl_hook) link;
    u_long	cmd;		/* ioctl command this hook services */
    acpi_ioctl_fn fn;		/* handler */
    void	*arg;		/* opaque argument passed to fn */
};

static TAILQ_HEAD(,acpi_ioctl_hook)	acpi_ioctl_hooks;
static int				acpi_ioctl_hooks_initted;

/* Register a driver-provided handler for an /dev/acpi ioctl command. */
int
acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
{
    struct acpi_ioctl_hook	*hp;

    if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
	return (ENOMEM);
    hp->cmd = cmd;
    hp->fn = fn;
    hp->arg = arg;

    ACPI_LOCK(acpi);
    /* Lazily initialize the hook list on first registration. */
    if (acpi_ioctl_hooks_initted == 0) {
	TAILQ_INIT(&acpi_ioctl_hooks);
	acpi_ioctl_hooks_initted = 1;
    }
    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
    ACPI_UNLOCK(acpi);

    return (0);
}

/* Remove a previously registered ioctl hook; no-op if not found. */
void
acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
{
    struct acpi_ioctl_hook	*hp;

    ACPI_LOCK(acpi);
    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
	if (hp->cmd == cmd && hp->fn == fn)
	    break;
    if (hp != NULL) {
	TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
	free(hp, M_ACPIDEV);
    }
    ACPI_UNLOCK(acpi);
}

static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}

static int
acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}

/* /dev/acpi ioctl entry point: dispatch to hooks, then core commands. */
static int
acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
    struct acpi_softc		*sc;
    struct acpi_ioctl_hook	*hp;
    int				error, state;

    error = 0;
    hp = NULL;
    sc = dev->si_drv1;

    /*
     * Scan the list of registered ioctls, looking for handlers.
     */
    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted)
	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
	    if (hp->cmd == cmd)
		break;
	}
    ACPI_UNLOCK(acpi);
    if (hp)
	return (hp->fn(cmd, addr, hp->arg));

    /*
     * Core ioctls are not permitted for non-writable user.
     * Currently, other ioctls just fetch information.
     * Not changing system behavior.
     */
    if ((flag & FWRITE) == 0)
	return (EPERM);

    /* Core system ioctls.
 */
    switch (cmd) {
    case ACPIIO_REQSLPSTATE:
	state = *(int *)addr;
	if (state != ACPI_STATE_S5)
	    return (acpi_ReqSleepState(sc, state));
	device_printf(sc->acpi_dev,
	    "power off via acpi ioctl not supported\n");
	error = EOPNOTSUPP;
	break;
    case ACPIIO_ACKSLPSTATE:
	error = *(int *)addr;
	error = acpi_AckSleepState(sc->acpi_clone, error);
	break;
    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
	state = *(int *)addr;
	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
	    return (EINVAL);
	if (!acpi_sleep_states[state])
	    return (EOPNOTSUPP);
	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
	    error = ENXIO;
	break;
    default:
	error = ENXIO;
	break;
    }
    return (error);
}

/* Map "S0".."S5" / "NONE" to a sleep-state number; -1 on bad input. */
static int
acpi_sname2sstate(const char *sname)
{
    int sstate;

    if (toupper(sname[0]) == 'S') {
	sstate = sname[1] - '0';
	if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
	    sname[2] == '\0')
	    return (sstate);
    } else if (strcasecmp(sname, "NONE") == 0)
	return (ACPI_STATE_UNKNOWN);
    return (-1);
}

/* Map a sleep-state number back to its name; NULL when out of range. */
static const char *
acpi_sstate2sname(int sstate)
{
    static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };

    if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
	return (snames[sstate]);
    else if (sstate == ACPI_STATE_UNKNOWN)
	return ("NONE");
    return (NULL);
}

/* sysctl: report the BIOS-supported sleep states as a space-separated list. */
static int
acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct sbuf sb;
    UINT8 state;

    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
	if (acpi_sleep_states[state])
	    sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (error);
}

/* sysctl: get/set a sleep-state variable by name (e.g. hw.acpi.*_state). */
static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    char sleep_state[10];
    int error, new_state, old_state;

    old_state = *(int *)oidp->oid_arg1;
    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
    if (error == 0 && req->newptr != NULL) {
	new_state = acpi_sname2sstate(sleep_state);
	/* S0 and invalid names are rejected; unsupported states too. */
	if (new_state < ACPI_STATE_S1)
	    return (EINVAL);
	if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
	    return (EOPNOTSUPP);
	if (new_state != old_state)
	    *(int *)oidp->oid_arg1 = new_state;
    }
    return (error);
}

/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
{
    char		notify_buf[16];
    ACPI_BUFFER		handle_buf;
    ACPI_STATUS		status;

    if (subsystem == NULL)
	return;

    handle_buf.Pointer = NULL;
    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiNsHandleToPathname(h, &handle_buf, FALSE);
    if (ACPI_FAILURE(status))
	return;

    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);

    AcpiOsFree(handle_buf.Pointer);
}

#ifdef ACPI_DEBUG
/*
 * Support for parsing debug options from the kernel environment.
 *
 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
 * by specifying the names of the bits in the debug.acpi.layer and
 * debug.acpi.level environment variables.  Bits may be unset by
 * prefixing the bit name with !.
 */
/* Name/value pair mapping a debug-bit name onto its mask bit. */
struct debugtag
{
	char	*name;
	UINT32	value;
};

/* ACPICA component (layer) names accepted in debug.acpi.layer. */
static struct debugtag dbg_layer[] = {
	{"ACPI_UTILITIES",		ACPI_UTILITIES},
	{"ACPI_HARDWARE",		ACPI_HARDWARE},
	{"ACPI_EVENTS",			ACPI_EVENTS},
	{"ACPI_TABLES",			ACPI_TABLES},
	{"ACPI_NAMESPACE",		ACPI_NAMESPACE},
	{"ACPI_PARSER",			ACPI_PARSER},
	{"ACPI_DISPATCHER",		ACPI_DISPATCHER},
	{"ACPI_EXECUTER",		ACPI_EXECUTER},
	{"ACPI_RESOURCES",		ACPI_RESOURCES},
	{"ACPI_CA_DEBUGGER",		ACPI_CA_DEBUGGER},
	{"ACPI_OS_SERVICES",		ACPI_OS_SERVICES},
	{"ACPI_CA_DISASSEMBLER",	ACPI_CA_DISASSEMBLER},
	{"ACPI_ALL_COMPONENTS",		ACPI_ALL_COMPONENTS},
	{"ACPI_AC_ADAPTER",		ACPI_AC_ADAPTER},
	{"ACPI_BATTERY",		ACPI_BATTERY},
	{"ACPI_BUS",			ACPI_BUS},
	{"ACPI_BUTTON",			ACPI_BUTTON},
	{"ACPI_EC",			ACPI_EC},
	{"ACPI_FAN",			ACPI_FAN},
	{"ACPI_POWERRES",		ACPI_POWERRES},
	{"ACPI_PROCESSOR",		ACPI_PROCESSOR},
	{"ACPI_THERMAL",		ACPI_THERMAL},
	{"ACPI_TIMER",			ACPI_TIMER},
	{"ACPI_ALL_DRIVERS",		ACPI_ALL_DRIVERS},
	{NULL, 0}	/* table terminator */
};

/* ACPICA verbosity (level) names accepted in debug.acpi.level. */
static struct debugtag dbg_level[] = {
	{"ACPI_LV_INIT",		ACPI_LV_INIT},
	{"ACPI_LV_DEBUG_OBJECT",	ACPI_LV_DEBUG_OBJECT},
	{"ACPI_LV_INFO",		ACPI_LV_INFO},
	{"ACPI_LV_REPAIR",		ACPI_LV_REPAIR},
	{"ACPI_LV_ALL_EXCEPTIONS",	ACPI_LV_ALL_EXCEPTIONS},

	/* Trace verbosity level 1 [Standard Trace Level] */
	{"ACPI_LV_INIT_NAMES",		ACPI_LV_INIT_NAMES},
	{"ACPI_LV_PARSE",		ACPI_LV_PARSE},
	{"ACPI_LV_LOAD",		ACPI_LV_LOAD},
	{"ACPI_LV_DISPATCH",		ACPI_LV_DISPATCH},
	{"ACPI_LV_EXEC",		ACPI_LV_EXEC},
	{"ACPI_LV_NAMES",		ACPI_LV_NAMES},
	{"ACPI_LV_OPREGION",		ACPI_LV_OPREGION},
	{"ACPI_LV_BFIELD",		ACPI_LV_BFIELD},
	{"ACPI_LV_TABLES",		ACPI_LV_TABLES},
	{"ACPI_LV_VALUES",		ACPI_LV_VALUES},
	{"ACPI_LV_OBJECTS",		ACPI_LV_OBJECTS},
	{"ACPI_LV_RESOURCES",		ACPI_LV_RESOURCES},
	{"ACPI_LV_USER_REQUESTS",	ACPI_LV_USER_REQUESTS},
	{"ACPI_LV_PACKAGE",		ACPI_LV_PACKAGE},
	{"ACPI_LV_VERBOSITY1",		ACPI_LV_VERBOSITY1},

	/* Trace verbosity level 2 [Function tracing and memory allocation] */
	{"ACPI_LV_ALLOCATIONS",		ACPI_LV_ALLOCATIONS},
	{"ACPI_LV_FUNCTIONS",		ACPI_LV_FUNCTIONS},
	{"ACPI_LV_OPTIMIZATIONS",	ACPI_LV_OPTIMIZATIONS},
	{"ACPI_LV_VERBOSITY2",		ACPI_LV_VERBOSITY2},
	{"ACPI_LV_ALL",			ACPI_LV_ALL},

	/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
	{"ACPI_LV_MUTEX",		ACPI_LV_MUTEX},
	{"ACPI_LV_THREADS",		ACPI_LV_THREADS},
	{"ACPI_LV_IO",			ACPI_LV_IO},
	{"ACPI_LV_INTERRUPTS",		ACPI_LV_INTERRUPTS},
	{"ACPI_LV_VERBOSITY3",		ACPI_LV_VERBOSITY3},

	/* Exceptionally verbose output -- also used in the global "DebugLevel" */
	{"ACPI_LV_AML_DISASSEMBLE",	ACPI_LV_AML_DISASSEMBLE},
	{"ACPI_LV_VERBOSE_INFO",	ACPI_LV_VERBOSE_INFO},
	{"ACPI_LV_FULL_TABLES",		ACPI_LV_FULL_TABLES},
	{"ACPI_LV_EVENTS",		ACPI_LV_EVENTS},
	{"ACPI_LV_VERBOSE",		ACPI_LV_VERBOSE},
	{NULL, 0}	/* table terminator */
};

/*
 * Parse a whitespace-separated list of debug-bit names in 'cp' and apply
 * each one to *flag via the name table 'tag'.  A name prefixed with '!'
 * clears its bit instead of setting it.  'cp' is scanned destructively
 * only in the sense of pointer advancement; the string is not modified.
 */
static void
acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
{
	char	*ep;
	int	i, l;
	int	set;

	while (*cp) {
		if (isspace(*cp)) {
			cp++;
			continue;
		}
		/* Find the end of the current token. */
		ep = cp;
		while (*ep && !isspace(*ep))
			ep++;
		if (*cp == '!') {
			set = 0;
			cp++;
			if (cp == ep)
				continue;	/* bare "!" -- skip */
		} else {
			set = 1;
		}
		l = ep - cp;
		/*
		 * Prefix match against every table entry; note strncmp()
		 * with token length matches any entry sharing the prefix.
		 */
		for (i = 0; tag[i].name != NULL; i++) {
			if (!strncmp(cp, tag[i].name, l)) {
				if (set)
					*flag |= tag[i].value;
				else
					*flag &= ~tag[i].value;
			}
		}
		cp = ep;
	}
}

/*
 * Load AcpiDbgLayer/AcpiDbgLevel from the debug.acpi.layer and
 * debug.acpi.level kernel environment variables.  Run as a SYSINIT and
 * re-run from the sysctl handler below; 'junk' is unused.
 */
static void
acpi_set_debugging(void *junk)
{
	char	*layer, *level;

	if (cold) {
		/* Start from a clean slate during early boot. */
		AcpiDbgLayer = 0;
		AcpiDbgLevel = 0;
	}

	layer = kern_getenv("debug.acpi.layer");
	level = kern_getenv("debug.acpi.level");
	if (layer == NULL && level == NULL)
		return;

	printf("ACPI set debug");
	if (layer != NULL) {
		if (strcmp("NONE", layer) != 0)
			printf(" layer '%s'", layer);
		acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
		freeenv(layer);
	}
	if (level != NULL) {
		if (strcmp("NONE", level) != 0)
			printf(" level '%s'", level);
		acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
		freeenv(level);
	}
	printf("\n");
}

SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
    NULL);

/*
 * sysctl handler shared by debug.acpi.layer and debug.acpi.level; oid_arg1
 * carries the environment variable name, which selects which table and
 * which ACPICA global to operate on.
 */
static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
	int	 error, *dbg;
	struct	 debugtag *tag;
	struct	 sbuf sb;
	char	 temp[128];

	if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
		return (ENOMEM);
	if (strcmp(oidp->oid_arg1,
{
	int	state, acpi_state;
	int	error;
	struct	acpi_softc *sc;
	va_list	ap;

	error = 0;
	switch (cmd) {
	case POWER_CMD_SUSPEND:
		sc = (struct acpi_softc *)arg;
		if (sc == NULL) {
			error = EINVAL;
			goto out;
		}

		/* The requested power(9) sleep state follows 'arg'. */
		va_start(ap, arg);
		state = va_arg(ap, int);
		va_end(ap);

		/* Map the generic power(9) state onto an ACPI S-state. */
		switch (state) {
		case POWER_SLEEP_STATE_STANDBY:
			acpi_state = sc->acpi_standby_sx;
			break;
		case POWER_SLEEP_STATE_SUSPEND:
			acpi_state = sc->acpi_suspend_sx;
			break;
		case POWER_SLEEP_STATE_HIBERNATE:
			acpi_state = ACPI_STATE_S4;
			break;
		default:
			error = EINVAL;
			goto out;
		}

		if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
			error = ENXIO;
		break;
	default:
		error = EINVAL;
		goto out;
	}

out:
	return (error);
}

/*
 * Register acpi_pm_func() as the system power-management backend; only done
 * during boot (cold) and only when the acpi device is not disabled.
 */
static void
acpi_pm_register(void *arg)
{
	if (!cold || resource_disabled("acpi", 0))
		return;

	power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
}

SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
diff --git a/sys/dev/amdsbwd/amdsbwd.c b/sys/dev/amdsbwd/amdsbwd.c
index 1dfe6c1b11bf..122fa2d58277 100644
--- a/sys/dev/amdsbwd/amdsbwd.c
+++ b/sys/dev/amdsbwd/amdsbwd.c
@@ -1,592 +1,592 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Andriy Gapon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is a driver for watchdog timer present in AMD SB600/SB7xx/SB8xx * southbridges. * Please see the following specifications for the descriptions of the * registers and flags: * - AMD SB600 Register Reference Guide, Public Version, Rev. 3.03 (SB600 RRG) * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/46155_sb600_rrg_pub_3.03.pdf * - AMD SB700/710/750 Register Reference Guide (RRG) * http://developer.amd.com/assets/43009_sb7xx_rrg_pub_1.00.pdf * - AMD SB700/710/750 Register Programming Requirements (RPR) * http://developer.amd.com/assets/42413_sb7xx_rpr_pub_1.00.pdf * - AMD SB800-Series Southbridges Register Reference Guide (RRG) * http://support.amd.com/us/Embedded_TechDocs/45482.pdf * Please see the following for Watchdog Resource Table specification: * - Watchdog Timer Hardware Requirements for Windows Server 2003 (WDRT) * http://www.microsoft.com/whdc/system/sysinternals/watchdog.mspx * AMD SB600/SB7xx/SB8xx watchdog hardware seems to conform to the above * specifications, but the table hasn't been spotted in the wild yet. */ #include #include "opt_amdsbwd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Registers in the Watchdog IO space. * See SB7xx RRG 2.3.4, WDRT. 
 */
/* Control register and its bit flags. */
#define	AMDSB_WD_CTRL		0x00
#define	AMDSB_WD_RUN		0x01
#define	AMDSB_WD_FIRED		0x02
#define	AMDSB_WD_SHUTDOWN	0x04
#define	AMDSB_WD_DISABLE	0x08
#define	AMDSB_WD_RESERVED	0x70
#define	AMDSB_WD_RELOAD		0x80
/* Count register offset and the width of its valid field. */
#define	AMDSB_WD_COUNT		0x04
#define	AMDSB_WD_COUNT_MASK	0xffff
#define	AMDSB_WDIO_REG_WIDTH	4

/* Print only when booting verbosely. */
#define	amdsbwd_verbose_printf(dev, ...)	\
	do {					\
		if (bootverbose)		\
			device_printf(dev, __VA_ARGS__);\
	} while (0)

struct amdsbwd_softc {
	device_t		dev;
	eventhandler_tag	ev_tag;		/* watchdog_list handler */
	struct resource		*res_ctrl;	/* control register window */
	struct resource		*res_count;	/* count register window */
	int			rid_ctrl;
	int			rid_count;
	int			ms_per_tick;	/* timer tick granularity */
	int			max_ticks;	/* largest programmable count */
	int			active;		/* timer currently running */
	unsigned int		timeout;	/* last programmed tick count */
};

static void	amdsbwd_identify(driver_t *driver, device_t parent);
static int	amdsbwd_probe(device_t dev);
static int	amdsbwd_attach(device_t dev);
static int	amdsbwd_detach(device_t dev);
static int	amdsbwd_suspend(device_t dev);
static int	amdsbwd_resume(device_t dev);

static device_method_t amdsbwd_methods[] = {
	DEVMETHOD(device_identify,	amdsbwd_identify),
	DEVMETHOD(device_probe,		amdsbwd_probe),
	DEVMETHOD(device_attach,	amdsbwd_attach),
	DEVMETHOD(device_detach,	amdsbwd_detach),
	DEVMETHOD(device_suspend,	amdsbwd_suspend),
	DEVMETHOD(device_resume,	amdsbwd_resume),
#if 0
	DEVMETHOD(device_shutdown,	amdsbwd_detach),
#endif
	DEVMETHOD_END
};

static driver_t amdsbwd_driver = {
	"amdsbwd",
	amdsbwd_methods,
	sizeof(struct amdsbwd_softc)
};

DRIVER_MODULE(amdsbwd, isa, amdsbwd_driver, NULL, NULL);

/* Indirect PM I/O space access: write index, then read/write data. */
static uint8_t
pmio_read(struct resource *res, uint8_t reg)
{
	bus_write_1(res, 0, reg);	/* Index */
	return (bus_read_1(res, 1));	/* Data */
}

static void
pmio_write(struct resource *res, uint8_t reg, uint8_t val)
{
	bus_write_1(res, 0, reg);	/* Index */
	bus_write_1(res, 1, val);	/* Data */
}

/* Accessors for the memory-mapped watchdog control register. */
static uint32_t
wdctrl_read(struct amdsbwd_softc *sc)
{
	return (bus_read_4(sc->res_ctrl, 0));
}

static void
wdctrl_write(struct amdsbwd_softc *sc, uint32_t val)
{
	bus_write_4(sc->res_ctrl, 0, val);
}

/* Accessors for the memory-mapped watchdog count register. */
static __unused uint32_t
wdcount_read(struct amdsbwd_softc *sc)
{
	return (bus_read_4(sc->res_count, 0));
}

static void
wdcount_write(struct amdsbwd_softc *sc, uint32_t val)
{
	bus_write_4(sc->res_count, 0, val);
}

/* Start the timer (read-modify-write of the RUN bit). */
static void
amdsbwd_tmr_enable(struct amdsbwd_softc *sc)
{
	uint32_t val;

	val = wdctrl_read(sc);
	val |= AMDSB_WD_RUN;
	wdctrl_write(sc, val);
	sc->active = 1;
	amdsbwd_verbose_printf(sc->dev, "timer enabled\n");
}

/* Stop the timer. */
static void
amdsbwd_tmr_disable(struct amdsbwd_softc *sc)
{
	uint32_t val;

	val = wdctrl_read(sc);
	val &= ~AMDSB_WD_RUN;
	wdctrl_write(sc, val);
	sc->active = 0;
	amdsbwd_verbose_printf(sc->dev, "timer disabled\n");
}

/* Pat the watchdog: restart the countdown from the programmed value. */
static void
amdsbwd_tmr_reload(struct amdsbwd_softc *sc)
{
	uint32_t val;

	val = wdctrl_read(sc);
	val |= AMDSB_WD_RELOAD;
	wdctrl_write(sc, val);
}

/* Program a new countdown value (in ticks) and cache it in the softc. */
static void
amdsbwd_tmr_set(struct amdsbwd_softc *sc, uint16_t timeout)
{
	timeout &= AMDSB_WD_COUNT_MASK;
	wdcount_write(sc, timeout);
	sc->timeout = timeout;
	amdsbwd_verbose_printf(sc->dev, "timeout set to %u ticks\n", timeout);
}

/*
 * watchdog(9) event handler.  'cmd' encodes the requested timeout as a
 * power-of-two number of milliseconds (WD_TO_* scale); 0 disables.
 */
static void
amdsbwd_event(void *arg, unsigned int cmd, int *error)
{
	struct amdsbwd_softc *sc = arg;
	uint64_t timeout;

	if (cmd != 0) {
		timeout = 0;
		cmd &= WD_INTERVAL;
		if (cmd >= WD_TO_1MS) {
			/* Convert 2^(cmd - WD_TO_1MS) ms to hardware ticks. */
			timeout = (uint64_t)1 << (cmd - WD_TO_1MS);
			timeout = timeout / sc->ms_per_tick;
		}
		/* For a too short timeout use 1 tick. */
		if (timeout == 0)
			timeout = 1;
		/* For a too long timeout stop the timer. */
		if (timeout > sc->max_ticks)
			timeout = 0;
	} else {
		timeout = 0;
	}

	if (timeout != 0) {
		if (timeout != sc->timeout)
			amdsbwd_tmr_set(sc, timeout);
		if (!sc->active)
			amdsbwd_tmr_enable(sc);
		amdsbwd_tmr_reload(sc);
		*error = 0;
	} else {
		if (sc->active)
			amdsbwd_tmr_disable(sc);
	}
}

/*
 * Bus identify method: add an amdsbwd child under isa(4) when a supported
 * AMD/Hygon southbridge SMBus controller is present.
 */
static void
amdsbwd_identify(driver_t *driver, device_t parent)
{
	device_t		child;
	device_t		smb_dev;

	if (resource_disabled("amdsbwd", 0))
		return;
	if (device_find_child(parent, "amdsbwd", -1) != NULL)
		return;

	/*
	 * Try to identify SB600/SB7xx by PCI Device ID of SMBus device
	 * that should be present at bus 0, device 20, function 0.
*/ smb_dev = pci_find_bsf(0, 20, 0); if (smb_dev == NULL) return; if (pci_get_devid(smb_dev) != AMDSB_SMBUS_DEVID && pci_get_devid(smb_dev) != AMDFCH_SMBUS_DEVID && pci_get_devid(smb_dev) != AMDCZ_SMBUS_DEVID && pci_get_devid(smb_dev) != HYGONCZ_SMBUS_DEVID) return; - child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", -1); + child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", DEVICE_UNIT_ANY); if (child == NULL) device_printf(parent, "add amdsbwd child failed\n"); } static void amdsbwd_probe_sb7xx(device_t dev, struct resource *pmres, uint32_t *addr) { uint8_t val; int i; /* Report cause of previous reset for user's convenience. */ val = pmio_read(pmres, AMDSB_PM_RESET_STATUS0); if (val != 0) amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val); val = pmio_read(pmres, AMDSB_PM_RESET_STATUS1); if (val != 0) amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val); if ((val & AMDSB_WD_RST_STS) != 0) device_printf(dev, "Previous Reset was caused by Watchdog\n"); /* Find base address of memory mapped WDT registers. */ for (*addr = 0, i = 0; i < 4; i++) { *addr <<= 8; *addr |= pmio_read(pmres, AMDSB_PM_WDT_BASE_MSB - i); } *addr &= ~0x07u; /* Set watchdog timer tick to 1s. */ val = pmio_read(pmres, AMDSB_PM_WDT_CTRL); val &= ~AMDSB_WDT_RES_MASK; val |= AMDSB_WDT_RES_1S; pmio_write(pmres, AMDSB_PM_WDT_CTRL, val); /* Enable watchdog device (in stopped state). */ val = pmio_read(pmres, AMDSB_PM_WDT_CTRL); val &= ~AMDSB_WDT_DISABLE; pmio_write(pmres, AMDSB_PM_WDT_CTRL, val); /* * XXX TODO: Ensure that watchdog decode is enabled * (register 0x41, bit 3). */ device_set_desc(dev, "AMD SB600/SB7xx Watchdog Timer"); } static void amdsbwd_probe_sb8xx(device_t dev, struct resource *pmres, uint32_t *addr) { uint32_t val; int i; /* Report cause of previous reset for user's convenience. 
 */
	val = pmio_read(pmres, AMDSB8_PM_RESET_CTRL);
	if ((val & AMDSB8_RST_STS_DIS) != 0) {
		/* Re-enable reset status reporting if firmware disabled it. */
		val &= ~AMDSB8_RST_STS_DIS;
		pmio_write(pmres, AMDSB8_PM_RESET_CTRL, val);
	}
	val = 0;
	for (i = 3; i >= 0; i--) {
		val <<= 8;
		val |= pmio_read(pmres, AMDSB8_PM_RESET_STATUS + i);
	}
	if (val != 0)
		amdsbwd_verbose_printf(dev, "ResetStatus = 0x%08x\n", val);
	if ((val & AMDSB8_WD_RST_STS) != 0)
		device_printf(dev, "Previous Reset was caused by Watchdog\n");

	/* Find base address of memory mapped WDT registers. */
	for (*addr = 0, i = 0; i < 4; i++) {
		*addr <<= 8;
		*addr |= pmio_read(pmres, AMDSB8_PM_WDT_EN + 3 - i);
	}
	*addr &= ~0x07u;	/* low bits are flags, not address bits */

	/* Set watchdog timer tick to 1s. */
	val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
	val &= ~AMDSB8_WDT_RES_MASK;
	val |= AMDSB8_WDT_1HZ;
	pmio_write(pmres, AMDSB8_PM_WDT_CTRL, val);
#ifdef AMDSBWD_DEBUG
	val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
	amdsbwd_verbose_printf(dev, "AMDSB8_PM_WDT_CTRL value = %#04x\n", val);
#endif

	/*
	 * Enable watchdog device (in stopped state)
	 * and decoding of its address.
	 */
	val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
	val &= ~AMDSB8_WDT_DISABLE;
	val |= AMDSB8_WDT_DEC_EN;
	pmio_write(pmres, AMDSB8_PM_WDT_EN, val);
#ifdef AMDSBWD_DEBUG
	val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
	device_printf(dev, "AMDSB8_PM_WDT_EN value = %#04x\n", val);
#endif
	device_set_desc(dev, "AMD SB8xx/SB9xx/Axx Watchdog Timer");
}

/*
 * Probe-time setup for FCH revision 41h and newer (and Hygon equivalents):
 * enable MMIO decode, determine the watchdog register base, set a 1 s tick
 * and enable the (stopped) watchdog.
 */
static void
amdsbwd_probe_fch41(device_t dev, struct resource *pmres, uint32_t *addr)
{
	uint8_t	val;

	/*
	 * Enable decoding of watchdog MMIO address.
	 */
	val = pmio_read(pmres, AMDFCH41_PM_DECODE_EN0);
	val |= AMDFCH41_WDT_EN;
	pmio_write(pmres, AMDFCH41_PM_DECODE_EN0, val);
#ifdef AMDSBWD_DEBUG
	val = pmio_read(pmres, AMDFCH41_PM_DECODE_EN0);
	device_printf(dev, "AMDFCH41_PM_DECODE_EN0 value = %#04x\n", val);
#endif

	val = pmio_read(pmres, AMDFCH41_PM_ISA_CTRL);
	if ((val & AMDFCH41_MMIO_EN) != 0) {
		/* Fixed offset for the watchdog within ACPI MMIO range. */
		amdsbwd_verbose_printf(dev, "ACPI MMIO range is enabled\n");
		*addr = AMDFCH41_MMIO_ADDR + AMDFCH41_MMIO_WDT_OFF;
	} else {
		/* Special fixed MMIO range for the watchdog. */
		*addr = AMDFCH41_WDT_FIXED_ADDR;
	}

	/*
	 * Set watchdog timer tick to 1s and
	 * enable the watchdog device (in stopped state).
	 */
	val = pmio_read(pmres, AMDFCH41_PM_DECODE_EN3);
	val &= ~AMDFCH41_WDT_RES_MASK;
	val |= AMDFCH41_WDT_RES_1S;
	val &= ~AMDFCH41_WDT_EN_MASK;
	val |= AMDFCH41_WDT_ENABLE;
	pmio_write(pmres, AMDFCH41_PM_DECODE_EN3, val);
#ifdef AMDSBWD_DEBUG
	val = pmio_read(pmres, AMDFCH41_PM_DECODE_EN3);
	amdsbwd_verbose_printf(dev, "AMDFCH41_PM_DECODE_EN3 value = %#04x\n",
	    val);
#endif
	device_set_descf(dev, "%s FCH Rev 41h+ Watchdog Timer",
	    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD");
}

/*
 * Probe: temporarily map the PM index/data I/O pair, dispatch to the
 * chipset-specific setup routine based on the SMBus device/revision IDs,
 * then publish the two 4-byte memory resources (control and count) that
 * attach will map.
 */
static int
amdsbwd_probe(device_t dev)
{
	struct resource	*res;
	device_t	smb_dev;
	uint32_t	addr;
	int		rid;
	int		rc;
	uint32_t	devid;
	uint8_t		revid;

	/* Do not claim some ISA PnP device by accident. */
	if (isa_get_logicalid(dev) != 0)
		return (ENXIO);

	rc = bus_set_resource(dev, SYS_RES_IOPORT, 0, AMDSB_PMIO_INDEX,
	    AMDSB_PMIO_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for IO failed\n");
		return (ENXIO);
	}
	rid = 0;
	res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (res == NULL) {
		device_printf(dev, "bus_alloc_resource for IO failed\n");
		return (ENXIO);
	}

	/* identify() already guaranteed that this device exists. */
	smb_dev = pci_find_bsf(0, 20, 0);
	KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
	devid = pci_get_devid(smb_dev);
	revid = pci_get_revid(smb_dev);
	if (devid == AMDSB_SMBUS_DEVID && revid < AMDSB8_SMBUS_REVID)
		amdsbwd_probe_sb7xx(dev, res, &addr);
	else if (devid == AMDSB_SMBUS_DEVID ||
	    (devid == AMDFCH_SMBUS_DEVID && revid < AMDFCH41_SMBUS_REVID) ||
	    (devid == AMDCZ_SMBUS_DEVID && revid < AMDCZ49_SMBUS_REVID))
		amdsbwd_probe_sb8xx(dev, res, &addr);
	else
		amdsbwd_probe_fch41(dev, res, &addr);

	/* The PM I/O pair is only needed during probe; give it back. */
	bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
	bus_delete_resource(dev, SYS_RES_IOPORT, rid);

	amdsbwd_verbose_printf(dev, "memory base address = %#010x\n", addr);
	rc = bus_set_resource(dev, SYS_RES_MEMORY, 0, addr + AMDSB_WD_CTRL,
	    AMDSB_WDIO_REG_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for control failed\n");
		return (ENXIO);
	}
	rc = bus_set_resource(dev, SYS_RES_MEMORY, 1, addr + AMDSB_WD_COUNT,
	    AMDSB_WDIO_REG_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for count failed\n");
		return (ENXIO);
	}

	return (0);
}

/* Map the control and count register windows and set timing parameters. */
static int
amdsbwd_attach_sb(device_t dev, struct amdsbwd_softc *sc)
{
	sc->max_ticks = UINT16_MAX;
	sc->rid_ctrl = 0;
	sc->rid_count = 1;
	sc->ms_per_tick = 1000;	/* probe configured a 1 s tick */

	sc->res_ctrl = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->rid_ctrl, RF_ACTIVE);
	if (sc->res_ctrl == NULL) {
		device_printf(dev, "bus_alloc_resource for ctrl failed\n");
		return (ENXIO);
	}
	sc->res_count = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->rid_count, RF_ACTIVE);
	if (sc->res_count == NULL) {
		device_printf(dev, "bus_alloc_resource for count failed\n");
		return (ENXIO);
	}
	return (0);
}

/* Attach: map registers, clear stale state, hook into watchdog(9). */
static int
amdsbwd_attach(device_t dev)
{
	struct amdsbwd_softc	*sc;
	int			rc;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rc = amdsbwd_attach_sb(dev, sc);
	if (rc != 0)
		goto fail;

#ifdef AMDSBWD_DEBUG
	device_printf(dev, "wd ctrl = %#04x\n", wdctrl_read(sc));
	device_printf(dev, "wd count = %#04x\n", wdcount_read(sc));
#endif

	/* Setup initial state of Watchdog Control. */
	wdctrl_write(sc, AMDSB_WD_FIRED);

	if (wdctrl_read(sc) & AMDSB_WD_DISABLE) {
		device_printf(dev, "watchdog hardware is disabled\n");
		goto fail;
	}

	sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, amdsbwd_event, sc,
	    EVENTHANDLER_PRI_ANY);

	return (0);

fail:
	amdsbwd_detach(dev);
	return (ENXIO);
}

/* Detach: deregister, stop the timer, release register windows. */
static int
amdsbwd_detach(device_t dev)
{
	struct amdsbwd_softc *sc;

	sc = device_get_softc(dev);
	if (sc->ev_tag != NULL)
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag);

	if (sc->active)
		amdsbwd_tmr_disable(sc);

	if (sc->res_ctrl != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->rid_ctrl,
		    sc->res_ctrl);
	if (sc->res_count != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->rid_count,
		    sc->res_count);

	return (0);
}

/* Suspend: stop the countdown so the machine is not reset while asleep. */
static int
amdsbwd_suspend(device_t dev)
{
	struct amdsbwd_softc	*sc;
	uint32_t		val;

	sc = device_get_softc(dev);
	val = wdctrl_read(sc);
	val &= ~AMDSB_WD_RUN;
	wdctrl_write(sc, val);
	return (0);
}

/* Resume: reprogram and restart the timer if it was active at suspend. */
static int
amdsbwd_resume(device_t dev)
{
	struct amdsbwd_softc *sc;

	sc = device_get_softc(dev);
	wdctrl_write(sc, AMDSB_WD_FIRED);
	if (sc->active) {
		amdsbwd_tmr_set(sc, sc->timeout);
		amdsbwd_tmr_enable(sc);
		amdsbwd_tmr_reload(sc);
	}
	return (0);
}
diff --git a/sys/dev/bhnd/bcma/bcma.c b/sys/dev/bhnd/bcma/bcma.c
index c9df03cfb504..9ce4bf04a1c5 100644
--- a/sys/dev/bhnd/bcma/bcma.c
+++ b/sys/dev/bhnd/bcma/bcma.c
@@ -1,756 +1,756 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2016 Landon Fuller
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2.
    Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * NOTE(review): the header names on the following #include lines were lost
 * during text extraction (angle-bracket contents stripped) -- restore from
 * upstream sys/dev/bhnd/bcma/bcma.c before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include "bcma_dmp.h"

#include "bcma_eromreg.h"
#include "bcma_eromvar.h"

#include "bcmavar.h"

/* RID used when allocating EROM table */
#define	BCMA_EROM_RID	0

/* Return the erom parser class used to enumerate bcma(4) buses. */
static bhnd_erom_class_t *
bcma_get_erom_class(driver_t *driver)
{
	return (&bcma_erom_parser);
}

/* Generic bcma(4) bus probe. */
int
bcma_probe(device_t dev)
{
	device_set_desc(dev, "BCMA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default bcma(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal bcma(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
bcma_attach(device_t dev)
{
	int error;

	/* Enumerate children */
	if ((error = bcma_add_children(dev))) {
		device_delete_children(dev);
		return (error);
	}

	return (0);
}

/* Generic bcma(4) detach; delegates to the shared bhnd(4) implementation. */
int
bcma_detach(device_t dev)
{
	return (bhnd_generic_detach(dev));
}

/* Bus add_child method: create the child and attach bcma device info ivars. */
static device_t
bcma_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct bcma_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = bcma_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

/* Bus child_deleted method: release the per-child bcma device info. */
static void
bcma_child_deleted(device_t dev, device_t child)
{
	struct bcma_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free bcma device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		bcma_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/* Expose bhnd(4) instance variables sourced from the child's core info. */
static int
bcma_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	const struct bcma_devinfo *dinfo;
	const struct bhnd_core_info *ci;

	dinfo = device_get_ivars(child);
	ci = &dinfo->corecfg->core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = ci->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = ci->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = ci->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(ci);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(ci->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(ci);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = ci->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = ci->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		*result = (uintptr_t) dinfo->pmu_info;
		return (0);
	default:
		return (ENOENT);
	}
}

/* Only BHND_IVAR_PMU_INFO is writable; the core-info ivars are immutable. */
static int
bcma_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct bcma_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		dinfo->pmu_info = (void *)value;
		return (0);
	default:
		return (ENOENT);
	}
}

/* Return the child's resource list (bus get_resource_list method). */
static struct resource_list *
bcma_get_resource_list(device_t dev, device_t child)
{
	struct bcma_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

/* Read the child core's I/O status (IOST) agent register. */
static int
bcma_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	value;
	int		error;

	if ((error = bhnd_read_config(child, BCMA_DMP_IOSTATUS, &value, 4)))
		return (error);

	/* Return only the bottom 16 bits */
	*iost = (value & BCMA_DMP_IOST_MASK);
	return (0);
}

/* Read the child core's I/O control (IOCTL) agent register. */
static int
bcma_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	value;
	int		error;

	if ((error = bhnd_read_config(child, BCMA_DMP_IOCTRL, &value, 4)))
		return (error);

	/* Return only the bottom 16 bits */
	*ioctl = (value & BCMA_DMP_IOCTRL_MASK);
	return (0);
}

/*
 * Update the bits of the child core's IOCTL register selected by 'mask'
 * to 'value', via the core's agent register block.
 */
static int
bcma_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct bcma_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ioctl;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);
	if ((r = dinfo->res_agent) == NULL)
		return (ENODEV);

	/* Write new value */
	ioctl = bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
	ioctl &= ~(BCMA_DMP_IOCTRL_MASK & mask);
	ioctl |= (value & mask);

	bhnd_bus_write_4(r, BCMA_DMP_IOCTRL, ioctl);

	/* Perform read-back and wait for completion */
	bhnd_bus_read_4(r, BCMA_DMP_IOCTRL);
	DELAY(10);

	return (0);
}

/*
 * Return true if the child core is held in reset or unclocked; errors
 * reading the hardware state are conservatively reported as suspended.
 */
static bool
bcma_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t	rst;
	uint16_t	ioctl;
	int		error;

	/* Is core held in RESET? */
	error = bhnd_read_config(child, BCMA_DMP_RESETCTRL, &rst, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	if (rst & BCMA_DMP_RC_RESET)
		return (true);

	/* Is core clocked?
 */
	error = bhnd_read_ioctl(child, &ioctl);
	if (error) {
		device_printf(child, "error reading HW ioctl register: %d\n",
		    error);
		return (true);
	}

	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

/*
 * Cycle the child core through reset: suspend it with 'reset_ioctl'
 * applied, enable and force clocks along with the caller's 'ioctl' flags,
 * release reset, then drop the forced clock gating.  The caller may not
 * pass BHND_IOCTL_CLK_EN/CLK_FORCE in 'ioctl'; those are managed here.
 */
static int
bcma_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct bcma_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint16_t		 clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Can't suspend the core without access to the agent registers */
	if ((r = dinfo->res_agent) == NULL)
		return (ENODEV);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/*
	 * Leaving the core in reset:
	 * - Set the caller's IOCTL flags
	 * - Enable clocks
	 * - Force clock distribution to ensure propagation throughout the
	 *   core.
	 */
	if ((error = bhnd_write_ioctl(child, ioctl | clkflags, UINT16_MAX)))
		return (error);

	/* Bring the core out of reset */
	if ((error = bcma_dmp_write_reset(child, dinfo, 0x0)))
		return (error);

	/* Disable forced clock gating (leaving clock enabled) */
	error = bhnd_write_ioctl(child, 0x0, BHND_IOCTL_CLK_FORCE);
	if (error)
		return (error);

	return (0);
}

/*
 * Place the child core into reset, writing the caller's 'ioctl' flags and
 * clearing CLK_EN/CLK_FORCE.  As with bcma_reset_hw(), the clock flags may
 * not appear in 'ioctl'.
 */
static int
bcma_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct bcma_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint16_t		 clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Can't suspend the core without access to the agent registers */
	if ((r = dinfo->res_agent) == NULL)
		return (ENODEV);

	/* Wait for any pending reset operations to clear */
	if ((error = bcma_dmp_wait_reset(child, dinfo)))
		return (error);

	/* Put core into reset (if not already in reset) */
	if ((error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET)))
		return (error);

	/* Write core flags (and clear CLK_EN/CLK_FORCE) */
	if ((error = bhnd_write_ioctl(child, ioctl, ~clkflags)))
		return (error);

	return (0);
}

/*
 * Read 'width' bytes (1, 2, or 4) at 'offset' within the child core's
 * agent register block into *value, with bounds checking.
 */
static int
bcma_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct bcma_devinfo	*dinfo;
	struct bhnd_resource	*r;

	/* Must be a directly attached child core */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch the agent registers */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->res_agent) == NULL)
		return (ENODEV);

	/* Verify bounds */
	if (offset > rman_get_size(r->res))
		return (EFAULT);

	if (rman_get_size(r->res) - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(r, offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(r, offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(r, offset);
return (0); default: return (EINVAL); } } static int bcma_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct bcma_devinfo *dinfo; struct bhnd_resource *r; /* Must be a directly attached child core */ if (device_get_parent(child) != dev) return (EINVAL); /* Fetch the agent registers */ dinfo = device_get_ivars(child); if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Verify bounds */ if (offset > rman_get_size(r->res)) return (EFAULT); if (rman_get_size(r->res) - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint16_t *)value); return (0); case 4: bhnd_bus_write_4(r, offset, *(const uint32_t *)value); return (0); default: return (EINVAL); } } static u_int bcma_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct bcma_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); switch (type) { case BHND_PORT_DEVICE: return (dinfo->corecfg->num_dev_ports); case BHND_PORT_BRIDGE: return (dinfo->corecfg->num_bridge_ports); case BHND_PORT_AGENT: return (dinfo->corecfg->num_wrapper_ports); default: device_printf(dev, "%s: unknown type (%d)\n", __func__, type); return (0); } } static u_int bcma_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port_num) { struct bcma_devinfo *dinfo; struct bcma_sport_list *ports; struct bcma_sport *port; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port_num)); dinfo = device_get_ivars(child); ports = bcma_corecfg_get_port_list(dinfo->corecfg, type); STAILQ_FOREACH(port, ports, sp_link) { if (port->sp_num == port_num) return (port->sp_num_maps); } /* not 
found */ return (0); } static int bcma_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type); STAILQ_FOREACH(port, ports, sp_link) { if (port->sp_num != port_num) continue; STAILQ_FOREACH(map, &port->sp_maps, m_link) if (map->m_region_num == region_num) return map->m_rid; } return -1; } static int bcma_decode_port_rid(device_t dev, device_t child, int type, int rid, bhnd_port_type *port_type, u_int *port_num, u_int *region_num) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); /* Ports are always memory mapped */ if (type != SYS_RES_MEMORY) return (EINVAL); /* Starting with the most likely device list, search all three port * lists */ bhnd_port_type types[] = { BHND_PORT_DEVICE, BHND_PORT_AGENT, BHND_PORT_BRIDGE }; for (int i = 0; i < nitems(types); i++) { ports = bcma_corecfg_get_port_list(dinfo->corecfg, types[i]); STAILQ_FOREACH(port, ports, sp_link) { STAILQ_FOREACH(map, &port->sp_maps, m_link) { if (map->m_rid != rid) continue; *port_type = port->sp_type; *port_num = port->sp_num; *region_num = map->m_region_num; return (0); } } } return (ENOENT); } static int bcma_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type); /* Search the port list */ STAILQ_FOREACH(port, ports, sp_link) { if (port->sp_num != port_num) continue; STAILQ_FOREACH(map, &port->sp_maps, m_link) { if (map->m_region_num != region_num) continue; /* Found! 
*/ *addr = map->m_base; *size = map->m_size; return (0); } } return (ENOENT); } /** * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT(). */ u_int bcma_get_intr_count(device_t dev, device_t child) { struct bcma_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child)); dinfo = device_get_ivars(child); return (dinfo->num_intrs); } /** * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC(). */ int bcma_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec) { struct bcma_devinfo *dinfo; struct bcma_intr *desc; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) { return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child, intr, ivec)); } dinfo = device_get_ivars(child); STAILQ_FOREACH(desc, &dinfo->intrs, i_link) { if (desc->i_sel == intr) { *ivec = desc->i_busline; return (0); } } /* Not found */ return (ENXIO); } /** * Scan the device enumeration ROM table, adding all valid discovered cores to * the bus. * * @param bus The bcma bus. */ int bcma_add_children(device_t bus) { bhnd_erom_t *erom; struct bcma_erom *bcma_erom; struct bhnd_erom_io *eio; const struct bhnd_chipid *cid; struct bcma_corecfg *corecfg; struct bcma_devinfo *dinfo; device_t child; int error; cid = BHND_BUS_GET_CHIPID(bus, bus); corecfg = NULL; /* Allocate our EROM parser */ eio = bhnd_erom_iores_new(bus, BCMA_EROM_RID); erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio); if (erom == NULL) { bhnd_erom_io_fini(eio); return (ENODEV); } /* Add all cores. 
*/ bcma_erom = (struct bcma_erom *)erom; while ((error = bcma_erom_next_corecfg(bcma_erom, &corecfg)) == 0) { /* Add the child device */ - child = BUS_ADD_CHILD(bus, 0, NULL, -1); + child = BUS_ADD_CHILD(bus, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { error = ENXIO; goto cleanup; } /* Initialize device ivars */ dinfo = device_get_ivars(child); if ((error = bcma_init_dinfo(bus, child, dinfo, corecfg))) goto cleanup; /* The dinfo instance now owns the corecfg value */ corecfg = NULL; /* If pins are floating or the hardware is otherwise * unpopulated, the device shouldn't be used. */ if (bhnd_is_hw_disabled(child)) device_disable(child); /* Issue bus callback for fully initialized child. */ BHND_BUS_CHILD_ADDED(bus, child); } /* EOF while parsing cores is expected */ if (error == ENOENT) error = 0; cleanup: bhnd_erom_free(erom); if (corecfg != NULL) bcma_free_corecfg(corecfg); if (error) device_delete_children(bus); return (error); } static device_method_t bcma_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcma_probe), DEVMETHOD(device_attach, bcma_attach), DEVMETHOD(device_detach, bcma_detach), /* Bus interface */ DEVMETHOD(bus_add_child, bcma_add_child), DEVMETHOD(bus_child_deleted, bcma_child_deleted), DEVMETHOD(bus_read_ivar, bcma_read_ivar), DEVMETHOD(bus_write_ivar, bcma_write_ivar), DEVMETHOD(bus_get_resource_list, bcma_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, bcma_get_erom_class), DEVMETHOD(bhnd_bus_read_ioctl, bcma_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, bcma_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, bcma_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, bcma_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, bcma_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, bcma_suspend_hw), DEVMETHOD(bhnd_bus_read_config, bcma_read_config), DEVMETHOD(bhnd_bus_write_config, bcma_write_config), DEVMETHOD(bhnd_bus_get_port_count, bcma_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, bcma_get_region_count), 
DEVMETHOD(bhnd_bus_get_port_rid, bcma_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, bcma_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, bcma_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, bcma_get_intr_count), DEVMETHOD(bhnd_bus_get_intr_ivec, bcma_get_intr_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, bcma_driver, bcma_methods, sizeof(struct bcma_softc), bhnd_driver); MODULE_VERSION(bcma, 1); MODULE_DEPEND(bcma, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index af62057690ac..eeff088ffdde 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2300 +1,2300 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, struct 
resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). */ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. 
*/ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. 
*/ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. */ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. 
*/ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, alloc_flags, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. 
*/ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ if (bhndb_has_static_region_mapping(br, addr, size)) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, pp->alloc_flags, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; uint32_t flags; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; flags = region->alloc_flags; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? 
"static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) printf(" [overcommit]\n"); else printf("\n"); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, the matched hardware specification. * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. */ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge. * @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. 
*/ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ - sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); + sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", DEVICE_UNIT_ANY); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio 
= BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Delete children */ if ((error = device_delete_children(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. 
*/ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. 
IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). */ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) 
*/ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). 
*/ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). */ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. 
*/ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_type(rv, type); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. 
*/ if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int bhndb_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ rm = bhndb_get_rman(sc, child, rman_get_type(r)); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), child, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited by the bridged resource mapping */ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offsize and @p size. * * @param r The register to be initialized. * @param parent The parent bus resource that fully contains the subregion. 
* @param offset The subregion offset within @p parent.
 * @param size The subregion size.
 *
 * @retval 0 success
 * @retval non-zero if initializing @p r otherwise fails
 */
static int
bhndb_init_child_resource(struct resource *r, struct resource *parent,
    bhnd_size_t offset, bhnd_size_t size)
{
	bus_space_handle_t	bh, child_bh;
	bus_space_tag_t		bt;
	uintptr_t		vaddr;
	int			error;

	/* Fetch the parent resource's real bus values */
	vaddr = (uintptr_t) rman_get_virtual(parent);
	bt = rman_get_bustag(parent);
	bh = rman_get_bushandle(parent);

	/* Carve the requested subregion out of the parent's bus handle */
	error = bus_space_subregion(bt, bh, offset, size, &child_bh);
	if (error)
		return (error);

	/* Propagate the window-adjusted bus values to the child */
	rman_set_virtual(r, (void *) (vaddr + offset));
	rman_set_bustag(r, bt);
	rman_set_bushandle(r, child_bh);

	return (0);
}

/**
 * Attempt activation of a fixed register window mapping for @p child.
 *
 * @param sc BHNDB device state.
 * @param region The static region definition capable of mapping @p r.
 * @param child A child requesting resource activation.
 * @param r Resource to be activated.
 *
 * @retval 0 if @p r was activated successfully
 * @retval non-zero if @p r could not be activated.
*/
static int
bhndb_activate_static_region(struct bhndb_softc *sc,
    struct bhndb_region *region, device_t child, struct resource *r)
{
	struct resource			*bridge_res;
	const struct bhndb_regwin	*win;
	bhnd_size_t			 parent_offset;
	rman_res_t			 r_start, r_size;
	int				 error;

	win = region->static_regwin;

	KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type),
	    ("can't activate non-static region"));

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Look up the bridge resource backing this register window */
	bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win);
	if (bridge_res == NULL)
		return (ENXIO);

	/* Compute the subregion offset within the parent resource */
	parent_offset = (r_start - region->addr) + win->win_offset;

	/* Configure the resource with its real bus values */
	error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size);
	if (error)
		return (error);

	/* Mark active */
	return (rman_activate_resource(r));
}

/**
 * Attempt to allocate/retain a dynamic register window for @p r, returning
 * the retained window on success, or NULL on failure.
 *
 * @param sc The bhndb driver state.
 * @param r The resource for which a window will be retained.
*/
static struct bhndb_dw_alloc *
bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r)
{
	struct bhndb_dw_alloc	*dwa;
	rman_res_t		 r_start, r_size;
	int			 error;

	BHNDB_LOCK_ASSERT(sc, MA_OWNED);

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Prefer referencing an existing dynamic mapping of this range */
	dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size);
	if (dwa != NULL) {
		if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0)
			return (dwa);

		return (NULL);
	}

	/* Otherwise, try to reserve a free window */
	if ((dwa = bhndb_dw_next_free(sc->bus_res)) == NULL) {
		/* No free windows */
		return (NULL);
	}

	/* The window must be able to map the entire resource */
	if (dwa->win->win_size < r_size)
		return (NULL);

	/* Point the window at the resource's range */
	error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, r_start, r_size);
	if (error) {
		device_printf(sc->dev, "dynamic window initialization "
		    "for 0x%llx-0x%llx failed: %d\n",
		    (unsigned long long) r_start,
		    (unsigned long long) r_start + r_size - 1,
		    error);
		return (NULL);
	}

	/* Add our reservation */
	if (bhndb_dw_retain(sc->bus_res, dwa, r))
		return (NULL);

	return (dwa);
}

/**
 * Activate a resource using any viable static or dynamic register window.
 *
 * @param sc The bhndb driver state.
 * @param child The child holding ownership of @p r.
 * @param r The resource to be activated
 * @param[out] indirect On error and if not NULL, will be set to 'true' if
 * the caller should instead use an indirect resource mapping.
 *
 * @retval 0 success
 * @retval non-zero activation failed.
*/ static int bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; int error, type; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); if (indirect != NULL) *indirect = false; type = rman_get_type(r); switch (type) { case SYS_RES_IRQ: /* IRQ resources are always directly mapped */ return (rman_activate_resource(r)); case SYS_RES_MEMORY: /* Handled below */ break; default: device_printf(sc->dev, "unsupported resource type %d\n", type); return (ENXIO); } /* Only MMIO resources can be mapped via register windows */ KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type)); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Activate native addrspace resources using the host address space */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { struct resource *parent; /* Find the bridge resource referenced by the child */ parent = bhndb_host_resource_for_range(sc->bus_res->res, type, r_start, r_size); if (parent == NULL) { device_printf(sc->dev, "host resource not found " "for 0x%llx-0x%llx\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (ENOENT); } /* Initialize child resource with the real bus values */ error = bhndb_init_child_resource(r, parent, r_start - rman_get_start(parent), r_size); if (error) return (error); /* Try to activate child resource */ return (rman_activate_resource(r)); } /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic windows. 
*/ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). */ static int bhndb_activate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, r)); } return (bhndb_try_activate_resource(sc, child, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). 
*/ static int bhndb_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error, type; sc = device_get_softc(dev); type = rman_get_type(r); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. 
*/
static int
bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid,
    struct bhnd_resource *r)
{
	struct bhndb_softc	*sc;
	struct bhndb_region	*region;
	bhndb_priority_t	 r_prio;
	rman_res_t		 r_start, r_size;
	int			 error;
	bool			 indirect;

	KASSERT(!r->direct, ("direct flag set on inactive resource"));
	KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE),
	    ("RF_ACTIVE set on inactive resource"));

	sc = device_get_softc(dev);

	/* Resource types without a local resource manager are delegated
	 * directly to BUS_ACTIVATE_RESOURCE(), and are always direct. */
	if (bhndb_get_rman(sc, child, type) == NULL) {
		error = BUS_ACTIVATE_RESOURCE(dev, child, r->res);
		if (error == 0)
			r->direct = true;

		return (error);
	}

	r_start = rman_get_start(r->res);
	r_size = rman_get_size(r->res);

	/* For bridged address space, determine the resource priority and
	 * skip direct activation entirely if the priority is too low. */
	if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) {
		switch (type) {
		case SYS_RES_IRQ:
			/* IRQ resources are always direct */
			break;

		case SYS_RES_MEMORY:
			region = bhndb_find_resource_region(sc->bus_res,
			    r_start, r_size);
			if (region != NULL)
				r_prio = region->priority;
			else
				r_prio = BHNDB_PRIORITY_NONE;

			/* Below the minimum dynamic window priority, this
			 * resource should always be indirect. */
			if (r_prio < sc->bus_res->min_prio)
				return (0);

			break;

		default:
			device_printf(dev, "unsupported resource type %d\n",
			    type);
			return (ENXIO);
		}
	}

	/* Attempt direct activation */
	error = bhndb_try_activate_resource(sc, child, r->res, &indirect);
	if (!error) {
		r->direct = true;
	} else if (indirect) {
		/* The request was valid, but no viable register window is
		 * available; indirection must be employed. */
		error = 0;
		r->direct = false;
	}

	if (BHNDB_DEBUG(PRIO) &&
	    bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) {
		device_printf(child, "activated 0x%llx-0x%llx as %s "
		    "resource\n", (unsigned long long) r_start,
		    (unsigned long long) r_start + r_size - 1,
		    r->direct ? "direct" : "indirect");
	}

	return (error);
}

/**
 * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE().
 */
static int
bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid,
    struct bhnd_resource *r)
{
	int error;

	/* Indirect resources were never activated and require no work */
	if (!r->direct)
		return (0);

	KASSERT(rman_get_flags(r->res) & RF_ACTIVE,
	    ("RF_ACTIVE not set on direct resource"));

	/* Perform deactivation */
	error = BUS_DEACTIVATE_RESOURCE(dev, child, r->res);
	if (!error)
		r->direct = false;

	return (error);
}

/**
 * Find the best available bridge resource allocation record capable of handling
 * bus I/O requests of @p size at @p addr.
 *
 * In order of preference, this function will either:
 *
 * - Configure and return a free allocation record
 * - Return an existing allocation record mapping the requested space, or
 * - Steal, configure, and return an in-use allocation record.
 *
 * Will panic if a usable record cannot be found.
 *
 * @param sc Bridge driver state.
 * @param addr The I/O target address.
 * @param size The size of the I/O operation to be performed at @p addr.
 * @param[out] borrowed Set to true if the allocation record was borrowed to
 * fulfill this request; the borrowed record maps the target address range,
 * and must not be modified.
 * @param[out] stolen Set to true if the allocation record was stolen to fulfill
 * this request. If a stolen allocation record is returned,
 * bhndb_io_resource_restore() must be called upon completion of the bus I/O
 * request.
 * @param[out] restore If the allocation record was stolen, this will be set
 * to the target that must be restored.
*/
static struct bhndb_dw_alloc *
bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr,
    bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore)
{
	struct bhndb_resources	*br;
	struct bhndb_dw_alloc	*dwa;
	struct bhndb_region	*region;

	BHNDB_LOCK_ASSERT(sc, MA_OWNED);

	br = sc->bus_res;
	*borrowed = false;
	*stolen = false;

	/* A free window is the preferred choice */
	if ((dwa = bhndb_dw_next_free(br)) != NULL)
		return (dwa);

	/* Failing that, borrow an existing dynamic mapping of this address
	 * range. Static regions are not searched, as a statically mapped
	 * region would never be allocated as an indirect resource. */
	for (size_t i = 0; i < br->dwa_count; i++) {
		const struct bhndb_regwin *win;

		dwa = &br->dw_alloc[i];
		win = dwa->win;

		KASSERT(win->win_type == BHNDB_REGWIN_T_DYN,
		    ("invalid register window type"));

		/* The window must map the entire request range */
		if (addr < dwa->target)
			continue;

		if (addr + size > dwa->target + win->win_size)
			continue;

		/* Found a usable borrowed mapping */
		*borrowed = true;
		return (dwa);
	}

	/* As a last resort, try to steal a window; this should only be
	 * required on very early PCI_V0 (BCM4318, etc) Wi-Fi chipsets */
	region = bhndb_find_resource_region(br, addr, size);
	if (region == NULL)
		return (NULL);

	if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0)
		return (NULL);

	/* Stealing acquires our backing spinlock, disabling interrupts; the
	 * spinlock will be released by bhndb_dw_return_stolen() */
	if ((dwa = bhndb_dw_steal(br, restore)) != NULL) {
		*stolen = true;
		return (dwa);
	}

	panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", 
	    (unsigned long long) addr, (unsigned long long) addr+size-1);
}

/**
 * Return a borrowed reference to a bridge resource allocation record capable
 * of handling bus I/O requests of @p size at @p addr.
 *
 * This will either return a reference to an existing allocation record mapping
 * the requested space, or will configure and return a free allocation record.
 *
 * Will panic if a usable record cannot be found.
 *
 * @param sc Bridge driver state.
* @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. */ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, 
rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ 
BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). */ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { u_int ivec; int error; /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). 
*/
static int
bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags,
    driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_isrc		*isrc;
	struct bhndb_intr_handler	*ih;
	int				 error;

	sc = device_get_softc(dev);

	/* Fetch the isrc backing this child IRQ resource */
	if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) {
		device_printf(dev, "failed to fetch isrc: %d\n", error);
		return (error);
	}

	/* Allocate a new interrupt handler entry */
	ih = bhndb_alloc_intr_handler(child, r, isrc);
	if (ih == NULL)
		return (ENOMEM);

	/* Perform the actual interrupt setup via the host isrc */
	error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter,
	    handler, arg, &ih->ih_cookiep);
	if (error) {
		bhndb_free_intr_handler(ih);
		return (error);
	}

	/* Record the handler in our interrupt handler list */
	BHNDB_LOCK(sc);
	bhndb_register_intr_handler(sc->bus_res, ih);
	BHNDB_UNLOCK(sc);

	/* The handler entry doubles as our cookiep value */
	*cookiep = ih;
	return (0);
}

/**
 * Default bhndb(4) implementation of BUS_TEARDOWN_INTR().
*/
static int
bhndb_teardown_intr(device_t dev, device_t child, struct resource *r,
    void *cookiep)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;
	int				 error;

	sc = device_get_softc(dev);

	/* Locate and claim ownership of the interrupt handler entry */
	BHNDB_LOCK(sc);
	ih = bhndb_find_intr_handler(sc->bus_res, cookiep);
	if (ih == NULL) {
		panic("%s requested teardown of invalid cookiep %p",
		    device_get_nameunit(child), cookiep);
	}
	bhndb_deregister_intr_handler(sc->bus_res, ih);
	BHNDB_UNLOCK(sc);

	/* Perform the actual interrupt teardown via the host isrc */
	isrc = ih->ih_isrc;
	error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep);
	if (error) {
		/* Teardown failed; reinsert the handler entry so that a
		 * later teardown attempt remains possible. */
		BHNDB_LOCK(sc);
		bhndb_register_intr_handler(sc->bus_res, ih);
		BHNDB_UNLOCK(sc);

		return (error);
	}

	/* Free the entry */
	bhndb_free_intr_handler(ih);
	return (0);
}

/**
 * Default bhndb(4) implementation of BUS_BIND_INTR().
 */
static int
bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;

	sc = device_get_softc(dev);
	isrc = NULL;

	/* Fetch the isrc corresponding to the child IRQ resource */
	BHNDB_LOCK(sc);
	STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) {
		if (ih->ih_res == irq) {
			isrc = ih->ih_isrc;
			break;
		}
	}
	BHNDB_UNLOCK(sc);

	if (isrc == NULL) {
		panic("%s requested bind of invalid irq %#jx-%#jx",
		    device_get_nameunit(child), rman_get_start(irq),
		    rman_get_end(irq));
	}

	/* Perform the actual bind via the host isrc */
	return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu));
}

/**
 * Default bhndb(4) implementation of BUS_DESCRIBE_INTR().
*/ static int bhndb_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); /* Locate the interrupt handler entry; the caller owns the handler * reference, and thus our entry is guaranteed to remain valid after * we drop out lock below. */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookie); if (ih == NULL) { panic("%s requested invalid cookiep %p", device_get_nameunit(child), cookie); } isrc = ih->ih_isrc; BHNDB_UNLOCK(sc); /* Perform the actual request via the host isrc */ return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner), isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr)); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BUS_REMAP_INTR(). */ static int bhndb_remap_intr(device_t dev, device_t child, u_int irq) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION(). */ static inline int bhndb_get_dma_translation(device_t dev, device_t child, u_int width, uint32_t flags, bus_dma_tag_t *dmat, struct bhnd_dma_translation *translation) { struct bhndb_softc *sc; const struct bhndb_hwcfg *hwcfg; const struct bhnd_dma_translation *match; bus_dma_tag_t match_dmat; bhnd_addr_t addr_mask, match_addr_mask; sc = device_get_softc(dev); hwcfg = sc->bus_res->cfg; /* Is DMA supported? */ if (sc->bus_res->res->dma_tags == NULL) return (ENODEV); /* Is the requested width supported? 
*/ if (width > BHND_DMA_ADDR_32BIT) { /* Backplane must support 64-bit addressing */ if (!(sc->chipid.chip_caps & BHND_CAP_BP64)) width = BHND_DMA_ADDR_32BIT; } /* Find the best matching descriptor for the requested width */ addr_mask = BHND_DMA_ADDR_BITMASK(width); match = NULL; match_addr_mask = 0x0; match_dmat = NULL; for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) { const struct bhnd_dma_translation *dwin; bhnd_addr_t masked; dwin = &hwcfg->dma_translations[i]; /* The base address must be device addressable */ if ((dwin->base_addr & addr_mask) != dwin->base_addr) continue; /* The flags must match */ if ((dwin->flags & flags) != flags) continue; /* The window must cover at least part of our addressable * range */ masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask; if (masked == 0) continue; /* Is this a better match? */ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. 
* * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), 
DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, 
bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index 60cb04400cb0..a7da4ec9b7fc 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1391 +1,1391 @@ /*- * 
SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Broadcom ChipCommon driver. 
* * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? 
*/ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks*/ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(device_t dev, int type, u_int flags); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, name) \ KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(_name) " not set")) #define CHIPC_ASSERT_CAP(_sc, name) \ KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(_name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, 
chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core = sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * response to ChipCommin API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_generic_attach). 
*/ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { - child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); + child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc. 
*/ if (sc->caps.pmu && !sc->caps.aob) { - child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); + child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { - child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); + child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ - child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); + child = BUS_ADD_CHILD(sc->dev, 0, "gpio", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; - child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); + child = BUS_ADD_CHILD(sc->dev, 0, "uart", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; - child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); + child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, 
"failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. 
*/ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, 
CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. 
*/ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != 
NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return (ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate 
per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The chipc device state. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) * @param flags Resource flags (e.g. RF_PREFETCHABLE) */ static struct rman * chipc_get_rman(device_t dev, int type, u_int flags) { struct chipc_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); }; } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(dev, type, flags); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* 
Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. 
*/ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ error = bus_generic_rman_release_resource(dev, child, r); if (error != 0) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? 
*/ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param sc Driver instance state. * @param child Requesting child device. * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct) { struct chipc_softc *sc = device_get_softc(dev); struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. 
*/ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct rman *rm; int error; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r->res)); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(dev, child, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, struct resource *r) { struct rman *rm; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(dev, child, r, true)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int chipc_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? 
*/ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? 
*/ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
*/ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use")); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl |= CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return; } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } } static uint32_t chipc_read_chipst(device_t dev) { struct chipc_softc *sc = device_get_softc(dev); return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST)); } static void chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask) { struct chipc_softc *sc; uint32_t cctrl; sc = device_get_softc(dev); CHIPC_LOCK(sc); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); cctrl = (cctrl & ~mask) | (value | 
mask); bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); CHIPC_UNLOCK(sc); } static struct chipc_caps * chipc_get_caps(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); return (&sc->caps); } static device_method_t chipc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, chipc_probe), DEVMETHOD(device_attach, chipc_attach), DEVMETHOD(device_detach, chipc_detach), DEVMETHOD(device_suspend, chipc_suspend), DEVMETHOD(device_resume, chipc_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, chipc_probe_nomatch), DEVMETHOD(bus_print_child, chipc_print_child), DEVMETHOD(bus_add_child, chipc_add_child), DEVMETHOD(bus_child_deleted, chipc_child_deleted), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, chipc_alloc_resource), DEVMETHOD(bus_release_resource, chipc_release_resource), DEVMETHOD(bus_adjust_resource, chipc_adjust_resource), DEVMETHOD(bus_activate_resource, chipc_activate_resource), DEVMETHOD(bus_deactivate_resource, chipc_deactivate_resource), DEVMETHOD(bus_get_resource_list, chipc_get_resource_list), DEVMETHOD(bus_get_rman, chipc_get_rman), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), /* BHND bus inteface */ DEVMETHOD(bhnd_bus_activate_resource, chipc_activate_bhnd_resource), /* ChipCommon interface */ DEVMETHOD(bhnd_chipc_read_chipst, chipc_read_chipst), DEVMETHOD(bhnd_chipc_write_chipctrl, chipc_write_chipctrl), DEVMETHOD(bhnd_chipc_enable_sprom, chipc_enable_sprom), DEVMETHOD(bhnd_chipc_disable_sprom, chipc_disable_sprom), DEVMETHOD(bhnd_chipc_get_caps, chipc_get_caps), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods, 
sizeof(struct chipc_softc)); EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1); MODULE_VERSION(bhnd_chipc, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc_spi.c b/sys/dev/bhnd/cores/chipc/chipc_spi.c index 75e4b5cb7bc4..46b445cf0756 100644 --- a/sys/dev/bhnd/cores/chipc/chipc_spi.c +++ b/sys/dev/bhnd/cores/chipc/chipc_spi.c @@ -1,276 +1,276 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2016 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "spibus_if.h" #include "chipcreg.h" #include "chipcvar.h" #include "chipc_slicer.h" #include "chipc_spi.h" static int chipc_spi_probe(device_t dev); static int chipc_spi_attach(device_t dev); static int chipc_spi_detach(device_t dev); static int chipc_spi_transfer(device_t dev, device_t child, struct spi_command *cmd); static int chipc_spi_txrx(struct chipc_spi_softc *sc, uint8_t in, uint8_t* out); static int chipc_spi_wait(struct chipc_spi_softc *sc); static int chipc_spi_probe(device_t dev) { device_set_desc(dev, "Broadcom ChipCommon SPI"); return (BUS_PROBE_NOWILDCARD); } static int chipc_spi_attach(device_t dev) { struct chipc_spi_softc *sc; struct chipc_caps *ccaps; device_t flash_dev; device_t spibus; const char *flash_name; int error; sc = device_get_softc(dev); /* Allocate SPI controller registers */ sc->sc_rid = 1; sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "failed to allocate device registers\n"); return (ENXIO); } /* Allocate flash shadow region */ sc->sc_flash_rid = 0; sc->sc_flash_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_flash_rid, RF_ACTIVE); if (sc->sc_flash_res == NULL) { device_printf(dev, "failed to allocate flash region\n"); error = ENXIO; goto failed; } /* * Add flash device * * XXX: This should be replaced with a DEVICE_IDENTIFY implementation * in chipc-specific subclasses of the mx25l and at45d drivers. 
*/ if ((spibus = device_add_child(dev, "spibus", -1)) == NULL) { device_printf(dev, "failed to add spibus\n"); error = ENXIO; goto failed; } /* Let spibus perform full attach before we try to call * BUS_ADD_CHILD() */ if ((error = bus_generic_attach(dev))) goto failed; /* Determine flash type and add the flash child */ ccaps = BHND_CHIPC_GET_CAPS(device_get_parent(dev)); flash_name = chipc_sflash_device_name(ccaps->flash_type); if (flash_name != NULL) { - flash_dev = BUS_ADD_CHILD(spibus, 0, flash_name, -1); + flash_dev = BUS_ADD_CHILD(spibus, 0, flash_name, DEVICE_UNIT_ANY); if (flash_dev == NULL) { device_printf(dev, "failed to add %s\n", flash_name); error = ENXIO; goto failed; } chipc_register_slicer(ccaps->flash_type); if ((error = device_probe_and_attach(flash_dev))) { device_printf(dev, "failed to attach %s: %d\n", flash_name, error); goto failed; } } return (0); failed: device_delete_children(dev); if (sc->sc_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res); if (sc->sc_flash_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_flash_rid, sc->sc_flash_res); return (error); } static int chipc_spi_detach(device_t dev) { struct chipc_spi_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_flash_rid, sc->sc_flash_res); return (0); } static int chipc_spi_wait(struct chipc_spi_softc *sc) { int i; for (i = CHIPC_SPI_MAXTRIES; i > 0; i--) if (!(SPI_READ(sc, CHIPC_SPI_FLASHCTL) & CHIPC_SPI_FLASHCTL_START)) break; if (i > 0) return (0); BHND_WARN_DEV(sc->sc_dev, "busy: CTL=0x%x DATA=0x%x", SPI_READ(sc, CHIPC_SPI_FLASHCTL), SPI_READ(sc, CHIPC_SPI_FLASHDATA)); return (-1); } static int chipc_spi_txrx(struct chipc_spi_softc *sc, uint8_t out, uint8_t* in) { uint32_t ctl; ctl = CHIPC_SPI_FLASHCTL_START | CHIPC_SPI_FLASHCTL_CSACTIVE | out; SPI_BARRIER_WRITE(sc); 
SPI_WRITE(sc, CHIPC_SPI_FLASHCTL, ctl); SPI_BARRIER_WRITE(sc); if (chipc_spi_wait(sc)) return (-1); *in = SPI_READ(sc, CHIPC_SPI_FLASHDATA) & 0xff; return (0); } static int chipc_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct chipc_spi_softc *sc; uint8_t *buf_in; uint8_t *buf_out; int i; sc = device_get_softc(dev); KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); if (cmd->tx_cmd_sz == 0) { BHND_DEBUG_DEV(child, "size of command is ZERO"); return (EIO); } SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, CHIPC_SPI_FLASHADDR, 0); SPI_BARRIER_WRITE(sc); /* * Transfer command */ buf_out = (uint8_t *)cmd->tx_cmd; buf_in = (uint8_t *)cmd->rx_cmd; for (i = 0; i < cmd->tx_cmd_sz; i++) if (chipc_spi_txrx(sc, buf_out[i], &(buf_in[i]))) return (EIO); /* * Receive/transmit data */ buf_out = (uint8_t *)cmd->tx_data; buf_in = (uint8_t *)cmd->rx_data; for (i = 0; i < cmd->tx_data_sz; i++) if (chipc_spi_txrx(sc, buf_out[i], &(buf_in[i]))) return (EIO); /* * Clear CS bit and whole control register */ SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, CHIPC_SPI_FLASHCTL, 0); SPI_BARRIER_WRITE(sc); return (0); } static device_method_t chipc_spi_methods[] = { DEVMETHOD(device_probe, chipc_spi_probe), DEVMETHOD(device_attach, chipc_spi_attach), DEVMETHOD(device_detach, chipc_spi_detach), /* SPI */ DEVMETHOD(spibus_transfer, chipc_spi_transfer), DEVMETHOD_END }; static driver_t chipc_spi_driver = { "spi", chipc_spi_methods, sizeof(struct chipc_spi_softc), }; DRIVER_MODULE(chipc_spi, bhnd_chipc, chipc_spi_driver, 0, 0); diff --git a/sys/dev/bhnd/siba/siba.c b/sys/dev/bhnd/siba/siba.c index 2688f3415aa5..e693481f0819 100644 --- a/sys/dev/bhnd/siba/siba.c +++ b/sys/dev/bhnd/siba/siba.c @@ -1,1438 +1,1438 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. 
* * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include #include #include #include #include #include #include #include #include #include #include "siba_eromvar.h" #include "sibareg.h" #include "sibavar.h" /* RID used when allocating EROM resources */ #define SIBA_EROM_RID 0 static bhnd_erom_class_t * siba_get_erom_class(driver_t *driver) { return (&siba_erom_parser); } int siba_probe(device_t dev) { device_set_desc(dev, "SIBA BHND bus"); return (BUS_PROBE_DEFAULT); } /** * Default siba(4) bus driver implementation of DEVICE_ATTACH(). 
* * This implementation initializes internal siba(4) state and performs * bus enumeration, and must be called by subclassing drivers in * DEVICE_ATTACH() before any other bus methods. */ int siba_attach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; SIBA_LOCK_INIT(sc); /* Enumerate children */ if ((error = siba_add_children(dev))) { device_delete_children(dev); SIBA_LOCK_DESTROY(sc); return (error); } return (0); } int siba_detach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); if ((error = bhnd_generic_detach(dev))) return (error); SIBA_LOCK_DESTROY(sc); return (0); } int siba_resume(device_t dev) { return (bhnd_generic_resume(dev)); } int siba_suspend(device_t dev) { return (bhnd_generic_suspend(dev)); } static int siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct siba_softc *sc; const struct siba_devinfo *dinfo; const struct bhnd_core_info *cfg; sc = device_get_softc(dev); dinfo = device_get_ivars(child); cfg = &dinfo->core_id.core_info; switch (index) { case BHND_IVAR_VENDOR: *result = cfg->vendor; return (0); case BHND_IVAR_DEVICE: *result = cfg->device; return (0); case BHND_IVAR_HWREV: *result = cfg->hwrev; return (0); case BHND_IVAR_DEVICE_CLASS: *result = bhnd_core_class(cfg); return (0); case BHND_IVAR_VENDOR_NAME: *result = (uintptr_t) bhnd_vendor_name(cfg->vendor); return (0); case BHND_IVAR_DEVICE_NAME: *result = (uintptr_t) bhnd_core_name(cfg); return (0); case BHND_IVAR_CORE_INDEX: *result = cfg->core_idx; return (0); case BHND_IVAR_CORE_UNIT: *result = cfg->unit; return (0); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: *result = (uintptr_t)NULL; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_BHND: *result = (uintptr_t)dinfo->pmu.bhnd_info; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: *result = (uintptr_t)NULL; SIBA_UNLOCK(sc); return (0); } panic("invalid PMU state: %d", 
dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static int siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct siba_softc *sc; struct siba_devinfo *dinfo; sc = device_get_softc(dev); dinfo = device_get_ivars(child); switch (index) { case BHND_IVAR_VENDOR: case BHND_IVAR_DEVICE: case BHND_IVAR_HWREV: case BHND_IVAR_DEVICE_CLASS: case BHND_IVAR_VENDOR_NAME: case BHND_IVAR_DEVICE_NAME: case BHND_IVAR_CORE_INDEX: case BHND_IVAR_CORE_UNIT: return (EINVAL); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: case SIBA_PMU_BHND: dinfo->pmu.bhnd_info = (void *)value; dinfo->pmu_state = SIBA_PMU_BHND; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: panic("bhnd_set_pmu_info() called with siba PMU state " "%d", dinfo->pmu_state); return (ENXIO); } panic("invalid PMU state: %d", dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static struct resource_list * siba_get_resource_list(device_t dev, device_t child) { struct siba_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* BHND_BUS_ALLOC_PMU() */ static int siba_alloc_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t chipc; device_t pwrctl; struct chipc_caps ccaps; siba_pmu_state pmu_state; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); pwrctl = NULL; /* Fetch ChipCommon capability flags */ chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC); if (chipc != NULL) { ccaps = *BHND_CHIPC_GET_CAPS(chipc); bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC); } else { memset(&ccaps, 0, sizeof(ccaps)); } /* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and * advertises PMU support */ if (ccaps.pmu) { if ((error = bhnd_generic_alloc_pmu(dev, child))) return (error); KASSERT(dinfo->pmu_state == SIBA_PMU_BHND, ("unexpected PMU state: %d", 
dinfo->pmu_state)); return (0); } /* * This is either a legacy PWRCTL chipset, or the device does not * support dynamic clock control. * * We need to map all bhnd(4) bus PMU to PWRCTL or no-op operations. */ if (ccaps.pwr_ctrl) { pmu_state = SIBA_PMU_PWRCTL; pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL); if (pwrctl == NULL) { device_printf(dev, "PWRCTL not found\n"); return (ENODEV); } } else { pmu_state = SIBA_PMU_FIXED; pwrctl = NULL; } SIBA_LOCK(sc); /* Per-core PMU state already allocated? */ if (dinfo->pmu_state != SIBA_PMU_NONE) { panic("duplicate PMU allocation for %s", device_get_nameunit(child)); } /* Update the child's PMU allocation state, and transfer ownership of * the PWRCTL provider reference (if any) */ dinfo->pmu_state = pmu_state; dinfo->pmu.pwrctl = pwrctl; SIBA_UNLOCK(sc); return (0); } /* BHND_BUS_RELEASE_PMU() */ static int siba_release_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t pwrctl; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("pmu over-release for %s", device_get_nameunit(child)); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_pmu(dev, child)); case SIBA_PMU_PWRCTL: /* Requesting BHND_CLOCK_DYN releases any outstanding clock * reservations */ pwrctl = dinfo->pmu.pwrctl; error = bhnd_pwrctl_request_clock(pwrctl, child, BHND_CLOCK_DYN); if (error) { SIBA_UNLOCK(sc); return (error); } /* Clean up the child's PMU state */ dinfo->pmu_state = SIBA_PMU_NONE; dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); /* Release the provider reference */ bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL); return (0); case SIBA_PMU_FIXED: /* Clean up the child's PMU state */ KASSERT(dinfo->pmu.pwrctl == NULL, ("PWRCTL reference with FIXED state")); dinfo->pmu_state = SIBA_PMU_NONE; 
dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_LATENCY() */ static int siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock, u_int *latency) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_latency(dev, child, clock, latency)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock, latency); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and incurs no transition * delay. */ switch (clock) { case BHND_CLOCK_HT: *latency = 0; return (0); default: return (ENODEV); } return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_FREQ() */ static int siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock, u_int *freq) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_freq(dev, child, clock, freq)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock, freq); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_EXT_RSRC() */ static int siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if 
(device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_RELEASE_EXT_RSRC() */ static int siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_CLOCK() */ static int siba_request_clock(device_t dev, device_t child, bhnd_clock clock) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_clock(dev, child, clock)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, clock); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and fulfills any of the * following clock requests */ switch (clock) { case 
BHND_CLOCK_DYN: case BHND_CLOCK_ILP: case BHND_CLOCK_ALP: case BHND_CLOCK_HT: return (0); default: return (ENODEV); } } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_ENABLE_CLOCKS() */ static int siba_enable_clocks(device_t dev, device_t child, uint32_t clocks) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_enable_clocks(dev, child, clocks)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* All (supported) clocks are already enabled by default */ clocks &= ~(BHND_CLOCK_DYN | BHND_CLOCK_ILP | BHND_CLOCK_ALP | BHND_CLOCK_HT); if (clocks != 0) { device_printf(dev, "%s requested unknown clocks: %#x\n", device_get_nameunit(child), clocks); return (ENODEV); } return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); } static int siba_read_iost(device_t dev, device_t child, uint16_t *iost) { uint32_t tmhigh; int error; error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4); if (error) return (error); *iost = (SIBA_REG_GET(tmhigh, TMH_SISF)); return (0); } static int siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl) { uint32_t ts_low; int error; if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4))) return (error); *ioctl = (SIBA_REG_GET(ts_low, TML_SICF)); return (0); } static int siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, ts_mask; if (device_get_parent(child) != dev) return (EINVAL); /* Fetch CFG0 mapping */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Mask and set TMSTATELOW core flag bits */ ts_mask = (mask << SIBA_TML_SICF_SHIFT) & 
SIBA_TML_SICF_MASK; ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } static bool siba_is_hw_suspended(device_t dev, device_t child) { uint32_t ts_low; uint16_t ioctl; int error; /* Fetch target state */ error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4); if (error) { device_printf(child, "error reading HW reset state: %d\n", error); return (true); } /* Is core held in RESET? */ if (ts_low & SIBA_TML_RESET) return (true); /* Is target reject enabled? */ if (ts_low & SIBA_TML_REJ_MASK) return (true); /* Is core clocked? */ ioctl = SIBA_REG_GET(ts_low, TML_SICF); if (!(ioctl & BHND_IOCTL_CLK_EN)) return (true); return (false); } static int siba_reset_hw(device_t dev, device_t child, uint16_t ioctl, uint16_t reset_ioctl) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, imstate; uint16_t clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Place core into known RESET state */ if ((error = bhnd_suspend_hw(child, reset_ioctl))) return (error); /* Set RESET, clear REJ, set the caller's IOCTL flags, and * force clocks to ensure the signal propagates throughout the * core. 
*/ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, UINT32_MAX); /* Clear any target errors */ if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) { siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_SERR); } /* Clear any initiator errors */ imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE); if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_IBE|SIBA_IM_TO); } /* Release from RESET while leaving clocks forced, ensuring the * signal propagates throughout the core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_RESET); /* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE * bit and allow the core to manage clock gating. */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT)); return (0); } static int siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl) { struct siba_softc *sc; struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t idl, ts_low, ts_mask; uint16_t cflags, clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Already in RESET? */ ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW); if (ts_low & SIBA_TML_RESET) return (0); /* If clocks are already disabled, we can place the core directly * into RESET|REJ while setting the caller's IOCTL flags. 
*/ cflags = SIBA_REG_GET(ts_low, TML_SICF); if (!(cflags & BHND_IOCTL_CLK_EN)) { ts_low = SIBA_TML_RESET | SIBA_TML_REJ | (ioctl << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } /* Reject further transactions reaching this core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_REJ, SIBA_TML_REJ); /* Wait for transaction busy flag to clear for all transactions * initiated by this core */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_BUSY, 100000); if (error) return (error); /* If this is an initiator core, we need to reject initiator * transactions too. */ idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW); if (idl & SIBA_IDL_INIT) { /* Reject further initiator transactions */ siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, SIBA_IM_RJ, SIBA_IM_RJ); /* Wait for initiator busy flag to clear */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_BY, 100000); if (error) return (error); } /* Put the core into RESET, set the caller's IOCTL flags, and * force clocks to ensure the RESET signal propagates throughout the * core. */ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); /* Give RESET ample time */ DELAY(10); /* Clear previously asserted initiator reject */ if (idl & SIBA_IDL_INIT) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_RJ); } /* Disable all clocks, leaving RESET and REJ asserted */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT); /* * Core is now in RESET. 
* * If the core holds any PWRCTL clock reservations, we need to release * those now. This emulates the standard bhnd(4) PMU behavior of RESET * automatically clearing clkctl */ SIBA_LOCK(sc); if (dinfo->pmu_state == SIBA_PMU_PWRCTL) { error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, BHND_CLOCK_DYN); SIBA_UNLOCK(sc); if (error) { device_printf(child, "failed to release clock request: " "%d", error); return (error); } return (0); } else { SIBA_UNLOCK(sc); return (0); } } static int siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value, u_int width) { struct siba_devinfo *dinfo; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if (dinfo->cfg_res[0] == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(dinfo->cfg_res[0]->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: *((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0], offset); return (0); case 2: *((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0], offset); return (0); case 4: *((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0], offset); return (0); default: return (EINVAL); } } static int siba_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct siba_devinfo *dinfo; struct bhnd_resource *r; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(r->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint8_t *)value); return (0); case 4: 
bhnd_bus_write_4(r, offset, *(const uint8_t *)value); return (0); default: return (EINVAL); } } static u_int siba_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); return (siba_port_count(&dinfo->core_id, type)); } static u_int siba_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port)); dinfo = device_get_ivars(child); return (siba_port_region_count(&dinfo->core_id, type, port)); } static int siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; struct siba_cfg_block *cfg; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child, port_type, port_num, region_num)); dinfo = device_get_ivars(child); /* Look for a matching addrspace entry */ addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace != NULL) return (addrspace->sa_rid); /* Try the config blocks */ cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num); if (cfg != NULL) return (cfg->cb_rid); /* Not found */ return (-1); } static int siba_decode_port_rid(device_t dev, device_t child, int type, int rid, bhnd_port_type *port_type, u_int *port_num, u_int *region_num) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child, type, rid, port_type, port_num, region_num)); dinfo = 
device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		/* Addrspace entries map to device ports/regions */
		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		/* Config blocks map to agent ports/regions */
		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

/*
 * Return the base address and size of the mapping backing the given
 * (port type, port, region) on @p child, checking address space entries
 * first and config blocks second.  Returns ENOENT if no mapping matches.
 */
static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		/* exclude the bus-reserved tail from the reported size */
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 *
 * Cores either have exactly one backplane interrupt assigned, or none.
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->core_id.intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 *
 * Writes the core's backplane interrupt flag to @p ivec; @p intr must be 0
 * (the only valid interrupt index — see siba_get_intr_count()).
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->core_id.intr_en,
	    ("core does not have an interrupt assigned"));

	*ivec = dinfo->core_id.intr_flag;
	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
*/ static int siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo) { struct siba_addrspace *addrspace; rman_res_t r_start, r_count, r_end; uint8_t num_cfg; int rid; num_cfg = dinfo->core_id.num_cfg_blocks; if (num_cfg > SIBA_MAX_CFG) { device_printf(dev, "config block count %hhu out of range\n", num_cfg); return (ENXIO); } /* Fetch the core register address space */ addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0); if (addrspace == NULL) { device_printf(dev, "missing device registers\n"); return (ENXIO); } /* * Map the per-core configuration blocks */ for (uint8_t i = 0; i < num_cfg; i++) { /* Add to child's resource list */ r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i); r_count = SIBA_CFG_SIZE; r_end = r_start + r_count - 1; rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY, r_start, r_end, r_count); /* Initialize config block descriptor */ dinfo->cfg[i] = ((struct siba_cfg_block) { .cb_base = r_start, .cb_size = SIBA_CFG_SIZE, .cb_rid = rid }); /* Map the config resource for bus-level access */ dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i); dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev, SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end, r_count, RF_ACTIVE|RF_SHAREABLE); if (dinfo->cfg_res[i] == NULL) { device_printf(dev, "failed to allocate SIBA_CFG%hhu\n", i); return (ENXIO); } } return (0); } static device_t siba_add_child(device_t dev, u_int order, const char *name, int unit) { struct siba_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); if ((dinfo = siba_alloc_dinfo(dev)) == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, dinfo); return (child); } static void siba_child_deleted(device_t dev, device_t child) { struct siba_devinfo *dinfo; /* Call required bhnd(4) implementation */ bhnd_generic_child_deleted(dev, child); /* Free siba device info */ if ((dinfo = device_get_ivars(child)) != NULL) 
siba_free_dinfo(dev, child, dinfo); device_set_ivars(child, NULL); } /** * Scan the core table and add all valid discovered cores to * the bus. * * @param dev The siba bus device. */ int siba_add_children(device_t dev) { bhnd_erom_t *erom; struct siba_erom *siba_erom; struct bhnd_erom_io *eio; const struct bhnd_chipid *cid; struct siba_core_id *cores; device_t *children; int error; cid = BHND_BUS_GET_CHIPID(dev, dev); /* Allocate our EROM parser */ eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID); erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio); if (erom == NULL) { bhnd_erom_io_fini(eio); return (ENODEV); } /* Allocate our temporary core and device table */ cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK); children = malloc(sizeof(*children) * cid->ncores, M_BHND, M_WAITOK | M_ZERO); /* * Add child devices for all discovered cores. * * On bridged devices, we'll exhaust our available register windows if * we map config blocks on unpopulated/disabled cores. To avoid this, we * defer mapping of the per-core siba(4) config blocks until all cores * have been enumerated and otherwise configured. */ siba_erom = (struct siba_erom *)erom; for (u_int i = 0; i < cid->ncores; i++) { struct siba_devinfo *dinfo; device_t child; if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i]))) goto failed; /* Add the child device */ - child = BUS_ADD_CHILD(dev, 0, NULL, -1); + child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { error = ENXIO; goto failed; } children[i] = child; /* Initialize per-device bus info */ if ((dinfo = device_get_ivars(child)) == NULL) { error = ENXIO; goto failed; } if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i]))) goto failed; /* If pins are floating or the hardware is otherwise * unpopulated, the device shouldn't be used. 
*/ if (bhnd_is_hw_disabled(child)) device_disable(child); } /* Free EROM (and any bridge register windows it might hold) */ bhnd_erom_free(erom); erom = NULL; /* Map all valid core's config register blocks and perform interrupt * assignment */ for (u_int i = 0; i < cid->ncores; i++) { struct siba_devinfo *dinfo; device_t child; child = children[i]; /* Skip if core is disabled */ if (bhnd_is_hw_disabled(child)) continue; dinfo = device_get_ivars(child); /* Map the core's config blocks */ if ((error = siba_map_cfg_resources(dev, dinfo))) goto failed; /* Issue bus callback for fully initialized child. */ BHND_BUS_CHILD_ADDED(dev, child); } free(cores, M_BHND); free(children, M_BHND); return (0); failed: for (u_int i = 0; i < cid->ncores; i++) { if (children[i] == NULL) continue; device_delete_child(dev, children[i]); } free(cores, M_BHND); free(children, M_BHND); if (erom != NULL) bhnd_erom_free(erom); return (error); } static device_method_t siba_methods[] = { /* Device interface */ DEVMETHOD(device_probe, siba_probe), DEVMETHOD(device_attach, siba_attach), DEVMETHOD(device_detach, siba_detach), DEVMETHOD(device_resume, siba_resume), DEVMETHOD(device_suspend, siba_suspend), /* Bus interface */ DEVMETHOD(bus_add_child, siba_add_child), DEVMETHOD(bus_child_deleted, siba_child_deleted), DEVMETHOD(bus_read_ivar, siba_read_ivar), DEVMETHOD(bus_write_ivar, siba_write_ivar), DEVMETHOD(bus_get_resource_list, siba_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, siba_get_erom_class), DEVMETHOD(bhnd_bus_alloc_pmu, siba_alloc_pmu), DEVMETHOD(bhnd_bus_release_pmu, siba_release_pmu), DEVMETHOD(bhnd_bus_request_clock, siba_request_clock), DEVMETHOD(bhnd_bus_enable_clocks, siba_enable_clocks), DEVMETHOD(bhnd_bus_request_ext_rsrc, siba_request_ext_rsrc), DEVMETHOD(bhnd_bus_release_ext_rsrc, siba_release_ext_rsrc), DEVMETHOD(bhnd_bus_get_clock_freq, siba_get_clock_freq), DEVMETHOD(bhnd_bus_get_clock_latency, siba_get_clock_latency), 
DEVMETHOD(bhnd_bus_read_ioctl, siba_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, siba_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, siba_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, siba_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, siba_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, siba_suspend_hw), DEVMETHOD(bhnd_bus_read_config, siba_read_config), DEVMETHOD(bhnd_bus_write_config, siba_write_config), DEVMETHOD(bhnd_bus_get_port_count, siba_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, siba_get_region_count), DEVMETHOD(bhnd_bus_get_port_rid, siba_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, siba_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, siba_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, siba_get_intr_count), DEVMETHOD(bhnd_bus_get_intr_ivec, siba_get_intr_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver); MODULE_VERSION(siba, 1); MODULE_DEPEND(siba, bhnd, 1, 1, 1); diff --git a/sys/dev/cfe/cfe_resource.c b/sys/dev/cfe/cfe_resource.c index d5047e045f37..78c5f149ba7d 100644 --- a/sys/dev/cfe/cfe_resource.c +++ b/sys/dev/cfe/cfe_resource.c @@ -1,158 +1,158 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007 Bruce M. Simpson. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver to swallow up memory ranges reserved by CFE platform firmware. * CFE on Sentry5 doesn't specify reserved ranges, so this is not useful * at the present time. * TODO: Don't attach this off nexus. */ #include #include #include #include #include #include #include #include #include #include #include #define MAX_CFE_RESERVATIONS 16 struct cferes_softc { int rnum; int rid[MAX_CFE_RESERVATIONS]; struct resource *res[MAX_CFE_RESERVATIONS]; }; static int cferes_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int cferes_attach(device_t dev) { return (0); } static void cferes_identify(driver_t* driver, device_t parent) { device_t child; int i; struct resource *res; int result; int rid; struct cferes_softc *sc; uint64_t addr, len, type; - child = BUS_ADD_CHILD(parent, 100, "cferes", -1); + child = BUS_ADD_CHILD(parent, 100, "cferes", DEVICE_UNIT_ANY); device_set_driver(child, driver); sc = device_get_softc(child); sc->rnum = 0; for (i = 0; i < ~0U; i++) { result = cfe_enummem(i, CFE_FLG_FULL_ARENA, &addr, &len, &type); if (result < 0) break; if (type != CFE_MI_RESERVED) { if (bootverbose) printf("%s: skipping non reserved range 0x%0jx(%jd)\n", device_getnameunit(child), (uintmax_t)addr, 
(uintmax_t)len); continue; } bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, addr, len); rid = sc->rnum; res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, 0); if (res == NULL) { bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); continue; } sc->rid[sc->rnum] = rid; sc->res[sc->rnum] = res; sc->rnum++; if (sc->rnum == MAX_CFE_RESERVATIONS) break; } if (sc->rnum == 0) { device_delete_child(parent, child); return; } device_set_desc(child, "CFE reserved memory"); } static int cferes_detach(device_t dev) { int i; struct cferes_softc *sc = device_get_softc(dev); for (i = 0; i < sc->rnum; i++) { bus_release_resource(dev, SYS_RES_MEMORY, sc->rid[i], sc->res[i]); } return (0); } static device_method_t cferes_methods[] = { /* Device interface */ DEVMETHOD(device_identify, cferes_identify), DEVMETHOD(device_probe, cferes_probe), DEVMETHOD(device_attach, cferes_attach), DEVMETHOD(device_detach, cferes_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static driver_t cferes_driver = { "cferes", cferes_methods, sizeof (struct cferes_softc) }; static devclass_t cferes_devclass; DRIVER_MODULE(cfe, nexus, cferes_driver, cferes_devclass, 0, 0); diff --git a/sys/dev/chromebook_platform/chromebook_platform.c b/sys/dev/chromebook_platform/chromebook_platform.c index cd5ad76a8701..935685d86955 100644 --- a/sys/dev/chromebook_platform/chromebook_platform.c +++ b/sys/dev/chromebook_platform/chromebook_platform.c @@ -1,96 +1,96 @@ /*- * Copyright (c) 2016 The FreeBSD Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include /* * Driver that attaches I2C devices. */ static struct { uint32_t pci_id; const char *name; uint8_t addr; } slaves[] = { { 0x9c628086, "isl", 0x88 }, { 0x9c618086, "cyapa", 0xce }, }; static void chromebook_i2c_identify(driver_t *driver, device_t bus) { device_t controller; device_t child; int i; /* * A stopgap approach to preserve the status quo. * A more intelligent approach is required to correctly * identify a machine model and hardware available on it. * For instance, DMI could be used. 
* See http://lxr.free-electrons.com/source/drivers/platform/chrome/chromeos_laptop.c */ controller = device_get_parent(bus); if (strcmp(device_get_name(controller), "ig4iic") != 0) return; for (i = 0; i < nitems(slaves); i++) { if (device_find_child(bus, slaves[i].name, -1) != NULL) continue; if (slaves[i].pci_id != pci_get_devid(controller)) continue; - child = BUS_ADD_CHILD(bus, 0, slaves[i].name, -1); + child = BUS_ADD_CHILD(bus, 0, slaves[i].name, DEVICE_UNIT_ANY); if (child != NULL) iicbus_set_addr(child, slaves[i].addr); } } static device_method_t chromebook_i2c_methods[] = { DEVMETHOD(device_identify, chromebook_i2c_identify), { 0, 0 } }; static driver_t chromebook_i2c_driver = { "chromebook_i2c", chromebook_i2c_methods, 0 /* no softc */ }; DRIVER_MODULE(chromebook_i2c, iicbus, chromebook_i2c_driver, 0, 0); MODULE_VERSION(chromebook_i2c, 1); diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c index 3b309f7f36f8..4e95287399e3 100644 --- a/sys/dev/etherswitch/e6000sw/e6000sw.c +++ b/sys/dev/etherswitch/e6000sw/e6000sw.c @@ -1,1782 +1,1782 @@ /*- * Copyright (c) 2015 Semihalf * Copyright (c) 2015 Stormshield * Copyright (c) 2018-2019, Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #else #include #endif #include "e6000swreg.h" #include "etherswitch_if.h" #include "miibus_if.h" #include "mdio_if.h" MALLOC_DECLARE(M_E6000SW); MALLOC_DEFINE(M_E6000SW, "e6000sw", "e6000sw switch"); #define E6000SW_LOCK(_sc) sx_xlock(&(_sc)->sx) #define E6000SW_UNLOCK(_sc) sx_unlock(&(_sc)->sx) #define E6000SW_LOCK_ASSERT(_sc, _what) sx_assert(&(_sc)->sx, (_what)) #define E6000SW_TRYLOCK(_sc) sx_tryxlock(&(_sc)->sx) #define E6000SW_LOCKED(_sc) sx_xlocked(&(_sc)->sx) #define E6000SW_WAITREADY(_sc, _reg, _bit) \ e6000sw_waitready((_sc), REG_GLOBAL, (_reg), (_bit)) #define E6000SW_WAITREADY2(_sc, _reg, _bit) \ e6000sw_waitready((_sc), REG_GLOBAL2, (_reg), (_bit)) #define MDIO_READ(dev, addr, reg) \ MDIO_READREG(device_get_parent(dev), (addr), (reg)) #define MDIO_WRITE(dev, addr, reg, val) \ MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val)) typedef struct e6000sw_softc { device_t dev; #ifdef FDT phandle_t node; #endif struct sx sx; if_t ifp[E6000SW_MAX_PORTS]; char *ifname[E6000SW_MAX_PORTS]; device_t miibus[E6000SW_MAX_PORTS]; struct taskqueue *sc_tq; struct timeout_task sc_tt; int vlans[E6000SW_NUM_VLANS]; uint32_t swid; uint32_t vlan_mode; uint32_t cpuports_mask; uint32_t fixed_mask; uint32_t fixed25_mask; uint32_t ports_mask; int phy_base; int 
sw_addr; int num_ports; } e6000sw_softc_t; static etherswitch_info_t etherswitch_info = { .es_nports = 0, .es_nvlangroups = 0, .es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q, .es_name = "Marvell 6000 series switch" }; static void e6000sw_identify(driver_t *, device_t); static int e6000sw_probe(device_t); #ifdef FDT static int e6000sw_parse_fixed_link(e6000sw_softc_t *, phandle_t, uint32_t); static int e6000sw_parse_ethernet(e6000sw_softc_t *, phandle_t, uint32_t); #endif static int e6000sw_attach(device_t); static int e6000sw_detach(device_t); static int e6000sw_read_xmdio(device_t, int, int, int); static int e6000sw_write_xmdio(device_t, int, int, int, int); static int e6000sw_readphy(device_t, int, int); static int e6000sw_writephy(device_t, int, int, int); static int e6000sw_readphy_locked(device_t, int, int); static int e6000sw_writephy_locked(device_t, int, int, int); static etherswitch_info_t* e6000sw_getinfo(device_t); static int e6000sw_getconf(device_t, etherswitch_conf_t *); static int e6000sw_setconf(device_t, etherswitch_conf_t *); static void e6000sw_lock(device_t); static void e6000sw_unlock(device_t); static int e6000sw_getport(device_t, etherswitch_port_t *); static int e6000sw_setport(device_t, etherswitch_port_t *); static int e6000sw_set_vlan_mode(e6000sw_softc_t *, uint32_t); static int e6000sw_readreg_wrapper(device_t, int); static int e6000sw_writereg_wrapper(device_t, int, int); static int e6000sw_getvgroup_wrapper(device_t, etherswitch_vlangroup_t *); static int e6000sw_setvgroup_wrapper(device_t, etherswitch_vlangroup_t *); static int e6000sw_setvgroup(device_t, etherswitch_vlangroup_t *); static int e6000sw_getvgroup(device_t, etherswitch_vlangroup_t *); static void e6000sw_setup(device_t, e6000sw_softc_t *); static void e6000sw_tick(void *, int); static void e6000sw_set_atustat(device_t, e6000sw_softc_t *, int, int); static int e6000sw_atu_flush(device_t, e6000sw_softc_t *, int); static int 
e6000sw_vtu_flush(e6000sw_softc_t *); static int e6000sw_vtu_update(e6000sw_softc_t *, int, int, int, int, int); static __inline void e6000sw_writereg(e6000sw_softc_t *, int, int, int); static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *, int, int); static int e6000sw_ifmedia_upd(if_t); static void e6000sw_ifmedia_sts(if_t, struct ifmediareq *); static int e6000sw_atu_mac_table(device_t, e6000sw_softc_t *, struct atu_opt *, int); static int e6000sw_get_pvid(e6000sw_softc_t *, int, int *); static void e6000sw_set_pvid(e6000sw_softc_t *, int, int); static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *, int); static __inline bool e6000sw_is_phyport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *, int); static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *, unsigned int); static device_method_t e6000sw_methods[] = { /* device interface */ DEVMETHOD(device_identify, e6000sw_identify), DEVMETHOD(device_probe, e6000sw_probe), DEVMETHOD(device_attach, e6000sw_attach), DEVMETHOD(device_detach, e6000sw_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* mii interface */ DEVMETHOD(miibus_readreg, e6000sw_readphy), DEVMETHOD(miibus_writereg, e6000sw_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, e6000sw_getinfo), DEVMETHOD(etherswitch_getconf, e6000sw_getconf), DEVMETHOD(etherswitch_setconf, e6000sw_setconf), DEVMETHOD(etherswitch_lock, e6000sw_lock), DEVMETHOD(etherswitch_unlock, e6000sw_unlock), DEVMETHOD(etherswitch_getport, e6000sw_getport), DEVMETHOD(etherswitch_setport, e6000sw_setport), DEVMETHOD(etherswitch_readreg, e6000sw_readreg_wrapper), DEVMETHOD(etherswitch_writereg, e6000sw_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, e6000sw_readphy), DEVMETHOD(etherswitch_writephyreg, e6000sw_writephy), 
DEVMETHOD(etherswitch_setvgroup, e6000sw_setvgroup_wrapper), DEVMETHOD(etherswitch_getvgroup, e6000sw_getvgroup_wrapper), DEVMETHOD_END }; DEFINE_CLASS_0(e6000sw, e6000sw_driver, e6000sw_methods, sizeof(e6000sw_softc_t)); DRIVER_MODULE(e6000sw, mdio, e6000sw_driver, 0, 0); DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, 0, 0); DRIVER_MODULE(miibus, e6000sw, miibus_driver, 0, 0); MODULE_DEPEND(e6000sw, mdio, 1, 1, 1); static void e6000sw_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "e6000sw", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "e6000sw", -1); + BUS_ADD_CHILD(parent, 0, "e6000sw", DEVICE_UNIT_ANY); } static int e6000sw_probe(device_t dev) { e6000sw_softc_t *sc; const char *description; #ifdef FDT phandle_t switch_node; #else int is_6190; #endif sc = device_get_softc(dev); sc->dev = dev; #ifdef FDT switch_node = ofw_bus_find_compatible(OF_finddevice("/"), "marvell,mv88e6085"); if (switch_node == 0) { switch_node = ofw_bus_find_compatible(OF_finddevice("/"), "marvell,mv88e6190"); if (switch_node == 0) return (ENXIO); /* * Trust DTS and fix the port register offset for the MV88E6190 * detection bellow. */ sc->swid = MV88E6190; } if (bootverbose) device_printf(dev, "Found switch_node: 0x%x\n", switch_node); sc->node = switch_node; if (OF_getencprop(sc->node, "reg", &sc->sw_addr, sizeof(sc->sw_addr)) < 0) return (ENXIO); #else if (resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "addr", &sc->sw_addr) != 0) return (ENXIO); if (resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "is6190", &is_6190) != 0) /* * Check "is8190" to keep backward compatibility with * older setups. */ resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "is8190", &is_6190); if (is_6190 != 0) sc->swid = MV88E6190; #endif if (sc->sw_addr < 0 || sc->sw_addr > 32) return (ENXIO); /* * Create temporary lock, just to satisfy assertions, * when obtaining the switch ID. Destroy immediately afterwards. 
*/ sx_init(&sc->sx, "e6000sw_tmp"); E6000SW_LOCK(sc); sc->swid = e6000sw_readreg(sc, REG_PORT(sc, 0), SWITCH_ID) & 0xfff0; E6000SW_UNLOCK(sc); sx_destroy(&sc->sx); switch (sc->swid) { case MV88E6141: description = "Marvell 88E6141"; sc->phy_base = 0x10; sc->num_ports = 6; break; case MV88E6341: description = "Marvell 88E6341"; sc->phy_base = 0x10; sc->num_ports = 6; break; case MV88E6352: description = "Marvell 88E6352"; sc->num_ports = 7; break; case MV88E6172: description = "Marvell 88E6172"; sc->num_ports = 7; break; case MV88E6176: description = "Marvell 88E6176"; sc->num_ports = 7; break; case MV88E6190: description = "Marvell 88E6190"; sc->num_ports = 11; break; default: device_printf(dev, "Unrecognized device, id 0x%x.\n", sc->swid); return (ENXIO); } device_set_desc(dev, description); return (BUS_PROBE_DEFAULT); } #ifdef FDT static int e6000sw_parse_fixed_link(e6000sw_softc_t *sc, phandle_t node, uint32_t port) { int speed; phandle_t fixed_link; fixed_link = ofw_bus_find_child(node, "fixed-link"); if (fixed_link != 0) { sc->fixed_mask |= (1 << port); if (OF_getencprop(fixed_link, "speed", &speed, sizeof(speed)) < 0) { device_printf(sc->dev, "Port %d has a fixed-link node without a speed " "property\n", port); return (ENXIO); } if (speed == 2500 && (MVSWITCH(sc, MV88E6141) || MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190))) sc->fixed25_mask |= (1 << port); } return (0); } static int e6000sw_parse_ethernet(e6000sw_softc_t *sc, phandle_t port_handle, uint32_t port) { phandle_t switch_eth, switch_eth_handle; if (OF_getencprop(port_handle, "ethernet", (void*)&switch_eth_handle, sizeof(switch_eth_handle)) > 0) { if (switch_eth_handle > 0) { switch_eth = OF_node_from_xref(switch_eth_handle); device_printf(sc->dev, "CPU port at %d\n", port); sc->cpuports_mask |= (1 << port); return (e6000sw_parse_fixed_link(sc, switch_eth, port)); } else device_printf(sc->dev, "Port %d has ethernet property but it points " "to an invalid location\n", port); } return (0); } 
/*
 * Parse one DTS port node: read its "reg" (port number), then its
 * fixed-link/ethernet sub-properties.  On success *pport holds the port
 * number.  Returns ENXIO on any parse failure or out-of-range port.
 */
static int
e6000sw_parse_child_fdt(e6000sw_softc_t *sc, phandle_t child, int *pport)
{
	uint32_t port;

	if (pport == NULL)
		return (ENXIO);

	if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0)
		return (ENXIO);
	if (port >= sc->num_ports)
		return (ENXIO);
	*pport = port;

	if (e6000sw_parse_fixed_link(sc, child, port) != 0)
		return (ENXIO);

	if (e6000sw_parse_ethernet(sc, child, port) != 0)
		return (ENXIO);

	if ((sc->fixed_mask & (1 << port)) != 0)
		device_printf(sc->dev, "fixed port at %d\n", port);
	else
		device_printf(sc->dev, "PHY at port %d\n", port);

	return (0);
}
#else

/*
 * Format a per-port hint name from 'fmt' and look it up with
 * resource_int_value().  Returns the resource_int_value() result, or -1
 * for an empty format string.
 */
static int
e6000sw_check_hint_val(device_t dev, int *val, char *fmt, ...)
{
	char *resname;
	int err, len;
	va_list ap;

	/* 2x the format length is assumed enough for the expanded name. */
	len = min(strlen(fmt) * 2, 128);
	if (len == 0)
		return (-1);
	resname = malloc(len, M_E6000SW, M_WAITOK);
	memset(resname, 0, len);
	va_start(ap, fmt);
	vsnprintf(resname, len - 1, fmt, ap);
	va_end(ap);
	err = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    resname, val);
	free(resname, M_E6000SW);

	return (err);
}

/*
 * Non-FDT port configuration from device hints:
 * "port%ddisabled", "port%dcpu" and "port%dspeed".  Returns 1 when the
 * port is hinted disabled (caller skips it), 0 otherwise.
 */
static int
e6000sw_parse_hinted_port(e6000sw_softc_t *sc, int port)
{
	int err, val;

	err = e6000sw_check_hint_val(sc->dev, &val, "port%ddisabled", port);
	if (err == 0 && val != 0)
		return (1);

	err = e6000sw_check_hint_val(sc->dev, &val, "port%dcpu", port);
	if (err == 0 && val != 0) {
		/* A CPU port is implicitly a fixed port as well. */
		sc->cpuports_mask |= (1 << port);
		sc->fixed_mask |= (1 << port);
		if (bootverbose)
			device_printf(sc->dev, "CPU port at %d\n", port);
	}
	err = e6000sw_check_hint_val(sc->dev, &val, "port%dspeed", port);
	if (err == 0 && val != 0) {
		sc->fixed_mask |= (1 << port);
		if (val == 2500)
			sc->fixed25_mask |= (1 << port);
	}

	if (bootverbose) {
		if ((sc->fixed_mask & (1 << port)) != 0)
			device_printf(sc->dev, "fixed port at %d\n", port);
		else
			device_printf(sc->dev, "PHY at port %d\n", port);
	}

	return (0);
}
#endif

/*
 * Allocate the per-port ifnet used as the miibus attachment point.
 * Returns ENOMEM if the name buffer cannot be allocated.
 */
static int
e6000sw_init_interface(e6000sw_softc_t *sc, int port)
{
	char name[IFNAMSIZ];

	snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));

	sc->ifp[port] = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp[port], sc);
	if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
	    IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
	sc->ifname[port] = malloc(strlen(name) + 1, M_E6000SW, M_NOWAIT);
	if (sc->ifname[port] == NULL) {
		if_free(sc->ifp[port]);
		return (ENOMEM);
	}
	memcpy(sc->ifname[port], name, strlen(name) + 1);
	/* The port number doubles as the interface unit number. */
	if_initname(sc->ifp[port], sc->ifname[port], port);

	return (0);
}

/*
 * Attach a miibus instance for a PHY port.  The PHY address is the port
 * number offset by phy_base (non-zero on 88E6141/88E6341).
 */
static int
e6000sw_attach_miibus(e6000sw_softc_t *sc, int port)
{
	int err;

	err = mii_attach(sc->dev, &sc->miibus[port], sc->ifp[port],
	    e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK,
	    port + sc->phy_base, MII_OFFSET_ANY, 0);
	if (err != 0)
		return (err);

	return (0);
}

/*
 * Power the port's SERDES lanes for either SGMII (sgmii == true) or
 * 10GBASE-R/X4/X2 operation; the unused mode's lane is powered down.
 * Must be called with the softc lock held (uses the xmdio accessors).
 */
static void
e6000sw_serdes_power(device_t dev, int port, bool sgmii)
{
	uint32_t reg;

	/* SGMII */
	reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
	    E6000SW_SERDES_SGMII_CTL);
	if (sgmii)
		reg &= ~E6000SW_SERDES_PDOWN;
	else
		reg |= E6000SW_SERDES_PDOWN;
	e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
	    E6000SW_SERDES_SGMII_CTL, reg);

	/* 10GBASE-R/10GBASE-X4/X2 */
	reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
	    E6000SW_SERDES_PCS_CTL1);
	if (sgmii)
		reg |= E6000SW_SERDES_PDOWN;
	else
		reg &= ~E6000SW_SERDES_PDOWN;
	e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
	    E6000SW_SERDES_PCS_CTL1, reg);
}

/*
 * Attach: set up the switch, walk the ports (DTS nodes under "ports" on
 * FDT, hints otherwise — note the loop head is spliced by #ifdef but the
 * body is shared), program fixed/CPU ports, attach miibus on PHY ports,
 * and start the periodic link-state task.
 */
static int
e6000sw_attach(device_t dev)
{
	bool sgmii;
	e6000sw_softc_t *sc;
#ifdef FDT
	phandle_t child, ports;
#endif
	int err, port;
	uint32_t reg;

	err = 0;
	sc = device_get_softc(dev);

	/*
	 * According to the Linux source code, all of the Switch IDs we support
	 * are multi_chip capable, and should go into multi-chip mode if the
	 * sw_addr != 0.
	 */
	if (MVSWITCH_MULTICHIP(sc))
		device_printf(dev, "multi-chip addressing mode (%#x)\n",
		    sc->sw_addr);
	else
		device_printf(dev, "single-chip addressing mode\n");

	sx_init(&sc->sx, "e6000sw");

	E6000SW_LOCK(sc);
	e6000sw_setup(dev, sc);

	/* NOTE(review): M_NOWAIT result is not checked — could be NULL. */
	sc->sc_tq = taskqueue_create("e6000sw_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);

	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_tt, 0, e6000sw_tick, sc);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

#ifdef FDT
	ports = ofw_bus_find_child(sc->node, "ports");
	if (ports == 0) {
		device_printf(dev, "failed to parse DTS: no ports found for "
		    "switch\n");
		E6000SW_UNLOCK(sc);
		return (ENXIO);
	}

	for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
		err = e6000sw_parse_child_fdt(sc, child, &port);
		if (err != 0) {
			device_printf(sc->dev, "failed to parse DTS\n");
			goto out_fail;
		}
#else
	for (port = 0; port < sc->num_ports; port++) {
		err = e6000sw_parse_hinted_port(sc, port);
		if (err != 0)
			continue;
#endif

		/* Port is in use. */
		sc->ports_mask |= (1 << port);

		err = e6000sw_init_interface(sc, port);
		if (err != 0) {
			device_printf(sc->dev, "failed to init interface\n");
			goto out_fail;
		}

		if (e6000sw_is_fixedport(sc, port)) {
			/* Link must be down to change speed force value. */
			reg = e6000sw_readreg(sc, REG_PORT(sc, port),
			    PSC_CONTROL);
			reg &= ~PSC_CONTROL_LINK_UP;
			reg |= PSC_CONTROL_FORCED_LINK;
			e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
			    reg);

			/*
			 * Force speed, full-duplex, EEE off and flow-control
			 * on.
			 */
			reg &= ~(PSC_CONTROL_SPD2500 | PSC_CONTROL_ALT_SPD |
			    PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON |
			    PSC_CONTROL_FORCED_EEE);
			if (e6000sw_is_fixed25port(sc, port))
				reg |= PSC_CONTROL_SPD2500;
			else
				reg |= PSC_CONTROL_SPD1000;
			if (MVSWITCH(sc, MV88E6190) &&
			    e6000sw_is_fixed25port(sc, port))
				reg |= PSC_CONTROL_ALT_SPD;
			reg |= PSC_CONTROL_FORCED_DPX | PSC_CONTROL_FULLDPX |
			    PSC_CONTROL_FORCED_LINK | PSC_CONTROL_LINK_UP |
			    PSC_CONTROL_FORCED_SPD;
			if (!MVSWITCH(sc, MV88E6190))
				reg |= PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON;
			if (MVSWITCH(sc, MV88E6141) ||
			    MVSWITCH(sc, MV88E6341) ||
			    MVSWITCH(sc, MV88E6190))
				reg |= PSC_CONTROL_FORCED_EEE;
			e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
			    reg);
			/* Power on the SERDES interfaces. */
			if (MVSWITCH(sc, MV88E6190) &&
			    (port == 9 || port == 10)) {
				if (e6000sw_is_fixed25port(sc, port))
					sgmii = false;
				else
					sgmii = true;
				e6000sw_serdes_power(sc->dev, port, sgmii);
			}
		}

		/* Don't attach miibus at CPU/fixed ports */
		if (!e6000sw_is_phyport(sc, port))
			continue;

		err = e6000sw_attach_miibus(sc, port);
		if (err != 0) {
			device_printf(sc->dev, "failed to attach miibus\n");
			goto out_fail;
		}
	}

	etherswitch_info.es_nports = sc->num_ports;

	/* Default to port vlan. */
	e6000sw_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT);

	reg = e6000sw_readreg(sc, REG_GLOBAL, SWITCH_GLOBAL_STATUS);
	if (reg & SWITCH_GLOBAL_STATUS_IR)
		device_printf(dev, "switch is ready.\n");
	E6000SW_UNLOCK(sc);

	bus_generic_probe(dev);
	bus_generic_attach(dev);

	taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz);

	return (0);

out_fail:
	/*
	 * NOTE(review): the softc lock is still held here, yet
	 * e6000sw_detach() calls sx_destroy() on it — verify this error
	 * path; it looks like it should unlock first.
	 */
	e6000sw_detach(dev);

	return (err);
}

/*
 * Poll 'reg' at 'phy' until 'busybit' clears.  Returns 0 when the bit
 * cleared within E6000SW_RETRIES polls, 1 on timeout.
 */
static int
e6000sw_waitready(e6000sw_softc_t *sc, uint32_t phy, uint32_t reg,
    uint32_t busybit)
{
	int i;

	for (i = 0; i < E6000SW_RETRIES; i++) {
		if ((e6000sw_readreg(sc, phy, reg) & busybit) == 0)
			return (0);
		DELAY(1);
	}

	return (1);
}

/* XMDIO/Clause 45 access.
*/ static int e6000sw_read_xmdio(device_t dev, int phy, int devaddr, int devreg) { e6000sw_softc_t *sc; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } reg = devaddr & SMI_CMD_REG_ADDR_MASK; reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK; /* Load C45 register address. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_ADDR); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Start C45 read operation. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_READ); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Read C45 data. */ reg = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG); return (reg & PHY_DATA_MASK); } static int e6000sw_write_xmdio(device_t dev, int phy, int devaddr, int devreg, int val) { e6000sw_softc_t *sc; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } reg = devaddr & SMI_CMD_REG_ADDR_MASK; reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK; /* Load C45 register address. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_ADDR); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Load data and start the C45 write operation. 
*/ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_WRITE); return (0); } static int e6000sw_readphy(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; int locked, ret; sc = device_get_softc(dev); locked = E6000SW_LOCKED(sc); if (!locked) E6000SW_LOCK(sc); ret = e6000sw_readphy_locked(dev, phy, reg); if (!locked) E6000SW_UNLOCK(sc); return (ret); } /* * PHY registers are paged. Put page index in reg 22 (accessible from every * page), then access specific register. */ static int e6000sw_readphy_locked(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; uint32_t val; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) | ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG); return (val & PHY_DATA_MASK); } static int e6000sw_writephy(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; int locked, ret; sc = device_get_softc(dev); locked = E6000SW_LOCKED(sc); if (!locked) E6000SW_LOCK(sc); ret = e6000sw_writephy_locked(dev, phy, reg, data); if (!locked) E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_writephy_locked(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } if 
(E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, data & PHY_DATA_MASK); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) | ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); return (0); } static int e6000sw_detach(device_t dev) { int phy; e6000sw_softc_t *sc; sc = device_get_softc(dev); if (device_is_attached(dev)) taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt); if (sc->sc_tq != NULL) taskqueue_free(sc->sc_tq); device_delete_children(dev); sx_destroy(&sc->sx); for (phy = 0; phy < sc->num_ports; phy++) { if (sc->ifp[phy] != NULL) if_free(sc->ifp[phy]); if (sc->ifname[phy] != NULL) free(sc->ifname[phy], M_E6000SW); } return (0); } static etherswitch_info_t* e6000sw_getinfo(device_t dev) { return (ðerswitch_info); } static int e6000sw_getconf(device_t dev, etherswitch_conf_t *conf) { struct e6000sw_softc *sc; /* Return the VLAN mode. */ sc = device_get_softc(dev); conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int e6000sw_setconf(device_t dev, etherswitch_conf_t *conf) { struct e6000sw_softc *sc; /* Set the VLAN mode. 
*/ sc = device_get_softc(dev); if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { E6000SW_LOCK(sc); e6000sw_set_vlan_mode(sc, conf->vlan_mode); E6000SW_UNLOCK(sc); } return (0); } static void e6000sw_lock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); } static void e6000sw_unlock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); E6000SW_UNLOCK(sc); } static int e6000sw_getport(device_t dev, etherswitch_port_t *p) { struct mii_data *mii; int err; struct ifmediareq *ifmr; uint32_t reg; e6000sw_softc_t *sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); if (p->es_port >= sc->num_ports || p->es_port < 0) return (EINVAL); if (!e6000sw_is_portenabled(sc, p->es_port)) return (0); E6000SW_LOCK(sc); e6000sw_get_pvid(sc, p->es_port, &p->es_pvid); /* Port flags. */ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2); if (reg & PORT_CONTROL2_DISC_TAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED; if (reg & PORT_CONTROL2_DISC_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; err = 0; if (e6000sw_is_fixedport(sc, p->es_port)) { if (e6000sw_is_cpuport(sc, p->es_port)) p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; if (e6000sw_is_fixed25port(sc, p->es_port)) ifmr->ifm_active = IFM_2500_T; else ifmr->ifm_active = IFM_1000_T; ifmr->ifm_active |= IFM_ETHER | IFM_FDX; ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; } else { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); } E6000SW_UNLOCK(sc); return (err); } static int e6000sw_setport(device_t dev, etherswitch_port_t *p) { e6000sw_softc_t *sc; int err; struct mii_data *mii; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); if (p->es_port >= sc->num_ports || p->es_port < 0) return 
(EINVAL); if (!e6000sw_is_portenabled(sc, p->es_port)) return (0); E6000SW_LOCK(sc); /* Port flags. */ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2); if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED) reg |= PORT_CONTROL2_DISC_TAGGED; else reg &= ~PORT_CONTROL2_DISC_TAGGED; if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= PORT_CONTROL2_DISC_UNTAGGED; else reg &= ~PORT_CONTROL2_DISC_UNTAGGED; e6000sw_writereg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2, reg); err = 0; if (p->es_pvid != 0) e6000sw_set_pvid(sc, p->es_port, p->es_pvid); if (e6000sw_is_phyport(sc, p->es_port)) { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCSIFMEDIA); } E6000SW_UNLOCK(sc); return (err); } static __inline void e6000sw_port_vlan_assign(e6000sw_softc_t *sc, int port, uint32_t fid, uint32_t members) { uint32_t reg; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP); reg &= ~(PORT_MASK(sc) | PORT_VLAN_MAP_FID_MASK); reg |= members & PORT_MASK(sc) & ~(1 << port); reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VLAN_MAP, reg); reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1); reg &= ~PORT_CONTROL1_FID_MASK; reg |= (fid >> 4) & PORT_CONTROL1_FID_MASK; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL1, reg); } static int e6000sw_init_vlan(struct e6000sw_softc *sc) { int i, port, ret; uint32_t members; /* Disable all ports */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, (ret & ~PORT_CONTROL_ENABLE)); } /* Flush VTU. */ e6000sw_vtu_flush(sc); for (port = 0; port < sc->num_ports; port++) { /* Reset the egress and frame mode. 
*/ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); ret &= ~(PORT_CONTROL_EGRESS | PORT_CONTROL_FRAME); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, ret); /* Set the 802.1q mode. */ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL2); ret &= ~PORT_CONTROL2_DOT1Q; if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ret |= PORT_CONTROL2_DOT1Q; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL2, ret); } for (port = 0; port < sc->num_ports; port++) { if (!e6000sw_is_portenabled(sc, port)) continue; ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID); /* Set port priority */ ret &= ~PORT_VID_PRIORITY_MASK; /* Set VID map */ ret &= ~PORT_VID_DEF_VID_MASK; if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ret |= 1; else ret |= (port + 1); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, ret); } /* Assign the member ports to each origin port. */ for (port = 0; port < sc->num_ports; port++) { members = 0; if (e6000sw_is_portenabled(sc, port)) { for (i = 0; i < sc->num_ports; i++) { if (i == port || !e6000sw_is_portenabled(sc, i)) continue; members |= (1 << i); } } /* Default to FID 0. */ e6000sw_port_vlan_assign(sc, port, 0, members); } /* Reset internal VLAN table. */ for (i = 0; i < nitems(sc->vlans); i++) sc->vlans[i] = 0; /* Create default VLAN (1). 
*/ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { sc->vlans[0] = 1; e6000sw_vtu_update(sc, 0, sc->vlans[0], 1, 0, sc->ports_mask); } /* Enable all ports */ for (port = 0; port < sc->num_ports; port++) { if (!e6000sw_is_portenabled(sc, port)) continue; ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, (ret | PORT_CONTROL_ENABLE)); } return (0); } static int e6000sw_set_vlan_mode(struct e6000sw_softc *sc, uint32_t mode) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); switch (mode) { case ETHERSWITCH_VLAN_PORT: sc->vlan_mode = ETHERSWITCH_VLAN_PORT; etherswitch_info.es_nvlangroups = sc->num_ports; return (e6000sw_init_vlan(sc)); break; case ETHERSWITCH_VLAN_DOT1Q: sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; etherswitch_info.es_nvlangroups = E6000SW_NUM_VLANS; return (e6000sw_init_vlan(sc)); break; default: return (EINVAL); } } /* * Registers in this switch are divided into sections, specified in * documentation. So as to access any of them, section index and reg index * is necessary. etherswitchcfg uses only one variable, so indexes were * compressed into addr_reg: 32 * section_index + reg_index. */ static int e6000sw_readreg_wrapper(device_t dev, int addr_reg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(sc, 0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } return (e6000sw_readreg(device_get_softc(dev), addr_reg / 32, addr_reg % 32)); } static int e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val) { e6000sw_softc_t *sc; sc = device_get_softc(dev); if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(sc, 0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } e6000sw_writereg(device_get_softc(dev), addr_reg / 32, addr_reg % 32, val); return (0); } /* * setvgroup/getvgroup called from etherswitchfcg need to be locked, * while internal calls do not. 
*/ static int e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_setvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_getvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_set_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { uint32_t port; port = vg->es_vlangroup; if (port > sc->num_ports) return (EINVAL); if (vg->es_member_ports != vg->es_untagged_ports) { device_printf(sc->dev, "Tagged ports not supported.\n"); return (EINVAL); } e6000sw_port_vlan_assign(sc, port, 0, vg->es_untagged_ports); vg->es_vid = port | ETHERSWITCH_VID_VALID; return (0); } static int e6000sw_set_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { int i, vlan; vlan = vg->es_vid & ETHERSWITCH_VID_MASK; /* Set VLAN to '0' removes it from table. */ if (vlan == 0) { e6000sw_vtu_update(sc, VTU_PURGE, sc->vlans[vg->es_vlangroup], 0, 0, 0); sc->vlans[vg->es_vlangroup] = 0; return (0); } /* Is this VLAN already in table ? 
*/ for (i = 0; i < etherswitch_info.es_nvlangroups; i++) if (i != vg->es_vlangroup && vlan == sc->vlans[i]) return (EINVAL); sc->vlans[vg->es_vlangroup] = vlan; e6000sw_vtu_update(sc, 0, vlan, vg->es_vlangroup + 1, vg->es_member_ports & sc->ports_mask, vg->es_untagged_ports & sc->ports_mask); return (0); } static int e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) return (e6000sw_set_port_vlan(sc, vg)); else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) return (e6000sw_set_dot1q_vlan(sc, vg)); return (EINVAL); } static int e6000sw_get_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { uint32_t port, reg; port = vg->es_vlangroup; if (port > sc->num_ports) return (EINVAL); if (!e6000sw_is_portenabled(sc, port)) { vg->es_vid = port; return (0); } reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP); vg->es_untagged_ports = vg->es_member_ports = reg & PORT_MASK(sc); vg->es_vid = port | ETHERSWITCH_VID_VALID; vg->es_fid = (reg & PORT_VLAN_MAP_FID_MASK) >> PORT_VLAN_MAP_FID; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1); vg->es_fid |= (reg & PORT_CONTROL1_FID_MASK) << 4; return (0); } static int e6000sw_get_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { int i, port; uint32_t reg; vg->es_fid = 0; vg->es_vid = sc->vlans[vg->es_vlangroup]; vg->es_untagged_ports = vg->es_member_ports = 0; if (vg->es_vid == 0) return (0); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, vg->es_vid - 1); reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION); reg &= ~VTU_OP_MASK; reg |= VTU_GET_NEXT | VTU_BUSY; e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while reading\n"); return 
(EBUSY); } reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_VID); if (reg == VTU_VID_MASK || (reg & VTU_VID_VALID) == 0) return (EINVAL); if ((reg & VTU_VID_MASK) != vg->es_vid) return (EINVAL); vg->es_vid |= ETHERSWITCH_VID_VALID; reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA); for (i = 0; i < sc->num_ports; i++) { if (i == VTU_PPREG(sc)) reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA2); port = (reg >> VTU_PORT(sc, i)) & VTU_PORT_MASK; if (port == VTU_PORT_UNTAGGED) { vg->es_untagged_ports |= (1 << i); vg->es_member_ports |= (1 << i); } else if (port == VTU_PORT_TAGGED) vg->es_member_ports |= (1 << i); } return (0); } static int e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) return (e6000sw_get_port_vlan(sc, vg)); else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) return (e6000sw_get_dot1q_vlan(sc, vg)); return (EINVAL); } static __inline struct mii_data* e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy) { if (!e6000sw_is_phyport(sc, phy)) return (NULL); return (device_get_softc(sc->miibus[phy])); } static int e6000sw_ifmedia_upd(if_t ifp) { e6000sw_softc_t *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6000sw_miiforphy(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void e6000sw_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { e6000sw_softc_t *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6000sw_miiforphy(sc, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int e6000sw_smi_waitready(e6000sw_softc_t *sc, int phy) { int i; for (i = 0; i < E6000SW_SMI_TIMEOUT; i++) { if ((MDIO_READ(sc->dev, phy, SMI_CMD) & SMI_CMD_BUSY) == 0) return (0); DELAY(1); } return (1); } static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *sc, int addr, int reg) { 
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!MVSWITCH_MULTICHIP(sc)) return (MDIO_READ(sc->dev, addr, reg) & 0xffff); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) | ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } return (MDIO_READ(sc->dev, sc->sw_addr, SMI_DATA) & 0xffff); } static __inline void e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, int val) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!MVSWITCH_MULTICHIP(sc)) { MDIO_WRITE(sc->dev, addr, reg, val); return; } if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return; } MDIO_WRITE(sc->dev, sc->sw_addr, SMI_DATA, val); MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) | ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); } static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *sc, int port) { return ((sc->cpuports_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *sc, int port) { return ((sc->fixed_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *sc, int port) { return ((sc->fixed25_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_phyport(e6000sw_softc_t *sc, int port) { uint32_t phy_mask; phy_mask = ~(sc->fixed_mask | sc->cpuports_mask); return ((phy_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *sc, int port) { return ((sc->ports_mask & (1 << port)) ? 
true : false); } static __inline void e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid) { uint32_t reg; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID); reg &= ~PORT_VID_DEF_VID_MASK; reg |= (pvid & PORT_VID_DEF_VID_MASK); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, reg); } static __inline int e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid) { if (pvid == NULL) return (ENXIO); *pvid = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID) & PORT_VID_DEF_VID_MASK; return (0); } /* * Convert port status to ifmedia. */ static void e6000sw_update_ifmedia(uint16_t portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & PORT_STATUS_LINK_MASK) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & PORT_STATUS_SPEED_MASK) { case PORT_STATUS_SPEED_10: *media_active |= IFM_10_T; break; case PORT_STATUS_SPEED_100: *media_active |= IFM_100_TX; break; case PORT_STATUS_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & PORT_STATUS_DUPLEX_MASK) == 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; } static void e6000sw_tick(void *arg, int p __unused) { e6000sw_softc_t *sc; struct mii_data *mii; struct mii_softc *miisc; uint16_t portstatus; int port; sc = arg; E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); for (port = 0; port < sc->num_ports; port++) { /* Tick only on PHY ports */ if (!e6000sw_is_portenabled(sc, port) || !e6000sw_is_phyport(sc, port)) continue; mii = e6000sw_miiforphy(sc, port); if (mii == NULL) continue; portstatus = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_STATUS); e6000sw_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } E6000SW_UNLOCK(sc); } static void e6000sw_setup(device_t dev, 
e6000sw_softc_t *sc) { uint32_t atu_ctrl; /* Set aging time. */ atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL); atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK; atu_ctrl |= E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME; e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl); /* Send all with specific mac address to cpu port */ e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_2x, MGMT_EN_ALL); e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_0x, MGMT_EN_ALL); /* Disable Remote Management */ e6000sw_writereg(sc, REG_GLOBAL, SWITCH_GLOBAL_CONTROL2, 0); /* Disable loopback filter and flow control messages */ e6000sw_writereg(sc, REG_GLOBAL2, SWITCH_MGMT, SWITCH_MGMT_PRI_MASK | (1 << SWITCH_MGMT_RSVD2CPU) | SWITCH_MGMT_FC_PRI_MASK | (1 << SWITCH_MGMT_FORCEFLOW)); e6000sw_atu_flush(dev, sc, NO_OPERATION); e6000sw_atu_mac_table(dev, sc, NULL, NO_OPERATION); e6000sw_set_atustat(dev, sc, 0, COUNT_ALL); } static void e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin, int flag) { e6000sw_readreg(sc, REG_GLOBAL2, ATU_STATS); e6000sw_writereg(sc, REG_GLOBAL2, ATU_STATS, (bin << ATU_STATS_BIN ) | (flag << ATU_STATS_FLAG)); } static int e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu, int flag) { uint16_t ret_opt; uint16_t ret_data; if (flag == NO_OPERATION) return (0); else if ((flag & (LOAD_FROM_FIB | PURGE_FROM_FIB | GET_NEXT_IN_FIB | GET_VIOLATION_DATA | CLEAR_VIOLATION_DATA)) == 0) { device_printf(dev, "Wrong Opcode for ATU operation\n"); return (EINVAL); } if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) { device_printf(dev, "ATU unit is busy, cannot access\n"); return (EBUSY); } ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); if (flag & LOAD_FROM_FIB) { ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA); e6000sw_writereg(sc, REG_GLOBAL2, ATU_DATA, (ret_data & ~ENTRY_STATE)); } e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01); e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23); e6000sw_writereg(sc, 
REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45); e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid); e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret_opt | ATU_UNIT_BUSY | flag)); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) device_printf(dev, "Timeout while waiting ATU\n"); else if (flag & GET_NEXT_IN_FIB) { atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR01); atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR23); atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR45); } return (0); } static int e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag) { uint32_t reg; if (flag == NO_OPERATION) return (0); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) { device_printf(dev, "ATU unit is busy, cannot access\n"); return (EBUSY); } reg = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (reg | ATU_UNIT_BUSY | flag)); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) device_printf(dev, "Timeout while flushing ATU\n"); return (0); } static int e6000sw_vtu_flush(e6000sw_softc_t *sc) { if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, VTU_FLUSH | VTU_BUSY); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while flushing VTU\n"); return (ETIMEDOUT); } return (0); } static int e6000sw_vtu_update(e6000sw_softc_t *sc, int purge, int vid, int fid, int members, int untagged) { int i, op; uint32_t data[2]; if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } *data = (vid & VTU_VID_MASK); if (purge == 0) *data |= VTU_VID_VALID; e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, *data); if (purge == 0) { data[0] = 0; data[1] = 0; for (i = 0; i < sc->num_ports; i++) { if ((untagged & (1 << i)) != 0) data[i / VTU_PPREG(sc)] |= VTU_PORT_UNTAGGED << 
VTU_PORT(sc, i); else if ((members & (1 << i)) != 0) data[i / VTU_PPREG(sc)] |= VTU_PORT_TAGGED << VTU_PORT(sc, i); else data[i / VTU_PPREG(sc)] |= VTU_PORT_DISCARD << VTU_PORT(sc, i); } e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA, data[0]); e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA2, data[1]); e6000sw_writereg(sc, REG_GLOBAL, VTU_FID, fid & VTU_FID_MASK(sc)); op = VTU_LOAD; } else op = VTU_PURGE; e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, op | VTU_BUSY); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while flushing VTU\n"); return (ETIMEDOUT); } return (0); } diff --git a/sys/dev/etherswitch/etherswitch.c b/sys/dev/etherswitch/etherswitch.c index 74afcd259007..c66918f77174 100644 --- a/sys/dev/etherswitch/etherswitch.c +++ b/sys/dev/etherswitch/etherswitch.c @@ -1,226 +1,226 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "etherswitch_if.h" struct etherswitch_softc { device_t sc_dev; struct cdev *sc_devnode; }; static int etherswitch_probe(device_t); static int etherswitch_attach(device_t); static int etherswitch_detach(device_t); static void etherswitch_identify(driver_t *driver, device_t parent); static device_method_t etherswitch_methods[] = { /* device interface */ DEVMETHOD(device_identify, etherswitch_identify), DEVMETHOD(device_probe, etherswitch_probe), DEVMETHOD(device_attach, etherswitch_attach), DEVMETHOD(device_detach, etherswitch_detach), DEVMETHOD_END }; driver_t etherswitch_driver = { "etherswitch", etherswitch_methods, sizeof(struct etherswitch_softc), }; static d_ioctl_t etherswitchioctl; static struct cdevsw etherswitch_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_ioctl = etherswitchioctl, .d_name = "etherswitch", }; static void etherswitch_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "etherswitch", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "etherswitch", -1); + BUS_ADD_CHILD(parent, 0, "etherswitch", DEVICE_UNIT_ANY); } static int etherswitch_probe(device_t dev) { device_set_desc(dev, "Switch controller"); return (0); } static int etherswitch_attach(device_t dev) { int err; struct etherswitch_softc *sc; struct make_dev_args devargs; sc = device_get_softc(dev); sc->sc_dev = dev; 
make_dev_args_init(&devargs); devargs.mda_devsw = ðerswitch_cdevsw; devargs.mda_uid = UID_ROOT; devargs.mda_gid = GID_WHEEL; devargs.mda_mode = 0600; devargs.mda_si_drv1 = sc; err = make_dev_s(&devargs, &sc->sc_devnode, "etherswitch%d", device_get_unit(dev)); if (err != 0) { device_printf(dev, "failed to create character device\n"); return (ENXIO); } return (0); } static int etherswitch_detach(device_t dev) { struct etherswitch_softc *sc = (struct etherswitch_softc *)device_get_softc(dev); if (sc->sc_devnode) destroy_dev(sc->sc_devnode); return (0); } static int etherswitchioctl(struct cdev *cdev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct etherswitch_softc *sc = cdev->si_drv1; device_t dev = sc->sc_dev; device_t etherswitch = device_get_parent(dev); etherswitch_conf_t conf; etherswitch_info_t *info; etherswitch_reg_t *reg; etherswitch_phyreg_t *phyreg; etherswitch_portid_t *portid; int error = 0; switch (cmd) { case IOETHERSWITCHGETINFO: info = ETHERSWITCH_GETINFO(etherswitch); bcopy(info, data, sizeof(etherswitch_info_t)); break; case IOETHERSWITCHGETREG: reg = (etherswitch_reg_t *)data; ETHERSWITCH_LOCK(etherswitch); reg->val = ETHERSWITCH_READREG(etherswitch, reg->reg); ETHERSWITCH_UNLOCK(etherswitch); break; case IOETHERSWITCHSETREG: reg = (etherswitch_reg_t *)data; ETHERSWITCH_LOCK(etherswitch); error = ETHERSWITCH_WRITEREG(etherswitch, reg->reg, reg->val); ETHERSWITCH_UNLOCK(etherswitch); break; case IOETHERSWITCHGETPORT: error = ETHERSWITCH_GETPORT(etherswitch, (etherswitch_port_t *)data); break; case IOETHERSWITCHSETPORT: error = ETHERSWITCH_SETPORT(etherswitch, (etherswitch_port_t *)data); break; case IOETHERSWITCHGETVLANGROUP: error = ETHERSWITCH_GETVGROUP(etherswitch, (etherswitch_vlangroup_t *)data); break; case IOETHERSWITCHSETVLANGROUP: error = ETHERSWITCH_SETVGROUP(etherswitch, (etherswitch_vlangroup_t *)data); break; case IOETHERSWITCHGETPHYREG: phyreg = (etherswitch_phyreg_t *)data; phyreg->val = 
ETHERSWITCH_READPHYREG(etherswitch, phyreg->phy, phyreg->reg); break; case IOETHERSWITCHSETPHYREG: phyreg = (etherswitch_phyreg_t *)data; error = ETHERSWITCH_WRITEPHYREG(etherswitch, phyreg->phy, phyreg->reg, phyreg->val); break; case IOETHERSWITCHGETCONF: bzero(&conf, sizeof(etherswitch_conf_t)); error = ETHERSWITCH_GETCONF(etherswitch, &conf); bcopy(&conf, data, sizeof(etherswitch_conf_t)); break; case IOETHERSWITCHSETCONF: error = ETHERSWITCH_SETCONF(etherswitch, (etherswitch_conf_t *)data); break; case IOETHERSWITCHFLUSHALL: error = ETHERSWITCH_FLUSH_ALL(etherswitch); break; case IOETHERSWITCHFLUSHPORT: portid = (etherswitch_portid_t *)data; error = ETHERSWITCH_FLUSH_PORT(etherswitch, portid->es_port); break; case IOETHERSWITCHGETTABLE: error = ETHERSWITCH_FETCH_TABLE(etherswitch, (void *) data); break; case IOETHERSWITCHGETTABLEENTRY: error = ETHERSWITCH_FETCH_TABLE_ENTRY(etherswitch, (void *) data); break; default: error = ENOTTY; } return (error); } MODULE_VERSION(etherswitch, 1); diff --git a/sys/dev/etherswitch/ip17x/ip17x.c b/sys/dev/etherswitch/ip17x/ip17x.c index 5e07e1f72b80..349dc59e09ae 100644 --- a/sys/dev/etherswitch/ip17x/ip17x.c +++ b/sys/dev/etherswitch/ip17x/ip17x.c @@ -1,653 +1,653 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" MALLOC_DECLARE(M_IP17X); MALLOC_DEFINE(M_IP17X, "ip17x", "ip17x data structures"); static void ip17x_tick(void *); static int ip17x_ifmedia_upd(if_t); static void ip17x_ifmedia_sts(if_t, struct ifmediareq *); static void ip17x_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "ip17x", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "ip17x", -1); + BUS_ADD_CHILD(parent, 0, "ip17x", DEVICE_UNIT_ANY); } static int ip17x_probe(device_t dev) { struct ip17x_softc *sc; uint32_t oui, model, phy_id1, phy_id2; #ifdef FDT phandle_t ip17x_node; pcell_t cell; ip17x_node = fdt_find_compatible(OF_finddevice("/"), "icplus,ip17x", 0); if (ip17x_node == 0) return (ENXIO); #endif sc = device_get_softc(dev); /* Read ID from PHY 0. 
*/ phy_id1 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR1); phy_id2 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR2); oui = MII_OUI(phy_id1, phy_id2); model = MII_MODEL(phy_id2); /* We only care about IC+ devices. */ if (oui != IP17X_OUI) { device_printf(dev, "Unsupported IC+ switch. Unknown OUI: %#x\n", oui); return (ENXIO); } switch (model) { case IP17X_IP175A: sc->sc_switchtype = IP17X_SWITCH_IP175A; break; case IP17X_IP175C: sc->sc_switchtype = IP17X_SWITCH_IP175C; break; default: device_printf(dev, "Unsupported IC+ switch model: %#x\n", model); return (ENXIO); } /* IP175D has a specific ID register. */ model = MDIO_READREG(device_get_parent(dev), IP175D_ID_PHY, IP175D_ID_REG); if (model == 0x175d) sc->sc_switchtype = IP17X_SWITCH_IP175D; else { /* IP178 has more PHYs. Try it. */ model = MDIO_READREG(device_get_parent(dev), 5, MII_PHYIDR1); if (phy_id1 == model) sc->sc_switchtype = IP17X_SWITCH_IP178C; } sc->miipoll = 1; #ifdef FDT if ((OF_getencprop(ip17x_node, "mii-poll", &cell, sizeof(cell))) > 0) sc->miipoll = cell ? 
1 : 0; #else (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "mii-poll", &sc->miipoll); #endif device_set_desc(dev, "IC+ IP17x switch driver"); return (BUS_PROBE_DEFAULT); } static int ip17x_attach_phys(struct ip17x_softc *sc) { int err, phy, port; char name[IFNAMSIZ]; port = err = 0; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < MII_NPHY; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; sc->phyport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[port], sc); if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX); if_initname(sc->ifp[port], name, port); sc->miibus[port] = malloc(sizeof(device_t), M_IP17X, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], ip17x_ifmedia_upd, ip17x_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), if_name(sc->ifp[port])); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); break; } sc->info.es_nports = port + 1; if (++port >= sc->numports) break; } return (err); } static int ip17x_attach(device_t dev) { struct ip17x_softc *sc; int err; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ip17x", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* XXX Defaults */ sc->phymask = 0x0f; sc->media = 100; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phymask", &sc->phymask); /* Number of vlans supported by the switch. */ sc->info.es_nvlangroups = IP17X_MAX_VLANS; /* Attach the switch related functions. 
*/ if (IP17X_IS_SWITCH(sc, IP175C)) ip175c_attach(sc); else if (IP17X_IS_SWITCH(sc, IP175D)) ip175d_attach(sc); else /* We don't have support to all the models yet :-/ */ return (ENXIO); /* Always attach the cpu port. */ sc->phymask |= (1 << sc->cpuport); sc->ifp = malloc(sizeof(if_t) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->pvid = malloc(sizeof(uint32_t) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); /* Initialize the switch. */ sc->hal.ip17x_reset(sc); /* * Attach the PHYs and complete the bus enumeration. */ err = ip17x_attach_phys(sc); if (err != 0) return (err); /* * Set the switch to port based vlans or disabled (if not supported * on this model). */ sc->hal.ip17x_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); if (sc->miipoll) { callout_init(&sc->callout_tick, 0); ip17x_tick(sc); } return (0); } static int ip17x_detach(device_t dev) { struct ip17x_softc *sc; int i, port; sc = device_get_softc(dev); if (sc->miipoll) callout_drain(&sc->callout_tick); for (i=0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = sc->phyport[i]; if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->miibus[port], M_IP17X); } free(sc->portphy, M_IP17X); free(sc->miibus, M_IP17X); free(sc->pvid, M_IP17X); free(sc->ifp, M_IP17X); /* Reset the switch. 
*/ sc->hal.ip17x_reset(sc); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } static inline struct mii_data * ip17x_miiforport(struct ip17x_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t ip17x_ifpforport(struct ip17x_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. */ static void ip17x_miipollstat(struct ip17x_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int i, port; IP17X_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = sc->phyport[i]; if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void ip17x_tick(void *arg) { struct ip17x_softc *sc; sc = arg; ip17x_miipollstat(sc); callout_reset(&sc->callout_tick, hz, ip17x_tick, sc); } static void ip17x_lock(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_NOTOWNED); IP17X_LOCK(sc); } static void ip17x_unlock(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); IP17X_UNLOCK(sc); } static etherswitch_info_t * ip17x_getinfo(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int ip17x_getport(device_t dev, etherswitch_port_t *p) { struct ip17x_softc *sc; struct ifmediareq *ifmr; struct mii_data *mii; int err, phy; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); phy = sc->portphy[p->es_port]; /* Retrieve the PVID. */ p->es_pvid = sc->pvid[phy]; /* Port flags. 
*/ if (sc->addtag & (1 << phy)) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; if (sc->striptag & (1 << phy)) p->es_flags |= ETHERSWITCH_PORT_STRIPTAG; ifmr = &p->es_ifmr; /* No media settings ? */ if (p->es_ifmr.ifm_count == 0) return (0); mii = ip17x_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); if (phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } return (0); } static int ip17x_setport(device_t dev, etherswitch_port_t *p) { struct ip17x_softc *sc; struct ifmedia *ifm; if_t ifp; struct mii_data *mii; int phy; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); phy = sc->portphy[p->es_port]; ifp = ip17x_ifpforport(sc, p->es_port); mii = ip17x_miiforport(sc, p->es_port); if (ifp == NULL || mii == NULL) return (ENXIO); /* Port flags. */ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { /* Set the PVID. */ if (p->es_pvid != 0) { if (IP17X_IS_SWITCH(sc, IP175C) && p->es_pvid > IP175C_LAST_VLAN) return (ENXIO); sc->pvid[phy] = p->es_pvid; } /* Mutually exclusive. */ if (p->es_flags & ETHERSWITCH_PORT_ADDTAG && p->es_flags & ETHERSWITCH_PORT_STRIPTAG) return (EINVAL); /* Reset the settings for this port. */ sc->addtag &= ~(1 << phy); sc->striptag &= ~(1 << phy); /* And then set it to the new value. */ if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) sc->addtag |= (1 << phy); if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG) sc->striptag |= (1 << phy); } /* Update the switch configuration. */ if (sc->hal.ip17x_hw_setup(sc)) return (ENXIO); /* Do not allow media changes on CPU port. 
*/ if (phy == sc->cpuport) return (0); /* No media settings ? */ if (p->es_ifmr.ifm_count == 0) return (0); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static void ip17x_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int ip17x_ifmedia_upd(if_t ifp) { struct ip17x_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); DPRINTF(sc->sc_dev, "%s\n", __func__); mii = ip17x_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void ip17x_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct ip17x_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); DPRINTF(sc->sc_dev, "%s\n", __func__); mii = ip17x_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int ip17x_readreg(device_t dev, int addr) { struct ip17x_softc *sc __diagused; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static int ip17x_writereg(device_t dev, int addr, int value) { struct ip17x_softc *sc __diagused; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static int ip17x_getconf(device_t dev, etherswitch_conf_t *conf) { struct ip17x_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->hal.ip17x_get_vlan_mode(sc); return (0); } static int ip17x_setconf(device_t dev, etherswitch_conf_t *conf) { struct ip17x_softc *sc; sc = device_get_softc(dev); /* Set the VLAN mode. 
*/ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) sc->hal.ip17x_set_vlan_mode(sc, conf->vlan_mode); return (0); } static device_method_t ip17x_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ip17x_identify), DEVMETHOD(device_probe, ip17x_probe), DEVMETHOD(device_attach, ip17x_attach), DEVMETHOD(device_detach, ip17x_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ip17x_readphy), DEVMETHOD(miibus_writereg, ip17x_writephy), DEVMETHOD(miibus_statchg, ip17x_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, ip17x_readphy), DEVMETHOD(mdio_writereg, ip17x_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ip17x_lock), DEVMETHOD(etherswitch_unlock, ip17x_unlock), DEVMETHOD(etherswitch_getinfo, ip17x_getinfo), DEVMETHOD(etherswitch_readreg, ip17x_readreg), DEVMETHOD(etherswitch_writereg, ip17x_writereg), DEVMETHOD(etherswitch_readphyreg, ip17x_readphy), DEVMETHOD(etherswitch_writephyreg, ip17x_writephy), DEVMETHOD(etherswitch_getport, ip17x_getport), DEVMETHOD(etherswitch_setport, ip17x_setport), DEVMETHOD(etherswitch_getvgroup, ip17x_getvgroup), DEVMETHOD(etherswitch_setvgroup, ip17x_setvgroup), DEVMETHOD(etherswitch_getconf, ip17x_getconf), DEVMETHOD(etherswitch_setconf, ip17x_setconf), DEVMETHOD_END }; DEFINE_CLASS_0(ip17x, ip17x_driver, ip17x_methods, sizeof(struct ip17x_softc)); DRIVER_MODULE(ip17x, mdio, ip17x_driver, 0, 0); DRIVER_MODULE(miibus, ip17x, miibus_driver, 0, 0); DRIVER_MODULE(etherswitch, ip17x, etherswitch_driver, 0, 0); MODULE_VERSION(ip17x, 1); #ifdef FDT MODULE_DEPEND(ip17x, mdio, 1, 1, 1); /* XXX which versions? */ #else DRIVER_MODULE(mdio, ip17x, mdio_driver, 0, 0); MODULE_DEPEND(ip17x, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(ip17x, etherswitch, 1, 1, 1); /* XXX which versions? 
*/ #endif diff --git a/sys/dev/etherswitch/miiproxy.c b/sys/dev/etherswitch/miiproxy.c index 24822b15f89e..3a621eae0875 100644 --- a/sys/dev/etherswitch/miiproxy.c +++ b/sys/dev/etherswitch/miiproxy.c @@ -1,436 +1,436 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" MALLOC_DECLARE(M_MIIPROXY); MALLOC_DEFINE(M_MIIPROXY, "miiproxy", "miiproxy data structures"); driver_t miiproxy_driver; driver_t mdioproxy_driver; struct miiproxy_softc { device_t parent; device_t proxy; device_t mdio; }; struct mdioproxy_softc { }; /* * The rendezvous data structures and functions allow two device endpoints to * match up, so that the proxy endpoint can be associated with a target * endpoint. The proxy has to know the device name of the target that it * wants to associate with, for example through a hint. The rendezvous code * makes no assumptions about the devices that want to meet. */ struct rendezvous_entry; enum rendezvous_op { RENDEZVOUS_ATTACH, RENDEZVOUS_DETACH }; typedef int (*rendezvous_callback_t)(enum rendezvous_op, struct rendezvous_entry *); static SLIST_HEAD(rendezvoushead, rendezvous_entry) rendezvoushead = SLIST_HEAD_INITIALIZER(rendezvoushead); struct rendezvous_endpoint { device_t device; const char *name; rendezvous_callback_t callback; }; struct rendezvous_entry { SLIST_ENTRY(rendezvous_entry) entries; struct rendezvous_endpoint proxy; struct rendezvous_endpoint target; }; /* * Call the callback routines for both the proxy and the target. If either * returns an error, undo the attachment. */ static int rendezvous_attach(struct rendezvous_entry *e, struct rendezvous_endpoint *ep) { int error; error = e->proxy.callback(RENDEZVOUS_ATTACH, e); if (error == 0) { error = e->target.callback(RENDEZVOUS_ATTACH, e); if (error != 0) { e->proxy.callback(RENDEZVOUS_DETACH, e); ep->device = NULL; ep->callback = NULL; } } return (error); } /* * Create an entry for the proxy in the rendezvous list. The name parameter * indicates the name of the device that is the target endpoint for this * rendezvous. 
The callback will be invoked as soon as the target is * registered: either immediately if the target registered itself earlier, * or once the target registers. Returns ENXIO if the target has not yet * registered. */ static int rendezvous_register_proxy(device_t dev, const char *name, rendezvous_callback_t callback) { struct rendezvous_entry *e; KASSERT(callback != NULL, ("callback must be set")); SLIST_FOREACH(e, &rendezvoushead, entries) { if (strcmp(name, e->target.name) == 0) { /* the target is already attached */ e->proxy.name = device_get_nameunit(dev); e->proxy.device = dev; e->proxy.callback = callback; return (rendezvous_attach(e, &e->proxy)); } } e = malloc(sizeof(*e), M_MIIPROXY, M_WAITOK | M_ZERO); e->proxy.name = device_get_nameunit(dev); e->proxy.device = dev; e->proxy.callback = callback; e->target.name = name; SLIST_INSERT_HEAD(&rendezvoushead, e, entries); return (ENXIO); } /* * Create an entry in the rendezvous list for the target. * Returns ENXIO if the proxy has not yet registered. */ static int rendezvous_register_target(device_t dev, rendezvous_callback_t callback) { struct rendezvous_entry *e; const char *name; KASSERT(callback != NULL, ("callback must be set")); name = device_get_nameunit(dev); SLIST_FOREACH(e, &rendezvoushead, entries) { if (strcmp(name, e->target.name) == 0) { e->target.device = dev; e->target.callback = callback; return (rendezvous_attach(e, &e->target)); } } e = malloc(sizeof(*e), M_MIIPROXY, M_WAITOK | M_ZERO); e->target.name = name; e->target.device = dev; e->target.callback = callback; SLIST_INSERT_HEAD(&rendezvoushead, e, entries); return (ENXIO); } /* * Remove the registration for the proxy. 
*/ static int rendezvous_unregister_proxy(device_t dev) { struct rendezvous_entry *e; int error = 0; SLIST_FOREACH(e, &rendezvoushead, entries) { if (e->proxy.device == dev) { if (e->target.device == NULL) { SLIST_REMOVE(&rendezvoushead, e, rendezvous_entry, entries); free(e, M_MIIPROXY); return (0); } else { e->proxy.callback(RENDEZVOUS_DETACH, e); e->target.callback(RENDEZVOUS_DETACH, e); } e->proxy.device = NULL; e->proxy.callback = NULL; return (error); } } return (ENOENT); } /* * Remove the registration for the target. */ static int rendezvous_unregister_target(device_t dev) { struct rendezvous_entry *e; int error = 0; SLIST_FOREACH(e, &rendezvoushead, entries) { if (e->target.device == dev) { if (e->proxy.device == NULL) { SLIST_REMOVE(&rendezvoushead, e, rendezvous_entry, entries); free(e, M_MIIPROXY); return (0); } else { e->proxy.callback(RENDEZVOUS_DETACH, e); e->target.callback(RENDEZVOUS_DETACH, e); } e->target.device = NULL; e->target.callback = NULL; return (error); } } return (ENOENT); } /* * Functions of the proxy that is interposed between the ethernet interface * driver and the miibus device. */ static int miiproxy_rendezvous_callback(enum rendezvous_op op, struct rendezvous_entry *rendezvous) { struct miiproxy_softc *sc = device_get_softc(rendezvous->proxy.device); switch (op) { case RENDEZVOUS_ATTACH: sc->mdio = device_get_parent(rendezvous->target.device); break; case RENDEZVOUS_DETACH: sc->mdio = NULL; break; } return (0); } static int miiproxy_probe(device_t dev) { device_set_desc(dev, "MII/MDIO proxy, MII side"); return (BUS_PROBE_SPECIFIC); } static int miiproxy_attach(device_t dev) { /* * The ethernet interface needs to call mii_attach_proxy() to pass * the relevant parameters for rendezvous with the MDIO target. 
*/ return (bus_generic_attach(dev)); } static int miiproxy_detach(device_t dev) { rendezvous_unregister_proxy(dev); bus_generic_detach(dev); return (0); } static int miiproxy_readreg(device_t dev, int phy, int reg) { struct miiproxy_softc *sc = device_get_softc(dev); if (sc->mdio != NULL) return (MDIO_READREG(sc->mdio, phy, reg)); return (-1); } static int miiproxy_writereg(device_t dev, int phy, int reg, int val) { struct miiproxy_softc *sc = device_get_softc(dev); if (sc->mdio != NULL) return (MDIO_WRITEREG(sc->mdio, phy, reg, val)); return (-1); } static void miiproxy_statchg(device_t dev) { MIIBUS_STATCHG(device_get_parent(dev)); } static void miiproxy_linkchg(device_t dev) { MIIBUS_LINKCHG(device_get_parent(dev)); } static void miiproxy_mediainit(device_t dev) { MIIBUS_MEDIAINIT(device_get_parent(dev)); } /* * Functions for the MDIO target device driver. */ static int mdioproxy_rendezvous_callback(enum rendezvous_op op, struct rendezvous_entry *rendezvous) { return (0); } static void mdioproxy_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, driver->name, -1) == NULL) { - BUS_ADD_CHILD(parent, 0, driver->name, -1); + BUS_ADD_CHILD(parent, 0, driver->name, DEVICE_UNIT_ANY); } } static int mdioproxy_probe(device_t dev) { device_set_desc(dev, "MII/MDIO proxy, MDIO side"); return (BUS_PROBE_SPECIFIC); } static int mdioproxy_attach(device_t dev) { rendezvous_register_target(dev, mdioproxy_rendezvous_callback); return (bus_generic_attach(dev)); } static int mdioproxy_detach(device_t dev) { rendezvous_unregister_target(dev); bus_generic_detach(dev); return (0); } /* * Attach this proxy in place of miibus. The target MDIO must be attached * already. Returns NULL on error. 
*/ device_t mii_attach_proxy(device_t dev) { struct miiproxy_softc *sc; int error; const char *name; device_t miiproxy; if (resource_string_value(device_get_name(dev), device_get_unit(dev), "mdio", &name) != 0) { if (bootverbose) printf("mii_attach_proxy: not attaching, no mdio" " device hint for %s\n", device_get_nameunit(dev)); return (NULL); } miiproxy = device_add_child(dev, miiproxy_driver.name, DEVICE_UNIT_ANY); error = bus_generic_attach(dev); if (error != 0) { device_printf(dev, "can't attach miiproxy\n"); return (NULL); } sc = device_get_softc(miiproxy); sc->parent = dev; sc->proxy = miiproxy; if (rendezvous_register_proxy(miiproxy, name, miiproxy_rendezvous_callback) != 0) { device_printf(dev, "can't attach proxy\n"); return (NULL); } device_printf(miiproxy, "attached to target %s\n", device_get_nameunit(sc->mdio)); return (miiproxy); } static device_method_t miiproxy_methods[] = { /* device interface */ DEVMETHOD(device_probe, miiproxy_probe), DEVMETHOD(device_attach, miiproxy_attach), DEVMETHOD(device_detach, miiproxy_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, miiproxy_readreg), DEVMETHOD(miibus_writereg, miiproxy_writereg), DEVMETHOD(miibus_statchg, miiproxy_statchg), DEVMETHOD(miibus_linkchg, miiproxy_linkchg), DEVMETHOD(miibus_mediainit, miiproxy_mediainit), DEVMETHOD_END }; static device_method_t mdioproxy_methods[] = { /* device interface */ DEVMETHOD(device_identify, mdioproxy_identify), DEVMETHOD(device_probe, mdioproxy_probe), DEVMETHOD(device_attach, mdioproxy_attach), DEVMETHOD(device_detach, mdioproxy_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; DEFINE_CLASS_0(miiproxy, miiproxy_driver, miiproxy_methods, sizeof(struct miiproxy_softc)); DEFINE_CLASS_0(mdioproxy, mdioproxy_driver, mdioproxy_methods, sizeof(struct mdioproxy_softc)); DRIVER_MODULE(mdioproxy, mdio, mdioproxy_driver, 0, 0); DRIVER_MODULE(miibus, miiproxy, miibus_driver, 0, 0); 
MODULE_VERSION(miiproxy, 1); MODULE_DEPEND(miiproxy, miibus, 1, 1, 1); diff --git a/sys/dev/etherswitch/rtl8366/rtl8366rb.c b/sys/dev/etherswitch/rtl8366/rtl8366rb.c index 761a96b0ec80..e57b11f3270f 100644 --- a/sys/dev/etherswitch/rtl8366/rtl8366rb.c +++ b/sys/dev/etherswitch/rtl8366/rtl8366rb.c @@ -1,960 +1,960 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Hiroki Mori. * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_etherswitch.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "iicbus_if.h" #include "miibus_if.h" #include "etherswitch_if.h" struct rtl8366rb_softc { struct mtx sc_mtx; /* serialize access to softc */ int smi_acquired; /* serialize access to SMI/I2C bus */ struct mtx callout_mtx; /* serialize callout */ device_t dev; int vid[RTL8366_NUM_VLANS]; char *ifname[RTL8366_NUM_PHYS]; device_t miibus[RTL8366_NUM_PHYS]; if_t ifp[RTL8366_NUM_PHYS]; struct callout callout_tick; etherswitch_info_t info; int chip_type; int phy4cpu; int numphys; }; #define RTL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define RTL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define RTL_LOCK_ASSERT(_sc, _what) mtx_assert(&(_s)c->sc_mtx, (_what)) #define RTL_TRYLOCK(_sc) mtx_trylock(&(_sc)->sc_mtx) #define RTL_WAITOK 0 #define RTL_NOWAIT 1 #define RTL_SMI_ACQUIRED 1 #define RTL_SMI_ACQUIRED_ASSERT(_sc) \ KASSERT((_sc)->smi_acquired == RTL_SMI_ACQUIRED, ("smi must be acquired @%s", __FUNCTION__)) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #define DEVERR(dev, err, fmt, args...) 
do { \ if (err != 0) device_printf(dev, fmt, err, args); \ } while (0) #define DEBUG_INCRVAR(var) do { \ var++; \ } while (0) static int callout_blocked = 0; static int iic_select_retries = 0; static int phy_access_retries = 0; static SYSCTL_NODE(_debug, OID_AUTO, rtl8366rb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "rtl8366rb"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, callout_blocked, CTLFLAG_RW, &callout_blocked, 0, "number of times the callout couldn't acquire the bus"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, iic_select_retries, CTLFLAG_RW, &iic_select_retries, 0, "number of times the I2C bus selection had to be retried"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, phy_access_retries, CTLFLAG_RW, &phy_access_retries, 0, "number of times PHY register access had to be retried"); #else #define DPRINTF(dev, args...) #define DEVERR(dev, err, fmt, args...) #define DEBUG_INCRVAR(var) #endif static int smi_probe(device_t dev); static int smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep); static int smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep); static int smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep); static void rtl8366rb_tick(void *arg); static int rtl8366rb_ifmedia_upd(if_t); static void rtl8366rb_ifmedia_sts(if_t, struct ifmediareq *); static void rtl8366rb_identify(driver_t *driver, device_t parent) { device_t child; struct iicbus_ivar *devi; if (device_find_child(parent, "rtl8366rb", -1) == NULL) { - child = BUS_ADD_CHILD(parent, 0, "rtl8366rb", -1); + child = BUS_ADD_CHILD(parent, 0, "rtl8366rb", DEVICE_UNIT_ANY); devi = IICBUS_IVAR(child); devi->addr = RTL8366_IIC_ADDR; } } static int rtl8366rb_probe(device_t dev) { struct rtl8366rb_softc *sc; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); if (smi_probe(dev) != 0) return (ENXIO); if (sc->chip_type == RTL8366RB) device_set_desc(dev, "RTL8366RB Ethernet Switch Controller"); else device_set_desc(dev, "RTL8366SR Ethernet Switch Controller"); return 
(BUS_PROBE_DEFAULT); } static void rtl8366rb_init(device_t dev) { struct rtl8366rb_softc *sc; int i; sc = device_get_softc(dev); /* Initialisation for TL-WR1043ND */ #ifdef RTL8366_SOFT_RESET smi_rmw(dev, RTL8366_RCR, RTL8366_RCR_SOFT_RESET, RTL8366_RCR_SOFT_RESET, RTL_WAITOK); #else smi_rmw(dev, RTL8366_RCR, RTL8366_RCR_HARD_RESET, RTL8366_RCR_HARD_RESET, RTL_WAITOK); #endif /* hard reset not return ack */ DELAY(100000); /* Enable 16 VLAN mode */ smi_rmw(dev, RTL8366_SGCR, RTL8366_SGCR_EN_VLAN | RTL8366_SGCR_EN_VLAN_4KTB, RTL8366_SGCR_EN_VLAN, RTL_WAITOK); /* Initialize our vlan table. */ for (i = 0; i <= 1; i++) sc->vid[i] = (i + 1) | ETHERSWITCH_VID_VALID; /* Remove port 0 from VLAN 1. */ smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 0), (1 << 0), 0, RTL_WAITOK); /* Add port 0 untagged and port 5 tagged to VLAN 2. */ smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 1), ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT) | ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT), ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT | ((1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT)), RTL_WAITOK); /* Set PVID 2 for port 0. */ smi_rmw(dev, RTL8366_PVCR_REG(0), RTL8366_PVCR_VAL(0, RTL8366_PVCR_PORT_MASK), RTL8366_PVCR_VAL(0, 1), RTL_WAITOK); } static int rtl8366rb_attach(device_t dev) { struct rtl8366rb_softc *sc; uint16_t rev = 0; char name[IFNAMSIZ]; int err = 0; int i; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->sc_mtx, "rtl8366rb", NULL, MTX_DEF); sc->smi_acquired = 0; mtx_init(&sc->callout_mtx, "rtl8366rbcallout", NULL, MTX_DEF); rtl8366rb_init(dev); smi_read(dev, RTL8366_CVCR, &rev, RTL_WAITOK); device_printf(dev, "rev. %d\n", rev & 0x000f); sc->phy4cpu = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phy4cpu", &sc->phy4cpu); sc->numphys = sc->phy4cpu ? 
RTL8366_NUM_PHYS - 1 : RTL8366_NUM_PHYS; sc->info.es_nports = sc->numphys + 1; sc->info.es_nvlangroups = RTL8366_NUM_VLANS; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; if (sc->chip_type == RTL8366RB) sprintf(sc->info.es_name, "Realtek RTL8366RB"); else sprintf(sc->info.es_name, "Realtek RTL8366SR"); /* attach miibus and phys */ /* PHYs need an interface, so we generate a dummy one */ for (i = 0; i < sc->numphys; i++) { sc->ifp[i] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[i], sc); if_setflagbits(sc->ifp[i], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX, 0); snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(dev)); sc->ifname[i] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK); bcopy(name, sc->ifname[i], strlen(name)+1); if_initname(sc->ifp[i], sc->ifname[i], i); err = mii_attach(dev, &sc->miibus[i], sc->ifp[i], rtl8366rb_ifmedia_upd, \ rtl8366rb_ifmedia_sts, BMSR_DEFCAPMASK, \ i, MII_OFFSET_ANY, 0); if (err != 0) { device_printf(dev, "attaching PHY %d failed\n", i); return (err); } } bus_generic_probe(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); callout_init_mtx(&sc->callout_tick, &sc->callout_mtx, 0); rtl8366rb_tick(sc); return (err); } static int rtl8366rb_detach(device_t dev) { struct rtl8366rb_softc *sc; int i; sc = device_get_softc(dev); for (i=0; i < sc->numphys; i++) { if (sc->miibus[i]) device_delete_child(dev, sc->miibus[i]); if (sc->ifp[i] != NULL) if_free(sc->ifp[i]); free(sc->ifname[i], M_DEVBUF); } bus_generic_detach(dev); callout_drain(&sc->callout_tick); mtx_destroy(&sc->callout_mtx); mtx_destroy(&sc->sc_mtx); return (0); } static void rtl8366rb_update_ifmedia(int portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & RTL8366_PLSR_LINK) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & RTL8366_PLSR_SPEED_MASK) { case RTL8366_PLSR_SPEED_10: *media_active |= 
IFM_10_T; break; case RTL8366_PLSR_SPEED_100: *media_active |= IFM_100_TX; break; case RTL8366_PLSR_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & RTL8366_PLSR_FULLDUPLEX) != 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; if ((portstatus & RTL8366_PLSR_TXPAUSE) != 0) *media_active |= IFM_ETH_TXPAUSE; if ((portstatus & RTL8366_PLSR_RXPAUSE) != 0) *media_active |= IFM_ETH_RXPAUSE; } static void rtl833rb_miipollstat(struct rtl8366rb_softc *sc) { int i; struct mii_data *mii; struct mii_softc *miisc; uint16_t value; int portstatus; for (i = 0; i < sc->numphys; i++) { mii = device_get_softc(sc->miibus[i]); if ((i % 2) == 0) { if (smi_read(sc->dev, RTL8366_PLSR_BASE + i/2, &value, RTL_NOWAIT) != 0) { DEBUG_INCRVAR(callout_blocked); return; } portstatus = value & 0xff; } else { portstatus = (value >> 8) & 0xff; } rtl8366rb_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } } static void rtl8366rb_tick(void *arg) { struct rtl8366rb_softc *sc; sc = arg; rtl833rb_miipollstat(sc); callout_reset(&sc->callout_tick, hz, rtl8366rb_tick, sc); } static int smi_probe(device_t dev) { struct rtl8366rb_softc *sc; device_t iicbus, iicha; int err, i, j; uint16_t chipid; char bytes[2]; int xferd; sc = device_get_softc(dev); iicbus = device_get_parent(dev); iicha = device_get_parent(iicbus); for (i = 0; i < 2; ++i) { iicbus_reset(iicbus, IIC_FASTEST, RTL8366_IIC_ADDR, NULL); for (j=3; j--; ) { IICBUS_STOP(iicha); /* * we go directly to the host adapter because iicbus.c * only issues a stop on a bus that was successfully started. 
*/ } err = iicbus_request_bus(iicbus, dev, IIC_WAIT); if (err != 0) goto out; err = iicbus_start(iicbus, RTL8366_IIC_ADDR | RTL_IICBUS_READ, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; if (i == 0) { bytes[0] = RTL8366RB_CIR & 0xff; bytes[1] = (RTL8366RB_CIR >> 8) & 0xff; } else { bytes[0] = RTL8366SR_CIR & 0xff; bytes[1] = (RTL8366SR_CIR >> 8) & 0xff; } err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0); if (err != 0) goto out; chipid = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff); if (i == 0 && chipid == RTL8366RB_CIR_ID8366RB) { DPRINTF(dev, "chip id 0x%04x\n", chipid); sc->chip_type = RTL8366RB; err = 0; break; } if (i == 1 && chipid == RTL8366SR_CIR_ID8366SR) { DPRINTF(dev, "chip id 0x%04x\n", chipid); sc->chip_type = RTL8366SR; err = 0; break; } if (i == 0) { iicbus_stop(iicbus); iicbus_release_bus(iicbus, dev); } } if (i == 2) err = ENXIO; out: iicbus_stop(iicbus); iicbus_release_bus(iicbus, dev); return (err == 0 ? 0 : ENXIO); } static int smi_acquire(struct rtl8366rb_softc *sc, int sleep) { int r = 0; if (sleep == RTL_WAITOK) RTL_LOCK(sc); else if (RTL_TRYLOCK(sc) == 0) return (EWOULDBLOCK); if (sc->smi_acquired == RTL_SMI_ACQUIRED) r = EBUSY; else { r = iicbus_request_bus(device_get_parent(sc->dev), sc->dev, \ sleep == RTL_WAITOK ? 
IIC_WAIT : IIC_DONTWAIT); if (r == 0) sc->smi_acquired = RTL_SMI_ACQUIRED; } RTL_UNLOCK(sc); return (r); } static int smi_release(struct rtl8366rb_softc *sc, int sleep) { if (sleep == RTL_WAITOK) RTL_LOCK(sc); else if (RTL_TRYLOCK(sc) == 0) return (EWOULDBLOCK); RTL_SMI_ACQUIRED_ASSERT(sc); iicbus_release_bus(device_get_parent(sc->dev), sc->dev); sc->smi_acquired = 0; RTL_UNLOCK(sc); return (0); } static int smi_select(device_t dev, int op, int sleep) { struct rtl8366rb_softc *sc; int err, i; device_t iicbus; struct iicbus_ivar *devi; int slave; sc = device_get_softc(dev); iicbus = device_get_parent(dev); devi = IICBUS_IVAR(dev); slave = devi->addr; RTL_SMI_ACQUIRED_ASSERT((struct rtl8366rb_softc *)device_get_softc(dev)); if (sc->chip_type == RTL8366SR) { // RTL8366SR work around // this is same work around at probe for (int i=3; i--; ) IICBUS_STOP(device_get_parent(device_get_parent(dev))); } /* * The chip does not use clock stretching when it is busy, * instead ignoring the command. Retry a few times. 
*/ for (i = RTL_IICBUS_RETRIES; i--; ) { err = iicbus_start(iicbus, slave | op, RTL_IICBUS_TIMEOUT); if (err != IIC_ENOACK) break; if (sleep == RTL_WAITOK) { DEBUG_INCRVAR(iic_select_retries); pause("smi_select", RTL_IICBUS_RETRY_SLEEP); } else break; } return (err); } static int smi_read_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t *data, int sleep) { int err; device_t iicbus; char bytes[2]; int xferd; iicbus = device_get_parent(sc->dev); RTL_SMI_ACQUIRED_ASSERT(sc); bytes[0] = addr & 0xff; bytes[1] = (addr >> 8) & 0xff; err = smi_select(sc->dev, RTL_IICBUS_READ, sleep); if (err != 0) goto out; err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0); if (err != 0) goto out; *data = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff); out: iicbus_stop(iicbus); return (err); } static int smi_write_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t data, int sleep) { int err; device_t iicbus; char bytes[4]; int xferd; iicbus = device_get_parent(sc->dev); RTL_SMI_ACQUIRED_ASSERT(sc); bytes[0] = addr & 0xff; bytes[1] = (addr >> 8) & 0xff; bytes[2] = data & 0xff; bytes[3] = (data >> 8) & 0xff; err = smi_select(sc->dev, RTL_IICBUS_WRITE, sleep); if (err == 0) err = iicbus_write(iicbus, bytes, 4, &xferd, RTL_IICBUS_TIMEOUT); iicbus_stop(iicbus); return (err); } static int smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep) { struct rtl8366rb_softc *sc; int err; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); err = smi_read_locked(sc, addr, data, sleep); smi_release(sc, sleep); DEVERR(dev, err, "smi_read()=%d: addr=%04x\n", addr); return (err == 0 ? 
0 : EIO); } static int smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep) { struct rtl8366rb_softc *sc; int err; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); err = smi_write_locked(sc, addr, data, sleep); smi_release(sc, sleep); DEVERR(dev, err, "smi_write()=%d: addr=%04x\n", addr); return (err == 0 ? 0 : EIO); } static int smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep) { struct rtl8366rb_softc *sc; int err; uint16_t oldv, newv; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); if (err == 0) { err = smi_read_locked(sc, addr, &oldv, sleep); if (err == 0) { newv = oldv & ~mask; newv |= data & mask; if (newv != oldv) err = smi_write_locked(sc, addr, newv, sleep); } } smi_release(sc, sleep); DEVERR(dev, err, "smi_rmw()=%d: addr=%04x\n", addr); return (err == 0 ? 0 : EIO); } static etherswitch_info_t * rtl_getinfo(device_t dev) { struct rtl8366rb_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int rtl_readreg(device_t dev, int reg) { uint16_t data; data = 0; smi_read(dev, reg, &data, RTL_WAITOK); return (data); } static int rtl_writereg(device_t dev, int reg, int value) { return (smi_write(dev, reg, value, RTL_WAITOK)); } static int rtl_getport(device_t dev, etherswitch_port_t *p) { struct rtl8366rb_softc *sc; struct ifmedia *ifm; struct mii_data *mii; struct ifmediareq *ifmr; uint16_t v; int err, vlangroup; sc = device_get_softc(dev); ifmr = &p->es_ifmr; if (p->es_port < 0 || p->es_port >= (sc->numphys + 1)) return (ENXIO); if (sc->phy4cpu && p->es_port == sc->numphys) { vlangroup = RTL8366_PVCR_GET(p->es_port + 1, rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port + 1))); } else { vlangroup = RTL8366_PVCR_GET(p->es_port, rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port))); } p->es_pvid = sc->vid[vlangroup] & ETHERSWITCH_VID_MASK; if (p->es_port < sc->numphys) { mii = device_get_softc(sc->miibus[p->es_port]); ifm = &mii->mii_media; err 
= ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCGIFMEDIA); if (err) return (err); } else { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; smi_read(dev, RTL8366_PLSR_BASE + (RTL8366_NUM_PHYS)/2, &v, RTL_WAITOK); v = v >> (8 * ((RTL8366_NUM_PHYS) % 2)); rtl8366rb_update_ifmedia(v, &ifmr->ifm_status, &ifmr->ifm_active); ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; /* Return our static media list. */ if (ifmr->ifm_count > 0) { ifmr->ifm_count = 1; ifmr->ifm_ulist[0] = IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, 0); } else ifmr->ifm_count = 0; } return (0); } static int rtl_setport(device_t dev, etherswitch_port_t *p) { struct rtl8366rb_softc *sc; int i, err, vlangroup; struct ifmedia *ifm; struct mii_data *mii; int port; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= (sc->numphys + 1)) return (ENXIO); vlangroup = -1; for (i = 0; i < RTL8366_NUM_VLANS; i++) { if ((sc->vid[i] & ETHERSWITCH_VID_MASK) == p->es_pvid) { vlangroup = i; break; } } if (vlangroup == -1) return (ENXIO); if (sc->phy4cpu && p->es_port == sc->numphys) { port = p->es_port + 1; } else { port = p->es_port; } err = smi_rmw(dev, RTL8366_PVCR_REG(port), RTL8366_PVCR_VAL(port, RTL8366_PVCR_PORT_MASK), RTL8366_PVCR_VAL(port, vlangroup), RTL_WAITOK); if (err) return (err); /* CPU Port */ if (p->es_port == sc->numphys) return (0); mii = device_get_softc(sc->miibus[p->es_port]); ifm = &mii->mii_media; err = ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCSIFMEDIA); return (err); } static int rtl_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct rtl8366rb_softc *sc; uint16_t vmcr[3]; int i; int member, untagged; sc = device_get_softc(dev); for (i=0; ies_vlangroup)); vg->es_vid = sc->vid[vg->es_vlangroup]; member = RTL8366_VMCR_MEMBER(vmcr); untagged = RTL8366_VMCR_UNTAG(vmcr); if (sc->phy4cpu) { vg->es_member_ports = ((member & 0x20) >> 1) | (member & 0x0f); 
vg->es_untagged_ports = ((untagged & 0x20) >> 1) | (untagged & 0x0f); } else { vg->es_member_ports = member; vg->es_untagged_ports = untagged; } vg->es_fid = RTL8366_VMCR_FID(vmcr); return (0); } static int rtl_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct rtl8366rb_softc *sc; int g; int member, untagged; sc = device_get_softc(dev); g = vg->es_vlangroup; sc->vid[g] = vg->es_vid; /* VLAN group disabled ? */ if (vg->es_member_ports == 0 && vg->es_untagged_ports == 0 && vg->es_vid == 0) return (0); sc->vid[g] |= ETHERSWITCH_VID_VALID; rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_DOT1Q_REG, g), (vg->es_vid << RTL8366_VMCR_DOT1Q_VID_SHIFT) & RTL8366_VMCR_DOT1Q_VID_MASK); if (sc->phy4cpu) { /* add space at phy4 */ member = (vg->es_member_ports & 0x0f) | ((vg->es_member_ports & 0x10) << 1); untagged = (vg->es_untagged_ports & 0x0f) | ((vg->es_untagged_ports & 0x10) << 1); } else { member = vg->es_member_ports; untagged = vg->es_untagged_ports; } if (sc->chip_type == RTL8366RB) { rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g), ((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) | ((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK)); rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_FID_REG, g), vg->es_fid); } else { rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g), ((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) | ((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK) | ((vg->es_fid << RTL8366_VMCR_FID_FID_SHIFT) & RTL8366_VMCR_FID_FID_MASK)); } return (0); } static int rtl_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode. 
*/ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int rtl_readphy(device_t dev, int phy, int reg) { struct rtl8366rb_softc *sc; uint16_t data; int err, i, sleep; sc = device_get_softc(dev); data = 0; if (phy < 0 || phy >= RTL8366_NUM_PHYS) return (ENXIO); if (reg < 0 || reg >= RTL8366_NUM_PHY_REG) return (ENXIO); sleep = RTL_WAITOK; err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); for (i = RTL_IICBUS_RETRIES; i--; ) { err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_READ, sleep); if (err == 0) err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), 0, sleep); if (err == 0) { err = smi_read_locked(sc, RTL8366_PADR, &data, sleep); break; } DEBUG_INCRVAR(phy_access_retries); DPRINTF(dev, "rtl_readphy(): chip not responsive, retrying %d more times\n", i); pause("rtl_readphy", RTL_IICBUS_RETRY_SLEEP); } smi_release(sc, sleep); DEVERR(dev, err, "rtl_readphy()=%d: phy=%d.%02x\n", phy, reg); return (data); } static int rtl_writephy(device_t dev, int phy, int reg, int data) { struct rtl8366rb_softc *sc; int err, i, sleep; sc = device_get_softc(dev); if (phy < 0 || phy >= RTL8366_NUM_PHYS) return (ENXIO); if (reg < 0 || reg >= RTL8366_NUM_PHY_REG) return (ENXIO); sleep = RTL_WAITOK; err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); for (i = RTL_IICBUS_RETRIES; i--; ) { err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_WRITE, sleep); if (err == 0) err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), data, sleep); if (err == 0) { break; } DEBUG_INCRVAR(phy_access_retries); DPRINTF(dev, "rtl_writephy(): chip not responsive, retrying %d more tiems\n", i); pause("rtl_writephy", RTL_IICBUS_RETRY_SLEEP); } smi_release(sc, sleep); DEVERR(dev, err, "rtl_writephy()=%d: phy=%d.%02x\n", phy, reg); return (err == 0 ? 
0 : EIO); } static int rtl8366rb_ifmedia_upd(if_t ifp) { struct rtl8366rb_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus[if_getdunit(ifp)]); mii_mediachg(mii); return (0); } static void rtl8366rb_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct rtl8366rb_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus[if_getdunit(ifp)]); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static device_method_t rtl8366rb_methods[] = { /* Device interface */ DEVMETHOD(device_identify, rtl8366rb_identify), DEVMETHOD(device_probe, rtl8366rb_probe), DEVMETHOD(device_attach, rtl8366rb_attach), DEVMETHOD(device_detach, rtl8366rb_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, rtl_readphy), DEVMETHOD(miibus_writereg, rtl_writephy), /* MDIO interface */ DEVMETHOD(mdio_readreg, rtl_readphy), DEVMETHOD(mdio_writereg, rtl_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_getconf, rtl_getconf), DEVMETHOD(etherswitch_getinfo, rtl_getinfo), DEVMETHOD(etherswitch_readreg, rtl_readreg), DEVMETHOD(etherswitch_writereg, rtl_writereg), DEVMETHOD(etherswitch_readphyreg, rtl_readphy), DEVMETHOD(etherswitch_writephyreg, rtl_writephy), DEVMETHOD(etherswitch_getport, rtl_getport), DEVMETHOD(etherswitch_setport, rtl_setport), DEVMETHOD(etherswitch_getvgroup, rtl_getvgroup), DEVMETHOD(etherswitch_setvgroup, rtl_setvgroup), DEVMETHOD_END }; DEFINE_CLASS_0(rtl8366rb, rtl8366rb_driver, rtl8366rb_methods, sizeof(struct rtl8366rb_softc)); DRIVER_MODULE(rtl8366rb, iicbus, rtl8366rb_driver, 0, 0); DRIVER_MODULE(miibus, rtl8366rb, miibus_driver, 0, 0); DRIVER_MODULE(mdio, rtl8366rb, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, rtl8366rb, etherswitch_driver, 0, 0); MODULE_VERSION(rtl8366rb, 1); MODULE_DEPEND(rtl8366rb, iicbus, 1, 1, 1); /* XXX which versions? 
*/ MODULE_DEPEND(rtl8366rb, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(rtl8366rb, etherswitch, 1, 1, 1); /* XXX which versions? */ diff --git a/sys/dev/firewire/firewire.c b/sys/dev/firewire/firewire.c index 4bb683cccfb7..93a414405c17 100644 --- a/sys/dev/firewire/firewire.c +++ b/sys/dev/firewire/firewire.c @@ -1,2386 +1,2386 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include /* used by smbus and newbus */ #include #include #include #include #include #include struct crom_src_buf { struct crom_src src; struct crom_chunk root; struct crom_chunk vendor; struct crom_chunk hw; }; int firewire_debug = 0, try_bmr = 1, hold_count = 0; SYSCTL_INT(_debug, OID_AUTO, firewire_debug, CTLFLAG_RW, &firewire_debug, 0, "FireWire driver debug flag"); SYSCTL_NODE(_hw, OID_AUTO, firewire, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "FireWire Subsystem"); SYSCTL_INT(_hw_firewire, OID_AUTO, try_bmr, CTLFLAG_RW, &try_bmr, 0, "Try to be a bus manager"); SYSCTL_INT(_hw_firewire, OID_AUTO, hold_count, CTLFLAG_RW, &hold_count, 0, "Number of count of bus resets for removing lost device information"); MALLOC_DEFINE(M_FW, "firewire", "FireWire"); MALLOC_DEFINE(M_FWXFER, "fw_xfer", "XFER/FireWire"); #define FW_MAXASYRTY 4 devclass_t firewire_devclass; static void firewire_identify(driver_t *, device_t); static int firewire_probe(device_t); static int firewire_attach(device_t); static int firewire_detach(device_t); static int firewire_resume(device_t); static void firewire_xfer_timeout(void *, int); static device_t firewire_add_child(device_t, u_int, const char *, int); static void fw_try_bmr(void *); static void fw_try_bmr_callback(struct fw_xfer *); static void fw_asystart(struct fw_xfer *); static int fw_get_tlabel(struct firewire_comm *, struct fw_xfer *); static void 
fw_bus_probe(void *); static void fw_attach_dev(struct firewire_comm *); static void fw_bus_probe_thread(void *); #ifdef FW_VMACCESS static void fw_vmaccess (struct fw_xfer *); #endif static int fw_bmr (struct firewire_comm *); static void fw_dump_hdr(struct fw_pkt *, char *); static device_method_t firewire_methods[] = { /* Device interface */ DEVMETHOD(device_identify, firewire_identify), DEVMETHOD(device_probe, firewire_probe), DEVMETHOD(device_attach, firewire_attach), DEVMETHOD(device_detach, firewire_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, firewire_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, firewire_add_child), DEVMETHOD_END }; char *linkspeed[] = { "S100", "S200", "S400", "S800", "S1600", "S3200", "undef", "undef" }; static char *tcode_str[] = { "WREQQ", "WREQB", "WRES", "undef", "RREQQ", "RREQB", "RRESQ", "RRESB", "CYCS", "LREQ", "STREAM", "LRES", "undef", "undef", "PHY", "undef" }; /* IEEE-1394a Table C-2 Gap count as a function of hops*/ #define MAX_GAPHOP 15 u_int gap_cnt[] = { 5, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40}; static driver_t firewire_driver = { "firewire", firewire_methods, sizeof(struct firewire_softc), }; /* * Lookup fwdev by node id. */ struct fw_device * fw_noderesolve_nodeid(struct firewire_comm *fc, int dst) { struct fw_device *fwdev; FW_GLOCK(fc); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->dst == dst && fwdev->status != FWDEVINVAL) break; FW_GUNLOCK(fc); return fwdev; } /* * Lookup fwdev by EUI64. */ struct fw_device * fw_noderesolve_eui64(struct firewire_comm *fc, struct fw_eui64 *eui) { struct fw_device *fwdev; FW_GLOCK(fc); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, *eui)) break; FW_GUNLOCK(fc); if (fwdev == NULL) return NULL; if (fwdev->status == FWDEVINVAL) return NULL; return fwdev; } /* * Async. request procedure for userland application. 
 */
/*
 * Validate and launch an asynchronous transaction.
 *
 * Checks the caller-built packet header in xfer->send.hdr against the
 * controller's tcode table, assigns a transaction label when the tcode
 * requires one, and hands the xfer to fw_asystart().
 *
 * Returns 0 on success, EINVAL on a malformed request, EAGAIN when the
 * bus is not ready or the queue/tlabel space is exhausted.
 * ("sub" is currently unused by the visible code path.)
 */
int
fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer)
{
	int err = 0;
	struct fw_xferq *xferq;
	int len;
	struct fw_pkt *fp;
	int tcode;
	struct tcode_info *info;

	if (xfer == NULL)
		return EINVAL;
	/* A completion handler is mandatory; it is invoked on done/error. */
	if (xfer->hand == NULL) {
		printf("hand == NULL\n");
		return EINVAL;
	}
	fp = &xfer->send.hdr;
	tcode = fp->mode.common.tcode & 0xf;
	info = &fc->tcode[tcode];
	/* flag == 0 marks tcodes the controller does not support. */
	if (info->flag == 0) {
		printf("invalid tcode=%x\n", tcode);
		return EINVAL;
	}

	/* XXX allow bus explore packets only after bus reset */
	/*
	 * Before the bus reaches FWBUSEXPLORE, only quadlet reads of the
	 * config ROM window (0xffff f000 0000 - 0xffff f000 1000) pass.
	 */
	if ((fc->status < FWBUSEXPLORE) &&
	    ((tcode != FWTCODE_RREQQ) || (fp->mode.rreqq.dest_hi != 0xffff) ||
	    (fp->mode.rreqq.dest_lo < 0xf0000000) ||
	    (fp->mode.rreqq.dest_lo >= 0xf0001000))) {
		xfer->resp = EAGAIN;
		xfer->flag = FWXF_BUSY;
		return (EAGAIN);
	}

	/* Requests go to the request queue, responses to the response one. */
	if (info->flag & FWTI_REQ)
		xferq = fc->atq;
	else
		xferq = fc->ats;
	len = info->hdr_len;
	if (xfer->send.pay_len > MAXREC(fc->maxrec)) {
		printf("send.pay_len > maxrec\n");
		return EINVAL;
	}
	/*
	 * Cross-check the payload length claimed in the header against
	 * what the caller actually attached.
	 */
	if (info->flag & FWTI_BLOCK_STR)
		len = fp->mode.stream.len;
	else if (info->flag & FWTI_BLOCK_ASY)
		len = fp->mode.rresb.len;
	else
		len = 0;
	if (len != xfer->send.pay_len) {
		printf("len(%d) != send.pay_len(%d) %s(%x)\n",
		    len, xfer->send.pay_len, tcode_str[tcode], tcode);
		return EINVAL;
	}

	if (xferq->start == NULL) {
		printf("xferq->start == NULL\n");
		return EINVAL;
	}
	/* Back-pressure: drop when the transmit queue is full. */
	if (!(xferq->queued < xferq->maxq)) {
		device_printf(fc->bdev, "Discard a packet (queued=%d)\n",
		    xferq->queued);
		return EAGAIN;
	}
	xfer->tl = -1;
	if (info->flag & FWTI_TLABEL) {
		if (fw_get_tlabel(fc, xfer) < 0)
			return EAGAIN;
	}
	xfer->resp = 0;
	xfer->fc = fc;
	xfer->q = xferq;

	fw_asystart(xfer);
	return err;
}
/*
 * Wakeup blocked process.
*/ void fw_xferwake(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; mtx_lock(lock); xfer->flag |= FWXF_WAKE; mtx_unlock(lock); wakeup(xfer); return; } int fw_xferwait(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; int err = 0; mtx_lock(lock); while ((xfer->flag & FWXF_WAKE) == 0) err = msleep(xfer, lock, PWAIT|PCATCH, "fw_xferwait", 0); mtx_unlock(lock); return (err); } /* * Async. request with given xfer structure. */ static void fw_asystart(struct fw_xfer *xfer) { struct firewire_comm *fc = xfer->fc; /* Protect from interrupt/timeout */ FW_GLOCK(fc); xfer->flag = FWXF_INQ; STAILQ_INSERT_TAIL(&xfer->q->q, xfer, link); #if 0 xfer->q->queued++; #endif FW_GUNLOCK(fc); /* XXX just queue for mbuf */ if (xfer->mbuf == NULL) xfer->q->start(fc); return; } static void firewire_identify(driver_t *driver, device_t parent) { - BUS_ADD_CHILD(parent, 0, "firewire", -1); + BUS_ADD_CHILD(parent, 0, "firewire", DEVICE_UNIT_ANY); } static int firewire_probe(device_t dev) { device_set_desc(dev, "IEEE1394(FireWire) bus"); return (0); } /* Just use a per-packet callout? 
 */
/*
 * Harvest split transactions that did not receive a response within the
 * 200 ms split timeout.  Expired xfers are moved off the tlabel lists to
 * a local queue under tlabel_lock, then their handlers are invoked with
 * resp = ETIMEDOUT after the lock is dropped (handlers may sleep/re-queue).
 */
static void
firewire_xfer_timeout(void *arg, int pending)
{
	struct firewire_comm *fc = (struct firewire_comm *)arg;
	struct fw_xfer *xfer, *txfer;
	struct timeval tv;
	struct timeval split_timeout;
	STAILQ_HEAD(, fw_xfer) xfer_timeout;
	int i;

	split_timeout.tv_sec = 0;
	split_timeout.tv_usec = 200 * 1000;	 /* 200 msec */

	/* Anything sent before (now - split_timeout) has expired. */
	microtime(&tv);
	timevalsub(&tv, &split_timeout);
	STAILQ_INIT(&xfer_timeout);

	mtx_lock(&fc->tlabel_lock);
	for (i = 0; i < nitems(fc->tlabels); i++) {
		while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) {
			if ((xfer->flag & FWXF_SENT) == 0)
				/* not sent yet */
				break;
			if (timevalcmp(&xfer->tv, &tv, >))
				/* the rests are newer than this */
				break;
			device_printf(fc->bdev,
			    "split transaction timeout: tl=0x%x flag=0x%02x\n",
			    i, xfer->flag);
			fw_dump_hdr(&xfer->send.hdr, "send");
			xfer->resp = ETIMEDOUT;
			/* Release the label; the xfer no longer owns it. */
			xfer->tl = -1;
			STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel);
			STAILQ_INSERT_TAIL(&xfer_timeout, xfer, tlabel);
		}
	}
	mtx_unlock(&fc->tlabel_lock);
	fc->timeout(fc);

	/* Run completion handlers outside tlabel_lock. */
	STAILQ_FOREACH_SAFE(xfer, &xfer_timeout, tlabel, txfer)
		xfer->hand(xfer);
}

#define WATCHDOG_HZ 10
/*
 * Periodic (WATCHDOG_HZ per second) callout that kicks the timeout task.
 */
static void
firewire_watchdog(void *arg)
{
	struct firewire_comm *fc;
	static int watchdog_clock = 0;

	fc = arg;

	/*
	 * At boot stage, the device interrupt is disabled and
	 * We encounter a timeout easily. To avoid this,
	 * ignore clock interrupt for a while.
	 */
	if (watchdog_clock > WATCHDOG_HZ * 15)
		taskqueue_enqueue(fc->taskqueue, &fc->task_timeout);
	else
		watchdog_clock++;

	callout_reset(&fc->timeout_callout, hz / WATCHDOG_HZ,
	    firewire_watchdog, fc);
}

/*
 * The attach routine.
 */

/*
 * Attach the firewire bus on top of a host controller.  The shared
 * firewire_comm state lives in the parent's softc.  Allocates the CROM
 * source buffer, topology map and speed map, initializes locks and
 * callouts, starts the watchdog and the bus-probe kthread, attaches
 * children, and finally issues a bus reset.
 *
 * Returns 0 on success or ENOMEM if an allocation fails.
 *
 * NOTE(review): on the ENOMEM paths the devfs node created by
 * fwdev_makedev() just above is not torn down, and the kproc_create()
 * return value is ignored — confirm whether detach-time cleanup covers
 * these before relying on repeated attach/detach cycles.
 */
static int
firewire_attach(device_t dev)
{
	int unit;
	struct firewire_softc *sc = device_get_softc(dev);
	device_t pa = device_get_parent(dev);
	struct firewire_comm *fc;

	fc = device_get_softc(pa);
	sc->fc = fc;
	fc->status = FWBUSNOTREADY;

	unit = device_get_unit(dev);
	if (fc->nisodma > FWMAXNDMA)
		fc->nisodma = FWMAXNDMA;

	fwdev_makedev(sc);

	fc->crom_src_buf = malloc(sizeof(struct crom_src_buf), M_FW,
	    M_NOWAIT | M_ZERO);
	if (fc->crom_src_buf == NULL) {
		device_printf(fc->dev,
		    "%s: unable to allocate crom src buffer\n", __func__);
		return ENOMEM;
	}
	fc->topology_map = malloc(sizeof(struct fw_topology_map), M_FW,
	    M_NOWAIT | M_ZERO);
	if (fc->topology_map == NULL) {
		device_printf(fc->dev, "%s: unable to allocate topology map\n",
		    __func__);
		free(fc->crom_src_buf, M_FW);
		return ENOMEM;
	}
	fc->speed_map = malloc(sizeof(struct fw_speed_map), M_FW,
	    M_NOWAIT | M_ZERO);
	if (fc->speed_map == NULL) {
		device_printf(fc->dev, "%s: unable to allocate speed map\n",
		    __func__);
		free(fc->crom_src_buf, M_FW);
		free(fc->topology_map, M_FW);
		return ENOMEM;
	}

	mtx_init(&fc->wait_lock, "fwwait", NULL, MTX_DEF);
	mtx_init(&fc->tlabel_lock, "fwtlabel", NULL, MTX_DEF);
	CALLOUT_INIT(&fc->timeout_callout);
	CALLOUT_INIT(&fc->bmr_callout);
	CALLOUT_INIT(&fc->busprobe_callout);
	TASK_INIT(&fc->task_timeout, 0, firewire_xfer_timeout, fc);

	callout_reset(&sc->fc->timeout_callout, hz,
	    firewire_watchdog, sc->fc);

	/* create thread */
	kproc_create(fw_bus_probe_thread, fc, &fc->probe_thread,
	    0, 0, "fw%d_probe", unit);

	/* Locate our children */
	bus_generic_probe(dev);

	/* launch attachement of the added children */
	bus_generic_attach(dev);

	/* bus_reset */
	FW_GLOCK(fc);
	fw_busreset(fc, FWBUSNOTREADY);
	FW_GUNLOCK(fc);
	fc->ibr(fc);

	return 0;
}

/*
 * Attach it as child.
 */

/*
 * Add a child device (e.g. sbp, fwe, fwip) under the firewire bus and
 * hand it the shared firewire_comm pointer via its ivars.  Returns the
 * new device_t or NULL on failure.
 */
static device_t
firewire_add_child(device_t dev, u_int order, const char *name, int unit)
{
	device_t child;
	struct firewire_softc *sc;

	sc = device_get_softc(dev);
	child = device_add_child(dev, name, unit);
	if (child) {
		device_set_ivars(child, sc->fc);
		device_probe_and_attach(child);
	}

	return child;
}

/*
 * Resume: mark the bus not ready so traffic waits for the post-resume
 * bus reset, then resume children.
 */
static int
firewire_resume(device_t dev)
{
	struct firewire_softc *sc;

	sc = device_get_softc(dev);
	sc->fc->status = FWBUSNOTREADY;

	bus_generic_resume(dev);

	return (0);
}

/*
 * Detach it.
 *
 * Order matters: stop the probe kthread first (it may touch fc), drain
 * pending transmit queues, destroy the devfs nodes and children, stop
 * the callouts, then free per-device and map memory and the locks.
 */
static int
firewire_detach(device_t dev)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_device *fwdev, *fwdev_next;
	int err;

	sc = device_get_softc(dev);
	fc = sc->fc;
	mtx_lock(&fc->wait_lock);
	fc->status = FWBUSDETACH;
	wakeup(fc);
	/* Give the probe thread up to 60 s to notice FWBUSDETACH and exit. */
	if (msleep(fc->probe_thread, &fc->wait_lock, PWAIT, "fwthr", hz * 60))
		printf("firewire probe thread didn't die\n");
	mtx_unlock(&fc->wait_lock);

	if (fc->arq != 0 && fc->arq->maxq > 0)
		fw_drain_txq(fc);

	if ((err = fwdev_destroydev(sc)) != 0)
		return err;

	if ((err = bus_generic_detach(dev)) != 0)
		return err;

	callout_stop(&fc->timeout_callout);
	callout_stop(&fc->bmr_callout);
	callout_stop(&fc->busprobe_callout);

	/* XXX xfer_free and untimeout on all xfers */
	for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL;
	    fwdev = fwdev_next) {
		fwdev_next = STAILQ_NEXT(fwdev, link);
		free(fwdev, M_FW);
	}
	free(fc->topology_map, M_FW);
	free(fc->speed_map, M_FW);
	free(fc->crom_src_buf, M_FW);

	mtx_destroy(&fc->tlabel_lock);
	mtx_destroy(&fc->wait_lock);
	return (0);
}

/*
 * Fail every xfer still queued on one transmit queue with EAGAIN and
 * run its completion through fw_xfer_done().  Caller holds FW_GLOCK.
 */
static void
fw_xferq_drain(struct fw_xferq *xferq)
{
	struct fw_xfer *xfer;

	while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->q, link);
#if 0
		xferq->queued--;
#endif
		xfer->resp = EAGAIN;
		xfer->flag = FWXF_SENTERR;
		fw_xfer_done(xfer);
	}
}

/*
 * Drain all async/isochronous transmit queues and abort every xfer
 * still holding a transaction label.  Aborted tlabel xfers are
 * collected on a local list under tlabel_lock and their handlers run
 * after the lock is dropped.
 */
void
fw_drain_txq(struct firewire_comm *fc)
{
	struct fw_xfer *xfer, *txfer;
	STAILQ_HEAD(, fw_xfer) xfer_drain;
	int i;

	STAILQ_INIT(&xfer_drain);

	FW_GLOCK(fc);
	fw_xferq_drain(fc->atq);
	fw_xferq_drain(fc->ats);
	for (i = 0; i < fc->nisodma; i++)
		fw_xferq_drain(fc->it[i]);
	FW_GUNLOCK(fc);

	mtx_lock(&fc->tlabel_lock);
	for (i = 0; i < 0x40; i++)	/* 0x40 == number of tlabels */
		while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) {
			if (firewire_debug)
				printf("tl=%d flag=%d\n", i, xfer->flag);
			xfer->tl = -1;
			xfer->resp = EAGAIN;
			STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel);
			STAILQ_INSERT_TAIL(&xfer_drain, xfer, tlabel);
		}
	mtx_unlock(&fc->tlabel_lock);

	STAILQ_FOREACH_SAFE(xfer, &xfer_drain, tlabel, txfer)
		xfer->hand(xfer);
}

/*
 * Reset the in-core CSR register mirror to its post-bus-reset defaults
 * (IEEE 1212 / 1394 core registers: state, timeouts, bus manager ID,
 * bandwidth/channel availability, config ROM header).
 */
static void
fw_reset_csr(struct firewire_comm *fc)
{
	int i;

	CSRARC(fc, STATE_CLEAR)
	    = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14;
	CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
	CSRARC(fc, NODE_IDS) = 0x3f;

	CSRARC(fc, TOPO_MAP + 8) = 0;
	fc->irm = -1;
	fc->max_node = -1;

	for (i = 2; i < 0x100 / 4 - 2; i++) {
		CSRARC(fc, SPED_MAP + i * 4) = 0;
	}
	CSRARC(fc, STATE_CLEAR)
	    = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14;
	CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
	CSRARC(fc, RESET_START) = 0;
	CSRARC(fc, SPLIT_TIMEOUT_HI) = 0;
	CSRARC(fc, SPLIT_TIMEOUT_LO) = 800 << 19;
	CSRARC(fc, CYCLE_TIME) = 0x0;
	CSRARC(fc, BUS_TIME) = 0x0;
	CSRARC(fc, BUS_MGR_ID) = 0x3f;	/* 0x3f == no bus manager yet */
	CSRARC(fc, BANDWIDTH_AV) = 4915;
	CSRARC(fc, CHANNELS_AV_HI) = 0xffffffff;
	CSRARC(fc, CHANNELS_AV_LO) = 0xffffffff;
	CSRARC(fc, IP_CHANNELS) = (1U << 31);

	CSRARC(fc, CONF_ROM) = 0x04 << 24;
	CSRARC(fc, CONF_ROM + 4) = 0x31333934; /* means strings 1394 */
	CSRARC(fc, CONF_ROM + 8) = 1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
	    0xff << 16 | 0x09 << 8;
	CSRARC(fc, CONF_ROM + 0xc) = 0;

	/* DV depend CSRs see blue book */
	CSRARC(fc, oPCR) &= ~DV_BROADCAST_ON;
	CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON;

	CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14);
	CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
}

/*
 * One-time initialization of the config-ROM source: fill in the bus
 * info block (capabilities, EUI-64, link speed) from fc and point
 * fc->crom_src / fc->crom_root at the pre-allocated buffer.
 */
static void
fw_init_crom(struct firewire_comm *fc)
{
	struct crom_src *src;

	src = &fc->crom_src_buf->src;
	bzero(src, sizeof(struct crom_src));

	/* BUS info sample */
	src->hdr.info_len = 4;

	src->businfo.bus_name = CSR_BUS_NAME_IEEE1394;

	src->businfo.irmc = 1;
	src->businfo.cmc = 1;
	src->businfo.isc = 1;
	src->businfo.bmc = 1;
	src->businfo.pmc = 0;
	src->businfo.cyc_clk_acc = 100;
	src->businfo.max_rec = fc->maxrec;
	src->businfo.max_rom = MAXROM_4;
#define FW_GENERATION_CHANGEABLE 2
	src->businfo.generation = FW_GENERATION_CHANGEABLE;
	src->businfo.link_spd = fc->speed;

	src->businfo.eui64.hi = fc->eui.hi;
	src->businfo.eui64.lo = fc->eui.lo;

	STAILQ_INIT(&src->chunk_list);

	fc->crom_src = src;
	fc->crom_root = &fc->crom_src_buf->root;
}

/*
 * Rebuild the root directory of our config ROM from scratch: vendor
 * entries, FreeBSD version, and the hostname (read under the prison0
 * lock).  Called on every bus reset; child drivers append their own
 * chunks afterwards via post_busreset.
 */
static void
fw_reset_crom(struct firewire_comm *fc)
{
	struct crom_src_buf *buf;
	struct crom_src *src;
	struct crom_chunk *root;

	buf = fc->crom_src_buf;
	src = fc->crom_src;
	root = fc->crom_root;

	STAILQ_INIT(&src->chunk_list);

	bzero(root, sizeof(struct crom_chunk));
	crom_add_chunk(src, NULL, root, 0);
	crom_add_entry(root, CSRKEY_NCAP, 0x0083c0); /* XXX */
	/* private company_id */
	crom_add_entry(root, CSRKEY_VENDOR, CSRVAL_VENDOR_PRIVATE);
	crom_add_simple_text(src, root, &buf->vendor, "FreeBSD Project");
	crom_add_entry(root, CSRKEY_HW, __FreeBSD_version);
	mtx_lock(&prison0.pr_mtx);
	crom_add_simple_text(src, root, &buf->hw, prison0.pr_hostname);
	mtx_unlock(&prison0.pr_mtx);
}

/*
 * Called after bus reset.
 */

/*
 * Bus-reset processing: stop any bus-manager election in progress,
 * reset the CSR mirror, (re)build the config ROM, and notify every
 * attached child driver via its post_busreset hook.  If the resulting
 * ROM image differs from the previous one, the bus-info generation is
 * bumped (modulo the 2..0xF window) so peers re-probe us.
 * Caller must hold FW_GLOCK.
 */
void
fw_busreset(struct firewire_comm *fc, uint32_t new_status)
{
	struct firewire_dev_comm *fdc;
	struct crom_src *src;
	device_t *devlistp;
	uint32_t *newrom;
	int i, devcnt;

	FW_GLOCK_ASSERT(fc);
	if (fc->status == FWBUSMGRELECT)
		callout_stop(&fc->bmr_callout);

	fc->status = new_status;
	fw_reset_csr(fc);

	if (fc->status == FWBUSNOTREADY)
		fw_init_crom(fc);

	fw_reset_crom(fc);

	if (device_get_children(fc->bdev, &devlistp, &devcnt) == 0) {
		for (i = 0; i < devcnt; i++)
			if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
				fdc = device_get_softc(devlistp[i]);
				if (fdc->post_busreset != NULL)
					fdc->post_busreset(fdc);
			}
		free(devlistp, M_TEMP);
	}

	src = &fc->crom_src_buf->src;
	/*
	 * If the old config rom needs to be overwritten,
	 * bump the businfo.generation indicator to
	 * indicate that we need to be reprobed
	 * See 1394a-2000 8.3.2.5.4 for more details.
	 * generation starts at 2 and rolls over at 0xF
	 * back to 2.
	 *
	 * A generation of 0 indicates a device
	 * that is not 1394a-2000 compliant.
	 * A generation of 1 indicates a device that
	 * does not change it's Bus Info Block or
	 * Configuration ROM.
	 */
#define FW_MAX_GENERATION 0xF
	newrom = malloc(CROMSIZE, M_FW, M_NOWAIT | M_ZERO);
	/* NOTE(review): 'src' is re-assigned to the same value here. */
	src = &fc->crom_src_buf->src;
	crom_load(src, newrom, CROMSIZE);
	if (bcmp(newrom, fc->config_rom, CROMSIZE) != 0) {
		/* Bump generation and reload. */
		src->businfo.generation++;

		/* Handle generation count wraps. */
		if (src->businfo.generation < FW_GENERATION_CHANGEABLE)
			src->businfo.generation = FW_GENERATION_CHANGEABLE;

		/* Recalculate CRC to account for generation change. */
		crom_load(src, newrom, CROMSIZE);
		bcopy(newrom, fc->config_rom, CROMSIZE);
	}
	free(newrom, M_FW);
}

/*
 * Call once after reboot: put every async/iso transfer queue and the
 * CSR topology/speed map headers into a known-empty state and init the
 * bind and tlabel lists.
 */
void
fw_init(struct firewire_comm *fc)
{
	int i;
#ifdef FW_VMACCESS
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
#endif

	fc->arq->queued = 0;
	fc->ars->queued = 0;
	fc->atq->queued = 0;
	fc->ats->queued = 0;

	fc->arq->buf = NULL;
	fc->ars->buf = NULL;
	fc->atq->buf = NULL;
	fc->ats->buf = NULL;

	fc->arq->flag = 0;
	fc->ars->flag = 0;
	fc->atq->flag = 0;
	fc->ats->flag = 0;

	STAILQ_INIT(&fc->atq->q);
	STAILQ_INIT(&fc->ats->q);

	for (i = 0; i < fc->nisodma; i++) {
		fc->it[i]->queued = 0;
		fc->ir[i]->queued = 0;

		fc->it[i]->start = NULL;
		fc->ir[i]->start = NULL;

		fc->it[i]->buf = NULL;
		fc->ir[i]->buf = NULL;

		fc->it[i]->flag = FWXFERQ_STREAM;
		fc->ir[i]->flag = FWXFERQ_STREAM;

		STAILQ_INIT(&fc->it[i]->q);
		STAILQ_INIT(&fc->ir[i]->q);
	}

	fc->arq->maxq = FWMAXQUEUE;
	fc->ars->maxq = FWMAXQUEUE;
	fc->atq->maxq = FWMAXQUEUE;
	fc->ats->maxq = FWMAXQUEUE;

	for (i = 0; i < fc->nisodma; i++) {
		fc->ir[i]->maxq = FWMAXQUEUE;
		fc->it[i]->maxq = FWMAXQUEUE;
	}

	CSRARC(fc, TOPO_MAP) = 0x3f1 << 16;
	CSRARC(fc, TOPO_MAP + 4) = 1;
	CSRARC(fc, SPED_MAP) = 0x3f1 << 16;
	CSRARC(fc, SPED_MAP + 4) = 1;

	STAILQ_INIT(&fc->devices);

	/* Initialize Async handlers */
	STAILQ_INIT(&fc->binds);
	for (i = 0; i < 0x40; i++) {
		STAILQ_INIT(&fc->tlabels[i]);
	}

	/* DV depend CSRs see blue book */
#if 0
	CSRARC(fc, oMPR) = 0x3fff0001; /* # output channel = 1 */
	CSRARC(fc, oPCR) = 0x8000007a;
	for (i = 4; i < 0x7c/4; i += 4) {
		CSRARC(fc, i + oPCR) = 0x8000007a;
	}

	CSRARC(fc, iMPR) = 0x00ff0001; /* # input channel = 1 */
	CSRARC(fc, iPCR) = 0x803f0000;
	for (i = 4; i < 0x7c/4; i += 4) {
		CSRARC(fc, i + iPCR) = 0x0;
	}
#endif

	fc->crom_src_buf = NULL;

#ifdef FW_VMACCESS
	/* NOTE(review): stale debug code; field names don't match fw_bind. */
	xfer = fw_xfer_alloc();
	if (xfer == NULL)
		return;

	fwb = malloc(sizeof(struct fw_bind), M_FW, M_NOWAIT);
	if (fwb == NULL) {
		fw_xfer_free(xfer);
		return;
	}
	xfer->hand = fw_vmaccess;
	xfer->fc = fc;
	xfer->sc = NULL;

	fwb->start_hi = 0x2;
	fwb->start_lo = 0;
	fwb->addrlen = 0xffffffff;
	fwb->xfer = xfer;
	fw_bindadd(fc, fwb);
#endif
}

/* -1/0/1 three-way compare of an address against a bind's [start,end]. */
#define BIND_CMP(addr, fwb) (((addr) < (fwb)->start)? -1 : \
    ((fwb)->end < (addr)) ? 1 : 0)

/*
 * To lookup bound process from IEEE1394 address.
 * Returns the fw_bind whose range covers dest_hi:dest_lo, or NULL.
 */
struct fw_bind *
fw_bindlookup(struct firewire_comm *fc, uint16_t dest_hi, uint32_t dest_lo)
{
	u_int64_t addr;
	struct fw_bind *tfw, *r = NULL;

	addr = ((u_int64_t)dest_hi << 32) | dest_lo;
	FW_GLOCK(fc);
	STAILQ_FOREACH(tfw, &fc->binds, fclist)
		if (BIND_CMP(addr, tfw) == 0) {
			r = tfw;
			break;
		}
	FW_GUNLOCK(fc);
	return (r);
}

/*
 * To bind IEEE1394 address block to process.
 * The bind list is kept sorted by address; overlapping ranges are
 * rejected with EBUSY, an inverted range with EINVAL.
 */
int
fw_bindadd(struct firewire_comm *fc, struct fw_bind *fwb)
{
	struct fw_bind *tfw, *prev = NULL;
	int r = 0;

	if (fwb->start > fwb->end) {
		printf("%s: invalid range\n", __func__);
		return EINVAL;
	}

	FW_GLOCK(fc);
	STAILQ_FOREACH(tfw, &fc->binds, fclist) {
		if (fwb->end < tfw->start)
			break;
		prev = tfw;
	}
	if (prev == NULL)
		STAILQ_INSERT_HEAD(&fc->binds, fwb, fclist);
	else if (prev->end < fwb->start)
		STAILQ_INSERT_AFTER(&fc->binds, prev, fwb, fclist);
	else {
		printf("%s: bind failed\n", __func__);
		r = EBUSY;
	}
	FW_GUNLOCK(fc);
	return (r);
}

/*
 * To free IEEE1394 address block.
 * Returns 0 on success, 1 if the bind was not on the list.
 */
int
fw_bindremove(struct firewire_comm *fc, struct fw_bind *fwb)
{
#if 0
	struct fw_xfer *xfer, *next;
#endif
	struct fw_bind *tfw;
	int s;

	s = splfw();
	FW_GLOCK(fc);
	STAILQ_FOREACH(tfw, &fc->binds, fclist)
		if (tfw == fwb) {
			STAILQ_REMOVE(&fc->binds, fwb, fw_bind, fclist);
			goto found;
		}

	printf("%s: no such binding\n", __func__);
	FW_GUNLOCK(fc);
	splx(s);
	return (1);
found:
#if 0
	/* shall we do this? */
	for (xfer = STAILQ_FIRST(&fwb->xferlist); xfer != NULL; xfer = next) {
		next = STAILQ_NEXT(xfer, link);
		fw_xfer_free(xfer);
	}
	STAILQ_INIT(&fwb->xferlist);
#endif

	FW_GUNLOCK(fc);
	splx(s);
	return 0;
}

/*
 * Pre-allocate n xfers with slen/rlen payload buffers onto list q.
 * Returns the number actually allocated (i < n on allocation failure).
 */
int
fw_xferlist_add(struct fw_xferlist *q, struct malloc_type *type,
    int slen, int rlen, int n,
    struct firewire_comm *fc, void *sc, void (*hand)(struct fw_xfer *))
{
	int i, s;
	struct fw_xfer *xfer;

	for (i = 0; i < n; i++) {
		xfer = fw_xfer_alloc_buf(type, slen, rlen);
		if (xfer == NULL)
			return (i);
		xfer->fc = fc;
		xfer->sc = sc;
		xfer->hand = hand;
		s = splfw();
		STAILQ_INSERT_TAIL(q, xfer, link);
		splx(s);
	}
	return (n);
}

/* Free every xfer (and its buffers) on list q and reset the list. */
void
fw_xferlist_remove(struct fw_xferlist *q)
{
	struct fw_xfer *xfer, *next;

	for (xfer = STAILQ_FIRST(q); xfer != NULL; xfer = next) {
		next = STAILQ_NEXT(xfer, link);
		fw_xfer_free_buf(xfer);
	}
	STAILQ_INIT(q);
}

/*
 * dump packet header
 */
static void
fw_dump_hdr(struct fw_pkt *fp, char *prefix)
{
	printf("%s: dst=0x%02x tl=0x%02x rt=%d tcode=0x%x pri=0x%x "
	    "src=0x%03x\n", prefix,
	    fp->mode.hdr.dst & 0x3f,
	    fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3,
	    fp->mode.hdr.tcode, fp->mode.hdr.pri,
	    fp->mode.hdr.src);
}

/*
 * To free transaction label.
 * Unlinks xfer from its tlabel queue under tlabel_lock; a negative
 * xfer->tl means no label is held.  A missing entry indicates internal
 * state corruption and is loudly diagnosed instead of panicking.
 */
static void
fw_tl_free(struct firewire_comm *fc, struct fw_xfer *xfer)
{
	struct fw_xfer *txfer;

	mtx_lock(&fc->tlabel_lock);
	if (xfer->tl < 0) {
		mtx_unlock(&fc->tlabel_lock);
		return;
	}
	/* make sure the label is allocated */
	STAILQ_FOREACH(txfer, &fc->tlabels[xfer->tl], tlabel)
		if (txfer == xfer)
			break;
	if (txfer == NULL) {
		printf("%s: the xfer is not in the queue "
		    "(tlabel=%d, flag=0x%x)\n",
		    __FUNCTION__, xfer->tl, xfer->flag);
		fw_dump_hdr(&xfer->send.hdr, "send");
		fw_dump_hdr(&xfer->recv.hdr, "recv");
		kdb_backtrace();
		mtx_unlock(&fc->tlabel_lock);
		return;
	}
	STAILQ_REMOVE(&fc->tlabels[xfer->tl], xfer, fw_xfer, tlabel);
	xfer->tl = -1;
	mtx_unlock(&fc->tlabel_lock);
	return;
}

/*
 * To obtain XFER structure by transaction label.
 */

/*
 * Look up the pending request xfer that matches an incoming response
 * (source node + transaction label), and sanity-check that the response
 * tcode is valid for the request's tcode.  Returns NULL if no matching
 * request is pending or the tcode pairing is wrong.
 */
static struct fw_xfer *
fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel, int tcode)
{
	struct fw_xfer *xfer;
	int s = splfw();
	int req;

	mtx_lock(&fc->tlabel_lock);
	STAILQ_FOREACH(xfer, &fc->tlabels[tlabel], tlabel)
		if (xfer->send.hdr.mode.hdr.dst == node) {
			mtx_unlock(&fc->tlabel_lock);
			splx(s);
			KASSERT(xfer->tl == tlabel,
			    ("xfer->tl 0x%x != 0x%x", xfer->tl, tlabel));
			/* extra sanity check */
			req = xfer->send.hdr.mode.hdr.tcode;
			if (xfer->fc->tcode[req].valid_res != tcode) {
				printf("%s: invalid response tcode "
				    "(0x%x for 0x%x)\n", __FUNCTION__,
				    tcode, req);
				return (NULL);
			}

			if (firewire_debug > 2)
				printf("fw_tl2xfer: found tl=%d\n", tlabel);
			return (xfer);
		}
	mtx_unlock(&fc->tlabel_lock);
	if (firewire_debug > 1)
		printf("fw_tl2xfer: not found tl=%d\n", tlabel);
	splx(s);
	return (NULL);
}

/*
 * To allocate IEEE1394 XFER structure.
 * Zeroed, with no tlabel assigned yet (tl == -1).  May return NULL
 * (M_NOWAIT).
 */
struct fw_xfer *
fw_xfer_alloc(struct malloc_type *type)
{
	struct fw_xfer *xfer;

	xfer = malloc(sizeof(struct fw_xfer), type, M_NOWAIT | M_ZERO);
	if (xfer == NULL)
		return xfer;

	xfer->malloc = type;
	xfer->tl = -1;

	return xfer;
}

/*
 * Allocate an xfer plus send/recv payload buffers of the given sizes.
 * The send buffer is zeroed; the recv buffer is not (it will be
 * overwritten by received data).  Everything is rolled back on any
 * allocation failure.
 */
struct fw_xfer *
fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len)
{
	struct fw_xfer *xfer;

	xfer = fw_xfer_alloc(type);
	if (xfer == NULL)
		return (NULL);
	xfer->send.pay_len = send_len;
	xfer->recv.pay_len = recv_len;
	if (send_len > 0) {
		xfer->send.payload = malloc(send_len, type, M_NOWAIT | M_ZERO);
		if (xfer->send.payload == NULL) {
			fw_xfer_free(xfer);
			return (NULL);
		}
	}
	if (recv_len > 0) {
		xfer->recv.payload = malloc(recv_len, type, M_NOWAIT);
		if (xfer->recv.payload == NULL) {
			if (xfer->send.payload != NULL)
				free(xfer->send.payload, type);
			fw_xfer_free(xfer);
			return (NULL);
		}
	}
	return (xfer);
}

/*
 * IEEE1394 XFER post process.
 * Releases the transaction label and invokes the completion handler.
 */
void
fw_xfer_done(struct fw_xfer *xfer)
{
	if (xfer->hand == NULL) {
		printf("hand == NULL\n");
		return;
	}

	if (xfer->fc == NULL)
		panic("fw_xfer_done: why xfer->fc is NULL?");

	fw_tl_free(xfer->fc, xfer);
	xfer->hand(xfer);
}

/*
 * Detach an xfer from any queue and tlabel it is still linked on and
 * reset it to the FWXF_INIT state so it can be reused or freed.
 */
void
fw_xfer_unload(struct fw_xfer *xfer)
{
	if (xfer == NULL)
		return;

	if (xfer->fc != NULL) {
		FW_GLOCK(xfer->fc);
		if (xfer->flag & FWXF_INQ) {
			STAILQ_REMOVE(&xfer->q->q, xfer, fw_xfer, link);
			xfer->flag &= ~FWXF_INQ;
#if 0
			xfer->q->queued--;
#endif
		}
		FW_GUNLOCK(xfer->fc);

		/*
		 * Ensure that any tlabel owner can't access this
		 * xfer after it's freed.
		 */
		fw_tl_free(xfer->fc, xfer);
#if 1
		if (xfer->flag & FWXF_START)
			/*
			 * This could happen if:
			 *  1. We call fwohci_arcv() before fwohci_txd().
			 *  2. firewire_watch() is called.
			 */
			printf("fw_xfer_free FWXF_START\n");
#endif
	}
	xfer->flag = FWXF_INIT;
	xfer->resp = 0;
}

/*
 * To free IEEE1394 XFER structure.
 * Variant that also frees the send/recv payload buffers.
 */
void
fw_xfer_free_buf(struct fw_xfer *xfer)
{
	if (xfer == NULL) {
		printf("%s: xfer == NULL\n", __func__);
		return;
	}
	fw_xfer_unload(xfer);
	if (xfer->send.payload != NULL)
		free(xfer->send.payload, xfer->malloc);
	if (xfer->recv.payload != NULL)
		free(xfer->recv.payload, xfer->malloc);
	free(xfer, xfer->malloc);
}

/* Free an xfer whose payload buffers (if any) are owned elsewhere. */
void
fw_xfer_free(struct fw_xfer *xfer)
{
	if (xfer == NULL) {
		printf("%s: xfer == NULL\n", __func__);
		return;
	}
	fw_xfer_unload(xfer);
	free(xfer, xfer->malloc);
}

/* Completion handler for fire-and-forget requests: just free the xfer. */
void
fw_asy_callback_free(struct fw_xfer *xfer)
{
#if 0
	printf("asyreq done flag=0x%02x resp=%d\n",
	    xfer->flag, xfer->resp);
#endif
	fw_xfer_free(xfer);
}

/*
 * To configure PHY.
 */

/*
 * Send a PHY configuration packet.  root_node >= 0 forces that node to
 * become root (R bit + root ID); gap_count >= 0 sets a new gap count
 * (T bit + count).  Pass -1 to leave either field alone.  The second
 * quadlet is the bitwise inverse of the first, as the PHY packet format
 * requires.
 */
static void
fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count)
{
	struct fw_xfer *xfer;
	struct fw_pkt *fp;

	fc->status = FWBUSPHYCONF;

	xfer = fw_xfer_alloc(M_FWXFER);
	if (xfer == NULL)
		return;
	xfer->fc = fc;
	xfer->hand = fw_asy_callback_free;	/* freed on completion */

	fp = &xfer->send.hdr;
	fp->mode.ld[1] = 0;
	if (root_node >= 0)
		fp->mode.ld[1] |= (1 << 23) | (root_node & 0x3f) << 24;
	if (gap_count >= 0)
		fp->mode.ld[1] |= (1 << 22) | (gap_count & 0x3f) << 16;
	fp->mode.ld[2] = ~fp->mode.ld[1];
	/* XXX Dangerous, how to pass PHY packet to device driver */
	fp->mode.common.tcode |= FWTCODE_PHY;

	if (firewire_debug)
		device_printf(fc->bdev, "%s: root_node=%d gap_count=%d\n",
		    __func__, root_node, gap_count);
	fw_asyreq(fc, -1, xfer);
}

/*
 * Dump self ID.
 * Decodes both packet #0 and the sequel pages (extra port status).
 */
static void
fw_print_sid(uint32_t sid)
{
	union fw_self_id *s;
	s = (union fw_self_id *) &sid;
	if (s->p0.sequel) {
		if (s->p1.sequence_num == FW_SELF_ID_PAGE0) {
			printf("node:%d p3:%d p4:%d p5:%d p6:%d p7:%d"
			    "p8:%d p9:%d p10:%d\n",
			    s->p1.phy_id, s->p1.port3, s->p1.port4,
			    s->p1.port5, s->p1.port6, s->p1.port7,
			    s->p1.port8, s->p1.port9, s->p1.port10);
		} else if (s->p2.sequence_num == FW_SELF_ID_PAGE1) {
			printf("node:%d p11:%d p12:%d p13:%d p14:%d p15:%d\n",
			    s->p2.phy_id, s->p2.port11, s->p2.port12,
			    s->p2.port13, s->p2.port14, s->p2.port15);
		} else {
			printf("node:%d Unknown Self ID Page number %d\n",
			    s->p1.phy_id, s->p1.sequence_num);
		}
	} else {
		printf("node:%d link:%d gap:%d spd:%d con:%d pwr:%d"
		    " p0:%d p1:%d p2:%d i:%d m:%d\n",
		    s->p0.phy_id, s->p0.link_active, s->p0.gap_count,
		    s->p0.phy_speed, s->p0.contender,
		    s->p0.power_class, s->p0.port0, s->p0.port1,
		    s->p0.port2, s->p0.initiated_reset, s->p0.more_packets);
	}
}

/*
 * To receive self ID.
 */

/*
 * Process the self-ID stream received after a bus reset.  Each self-ID
 * packet arrives as a quadlet followed by its bitwise inverse (len is
 * total bytes, so sid_cnt = len / 8).  Rebuilds the topology map and
 * speed map, elects the isochronous resource manager candidate, mirrors
 * both maps into the CSR space (byte-swapped where the spec requires),
 * then either claims/elects the bus manager or marks election done, and
 * finally schedules the bus probe.
 */
void
fw_sidrcv(struct firewire_comm *fc, uint32_t *sid, u_int len)
{
	uint32_t *p;
	union fw_self_id *self_id;
	u_int i, j, node, c_port = 0, i_branch = 0;

	fc->sid_cnt = len / (sizeof(uint32_t) * 2);
	fc->max_node = fc->nodeid & 0x3f;
	CSRARC(fc, NODE_IDS) = ((uint32_t)fc->nodeid) << 16;
	fc->status = FWBUSCYMELECT;
	fc->topology_map->crc_len = 2;
	fc->topology_map->generation++;
	fc->topology_map->self_id_count = 0;
	fc->topology_map->node_count= 0;
	fc->speed_map->generation++;
	fc->speed_map->crc_len = 1 + (64 * 64 + 3) / 4;
	self_id = &fc->topology_map->self_id[0];
	for (i = 0; i < fc->sid_cnt; i++) {
		/* Check quadlet + inverted-quadlet integrity. */
		if (sid[1] != ~sid[0]) {
			device_printf(fc->bdev,
			    "%s: ERROR invalid self-id packet\n", __func__);
			sid += 2;
			continue;
		}
		*self_id = *((union fw_self_id *)sid);
		fc->topology_map->crc_len++;
		if (self_id->p0.sequel == 0) {
			fc->topology_map->node_count++;
			c_port = 0;
			if (firewire_debug)
				fw_print_sid(sid[0]);
			node = self_id->p0.phy_id;
			if (fc->max_node < node)
				fc->max_node = self_id->p0.phy_id;
			/* XXX I'm not sure this is the right speed_map */
			fc->speed_map->speed[node][node] =
			    self_id->p0.phy_speed;
			for (j = 0; j < node; j++) {
				fc->speed_map->speed[j][node] =
				    fc->speed_map->speed[node][j] =
				    min(fc->speed_map->speed[j][j],
					self_id->p0.phy_speed);
			}
			/* IRM candidate: highest phy_id with link+contender. */
			if ((fc->irm == -1 || self_id->p0.phy_id > fc->irm) &&
			    (self_id->p0.link_active && self_id->p0.contender))
				fc->irm = self_id->p0.phy_id;
			/* Port status >= 0x2 means "connected". */
			if (self_id->p0.port0 >= 0x2)
				c_port++;
			if (self_id->p0.port1 >= 0x2)
				c_port++;
			if (self_id->p0.port2 >= 0x2)
				c_port++;
		}
		if (c_port > 2)
			i_branch += (c_port - 2);
		sid += 2;
		self_id++;
		fc->topology_map->self_id_count++;
	}
	/* CRC */
	fc->topology_map->crc = fw_crc16(
	    (uint32_t *)&fc->topology_map->generation,
	    fc->topology_map->crc_len * 4);
	fc->speed_map->crc = fw_crc16(
	    (uint32_t *)&fc->speed_map->generation,
	    fc->speed_map->crc_len * 4);
	/* byteswap and copy to CSR */
	p = (uint32_t *)fc->topology_map;
	for (i = 0; i <= fc->topology_map->crc_len; i++)
		CSRARC(fc, TOPO_MAP + i * 4) = htonl(*p++);
	p = (uint32_t *)fc->speed_map;
	CSRARC(fc, SPED_MAP) = htonl(*p++);
	CSRARC(fc, SPED_MAP + 4) = htonl(*p++);
	/* don't byte-swap uint8_t array */
	bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1) * 4);

	fc->max_hop = fc->max_node - i_branch;
	device_printf(fc->bdev, "%d nodes, maxhop <= %d %s irm(%d) %s\n",
	    fc->max_node + 1, fc->max_hop,
	    (fc->irm == -1) ? "Not IRM capable" : "cable IRM",
	    fc->irm,
	    (fc->irm == fc->nodeid) ? " (me) " : "");

	if (try_bmr && (fc->irm != -1) && (CSRARC(fc, BUS_MGR_ID) == 0x3f)) {
		if (fc->irm == fc->nodeid) {
			fc->status = FWBUSMGRDONE;
			CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, fc->irm);
			fw_bmr(fc);
		} else {
			fc->status = FWBUSMGRELECT;
			callout_reset(&fc->bmr_callout, hz / 8,
			    fw_try_bmr, fc);
		}
	} else
		fc->status = FWBUSMGRDONE;

	callout_reset(&fc->busprobe_callout, hz / 4, fw_bus_probe, fc);
}

/*
 * To probe devices on the IEEE1394 bus.
 * Callout handler: invalidate all known devices and wake the probe
 * thread, which will run fw_explore().
 */
static void
fw_bus_probe(void *arg)
{
	struct firewire_comm *fc;
	struct fw_device *fwdev;
	int s;

	s = splfw();
	fc = arg;
	fc->status = FWBUSEXPLORE;

	/* Invalidate all devices, just after bus reset. */
	if (firewire_debug)
		device_printf(fc->bdev, "%s:"
		    "iterate and invalidate all nodes\n", __func__);
	STAILQ_FOREACH(fwdev, &fc->devices, link)
		if (fwdev->status != FWDEVINVAL) {
			fwdev->status = FWDEVINVAL;
			fwdev->rcnt = 0;
			if (firewire_debug)
				device_printf(fc->bdev, "%s:"
				    "Invalidate Dev ID: %08x%08x\n", __func__,
				    fwdev->eui.hi, fwdev->eui.lo);
		} else {
			if (firewire_debug)
				device_printf(fc->bdev, "%s:"
				    "Dev ID: %08x%08x already invalid\n",
				    __func__, fwdev->eui.hi, fwdev->eui.lo);
		}
	splx(s);

	wakeup(fc);
}

/*
 * Synchronously read 'length' quadlets from a node's CSR space starting
 * at 'offset' (relative to the 0xf0000000 register base) into quad[].
 * Returns 0 on success, -1 on allocation failure, or the response code
 * of the failed read.
 */
static int
fw_explore_read_quads(struct fw_device *fwdev, int offset,
    uint32_t *quad, int length)
{
	struct fw_xfer *xfer;
	uint32_t tmp;
	int i, error;

	for (i = 0; i < length; i++, offset += sizeof(uint32_t)) {
		xfer = fwmem_read_quad(fwdev, NULL, -1, 0xffff,
		    0xf0000000 | offset, &tmp, fw_xferwake);
		if (xfer == NULL)
			return (-1);
		fw_xferwait(xfer);

		if (xfer->resp == 0)
			quad[i] = ntohl(tmp);

		/* save resp before the xfer is freed */
		error = xfer->resp;
		fw_xfer_free(xfer);
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Read one CSR directory/leaf block at 'offset' into fwdev->csrrom and,
 * when 'recur' is set, recursively follow directory entries.  Leaf
 * entries are read but not descended into.  Returns 0 on success, -1 on
 * any read failure or out-of-range offset.
 */
static int
fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur)
{
	int err, i, off;
	struct csrdirectory *dir;
	struct csrreg *reg;

	dir = (struct csrdirectory *)&fwdev->csrrom[offset / sizeof(uint32_t)];
	err = fw_explore_read_quads(fwdev, CSRROMOFF + offset,
	    (uint32_t *)dir, 1);
	if (err)
		return (-1);

	offset += sizeof(uint32_t);
	reg = (struct csrreg *)&fwdev->csrrom[offset / sizeof(uint32_t)];
	err = fw_explore_read_quads(fwdev, CSRROMOFF + offset,
	    (uint32_t *)reg, dir->crc_len);
	if (err)
		return (-1);

	/* XXX check CRC */

	off = CSRROMOFF + offset + sizeof(uint32_t) * (dir->crc_len - 1);
	if (fwdev->rommax < off)
		fwdev->rommax = off;

	if (recur == 0)
		return (0);

	for (i = 0; i < dir->crc_len; i++, offset += sizeof(uint32_t)) {
		if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_D)
			recur = 1;
		else if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_L)
			recur = 0;
		else
			continue;

		off = offset + reg[i].val * sizeof(uint32_t);
		if (off > CROMSIZE) {
			printf("%s: invalid offset %d\n", __FUNCTION__, off);
			return (-1);
		}
		err = fw_explore_csrblock(fwdev, off, recur);
		if (err)
			return (-1);
	}
	return (0);
}

/*
 * Probe a single node: read its config-ROM header and bus info block,
 * create (or revalidate) the corresponding fw_device — new devices get
 * a negotiated speed and are inserted into fc->devices sorted by
 * EUI-64 — then fetch the root directory.  'dfwdev' is a caller-stack
 * dummy device carrying fc/dst/speed defaults for the reads.
 * Returns 0 on success, negative/nonzero on failure.
 */
static int
fw_explore_node(struct fw_device *dfwdev)
{
	struct firewire_comm *fc;
	struct fw_device *fwdev, *pfwdev, *tfwdev;
	uint32_t *csr;
	struct csrhdr *hdr;
	struct bus_info *binfo;
	int err, node;
	uint32_t speed_test = 0;

	fc = dfwdev->fc;
	csr = dfwdev->csrrom;
	node = dfwdev->dst;

	/* First quad */
	err = fw_explore_read_quads(dfwdev, CSRROMOFF, &csr[0], 1);
	if (err) {
		dfwdev->status = FWDEVINVAL;
		return (-1);
	}
	hdr = (struct csrhdr *)&csr[0];
	if (hdr->info_len != 4) {
		if (firewire_debug)
			device_printf(fc->bdev,
			    "%s: node%d: wrong bus info len(%d)\n",
			    __func__, node, hdr->info_len);
		dfwdev->status = FWDEVINVAL;
		return (-1);
	}

	/* bus info */
	err = fw_explore_read_quads(dfwdev, CSRROMOFF + 0x04, &csr[1], 4);
	if (err) {
		dfwdev->status = FWDEVINVAL;
		return (-1);
	}
	binfo = (struct bus_info *)&csr[1];
	if (binfo->bus_name != CSR_BUS_NAME_IEEE1394) {
		dfwdev->status = FWDEVINVAL;
		return (-1);
	}

	if (firewire_debug)
		device_printf(fc->bdev, "%s: node(%d) BUS INFO BLOCK:\n"
		    "irmc(%d) cmc(%d) isc(%d) bmc(%d) pmc(%d) "
		    "cyc_clk_acc(%d) max_rec(%d) max_rom(%d) "
		    "generation(%d) link_spd(%d)\n",
		    __func__, node,
		    binfo->irmc, binfo->cmc, binfo->isc,
		    binfo->bmc, binfo->pmc,
		    binfo->cyc_clk_acc, binfo->max_rec, binfo->max_rom,
		    binfo->generation, binfo->link_spd);

	/* Known device?  Match on EUI-64. */
	STAILQ_FOREACH(fwdev, &fc->devices, link)
		if (FW_EUI64_EQUAL(fwdev->eui, binfo->eui64))
			break;
	if (fwdev == NULL) {
		/* new device */
		fwdev = malloc(sizeof(struct fw_device), M_FW,
		    M_NOWAIT | M_ZERO);
		if (fwdev == NULL) {
			device_printf(fc->bdev, "%s: node%d: no memory\n",
			    __func__, node);
			return (-1);
		}
		fwdev->fc = fc;
		fwdev->eui = binfo->eui64;
		fwdev->dst = dfwdev->dst;
		fwdev->maxrec = dfwdev->maxrec;
		fwdev->status = dfwdev->status;

		/*
		 * Pre-1394a-2000 didn't have link_spd in
		 * the Bus Info block, so try and use the
		 * speed map value.
		 * 1394a-2000 compliant devices only use
		 * the Bus Info Block link spd value, so
		 * ignore the speed map altogether. SWB
		 */
		if (binfo->link_spd == FWSPD_S100 /* 0 */) {
			device_printf(fc->bdev, "%s: "
			    "Pre 1394a-2000 detected\n", __func__);
			fwdev->speed = fc->speed_map->speed[fc->nodeid][node];
		} else
			fwdev->speed = binfo->link_spd;
		/*
		 * Test this speed with a read to the CSRROM.
		 * If it fails, slow down the speed and retry.
		 */
		while (fwdev->speed > FWSPD_S100 /* 0 */) {
			err = fw_explore_read_quads(fwdev, CSRROMOFF,
			    &speed_test, 1);
			if (err) {
				device_printf(fc->bdev,
				    "%s: fwdev->speed(%s) decremented due to negotiation\n",
				    __func__, linkspeed[fwdev->speed]);
				fwdev->speed--;
			} else
				break;
		}

		/*
		 * If the fwdev is not found in the
		 * fc->devices TAILQ, then we will add it.
		 */
		pfwdev = NULL;
		STAILQ_FOREACH(tfwdev, &fc->devices, link) {
			if (tfwdev->eui.hi > fwdev->eui.hi ||
			    (tfwdev->eui.hi == fwdev->eui.hi &&
			    tfwdev->eui.lo > fwdev->eui.lo))
				break;
			pfwdev = tfwdev;
		}
		if (pfwdev == NULL)
			STAILQ_INSERT_HEAD(&fc->devices, fwdev, link);
		else
			STAILQ_INSERT_AFTER(&fc->devices, pfwdev, fwdev,
			    link);
	} else {
		fwdev->dst = node;
		fwdev->status = FWDEVINIT;
		/* unchanged ? */
		if (bcmp(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5)
		    == 0) {
			if (firewire_debug)
				device_printf(fc->dev,
				    "node%d: crom unchanged\n", node);
			return (0);
		}
	}

	bzero(&fwdev->csrrom[0], CROMSIZE);

	/* copy first quad and bus info block */
	bcopy(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5);
	fwdev->rommax = CSRROMOFF + sizeof(uint32_t) * 4;

	err = fw_explore_csrblock(fwdev, 0x14, 1); /* root directory */

	if (err) {
		if (firewire_debug)
			device_printf(fc->dev,
			    "%s: explore csrblock failed err(%d)\n",
			    __func__, err);
		fwdev->status = FWDEVINVAL;
		fwdev->csrrom[0] = 0;
	}
	return (err);
}

/*
 * Find the self_id packet for a node, ignoring sequels.
*/ static union fw_self_id * fw_find_self_id(struct firewire_comm *fc, int node) { uint32_t i; union fw_self_id *s; for (i = 0; i < fc->topology_map->self_id_count; i++) { s = &fc->topology_map->self_id[i]; if (s->p0.sequel) continue; if (s->p0.phy_id == node) return s; } return 0; } static void fw_explore(struct firewire_comm *fc) { int node, err, s, i, todo, todo2, trys; char nodes[63]; struct fw_device dfwdev; union fw_self_id *fwsid; todo = 0; /* setup dummy fwdev */ dfwdev.fc = fc; dfwdev.speed = 0; dfwdev.maxrec = 8; /* 512 */ dfwdev.status = FWDEVINIT; for (node = 0; node <= fc->max_node; node++) { /* We don't probe myself and linkdown nodes */ if (node == fc->nodeid) { if (firewire_debug) device_printf(fc->bdev, "%s:" "found myself node(%d) fc->nodeid(%d) fc->max_node(%d)\n", __func__, node, fc->nodeid, fc->max_node); continue; } else if (firewire_debug) { device_printf(fc->bdev, "%s:" "node(%d) fc->max_node(%d) found\n", __func__, node, fc->max_node); } fwsid = fw_find_self_id(fc, node); if (!fwsid || !fwsid->p0.link_active) { if (firewire_debug) device_printf(fc->bdev, "%s: node%d: link down\n", __func__, node); continue; } nodes[todo++] = node; } s = splfw(); for (trys = 0; todo > 0 && trys < 3; trys++) { todo2 = 0; for (i = 0; i < todo; i++) { dfwdev.dst = nodes[i]; err = fw_explore_node(&dfwdev); if (err) nodes[todo2++] = nodes[i]; if (firewire_debug) device_printf(fc->bdev, "%s: node %d, err = %d\n", __func__, node, err); } todo = todo2; } splx(s); } static void fw_bus_probe_thread(void *arg) { struct firewire_comm *fc; fc = arg; mtx_lock(&fc->wait_lock); while (fc->status != FWBUSDETACH) { if (fc->status == FWBUSEXPLORE) { mtx_unlock(&fc->wait_lock); fw_explore(fc); fc->status = FWBUSEXPDONE; if (firewire_debug) printf("bus_explore done\n"); fw_attach_dev(fc); mtx_lock(&fc->wait_lock); } msleep((void *)fc, &fc->wait_lock, PWAIT|PCATCH, "-", 0); } mtx_unlock(&fc->wait_lock); kproc_exit(0); } /* * To attach sub-devices layer onto IEEE1394 bus. 
 */

/*
 * Post-explore pass: promote freshly-probed devices (FWDEVINIT) to
 * FWDEVATTACHED, age out devices that have stayed invalid for more than
 * hold_count explore cycles, then give each attached child driver a
 * post_explore callback.  Uses a 'next' cursor because entries may be
 * removed while walking the list.
 */
static void
fw_attach_dev(struct firewire_comm *fc)
{
	struct fw_device *fwdev, *next;
	int i, err;
	device_t *devlistp;
	int devcnt;
	struct firewire_dev_comm *fdc;

	for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL;
	    fwdev = next) {
		next = STAILQ_NEXT(fwdev, link);
		if (fwdev->status == FWDEVINIT) {
			fwdev->status = FWDEVATTACHED;
		} else if (fwdev->status == FWDEVINVAL) {
			fwdev->rcnt++;
			if (firewire_debug)
				device_printf(fc->bdev, "%s:"
				    "fwdev->rcnt(%d), hold_count(%d)\n",
				    __func__, fwdev->rcnt, hold_count);
			if (fwdev->rcnt > hold_count) {
				/*
				 * Remove devices which have not been seen
				 * for a while.
				 */
				STAILQ_REMOVE(&fc->devices, fwdev, fw_device,
				    link);
				free(fwdev, M_FW);
			}
		}
	}

	err = device_get_children(fc->bdev, &devlistp, &devcnt);
	if (err == 0) {
		for (i = 0; i < devcnt; i++) {
			if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
				fdc = device_get_softc(devlistp[i]);
				if (fdc->post_explore != NULL)
					fdc->post_explore(fdc);
			}
		}
		free(devlistp, M_TEMP);
	}

	return;
}

/*
 * To allocate unique transaction label.
*/
/*
 * Allocate a free transaction label (0..63) for an async request to 'dst'.
 * Labels rotate per destination (last_tlabel[dst] + 1); a label is free when
 * no pending xfer on its tlabels[] list targets the same destination node.
 * Returns the label (also stored in xfer->tl and the packet header) or -1
 * when all 64 labels for this destination are in flight.
 */
static int
fw_get_tlabel(struct firewire_comm *fc, struct fw_xfer *xfer)
{
	u_int dst, new_tlabel;
	struct fw_xfer *txfer;
	int s;

	dst = xfer->send.hdr.mode.hdr.dst & 0x3f;
	s = splfw();
	mtx_lock(&fc->tlabel_lock);
	new_tlabel = (fc->last_tlabel[dst] + 1) & 0x3f;
	/* The label is busy only if some xfer on this list has the same dst. */
	STAILQ_FOREACH(txfer, &fc->tlabels[new_tlabel], tlabel)
		if ((txfer->send.hdr.mode.hdr.dst & 0x3f) == dst)
			break;
	if (txfer == NULL) {
		fc->last_tlabel[dst] = new_tlabel;
		STAILQ_INSERT_TAIL(&fc->tlabels[new_tlabel], xfer, tlabel);
		mtx_unlock(&fc->tlabel_lock);
		splx(s);
		xfer->tl = new_tlabel;
		/* tlrt field: label in bits 7..2, retry code in bits 1..0. */
		xfer->send.hdr.mode.hdr.tlrt = new_tlabel << 2;
		if (firewire_debug > 1)
			printf("fw_get_tlabel: dst=%d tl=%d\n", dst, new_tlabel);
		return (new_tlabel);
	}
	mtx_unlock(&fc->tlabel_lock);
	splx(s);

	if (firewire_debug > 1)
		printf("fw_get_tlabel: no free tlabel\n");
	return (-1);
}

/*
 * Copy a received packet (header + payload scattered over rb->vec iovecs)
 * into rb->xfer->recv.  Quadlet read responses are special-cased; block
 * payloads are gathered up to min(receiver buffer, packet length) and
 * recv.pay_len is trimmed to the bytes actually copied.
 */
static void
fw_rcv_copy(struct fw_rcv_buf *rb)
{
	struct fw_pkt *pkt;
	u_char *p;
	struct tcode_info *tinfo;
	u_int res, i, len, plen;

	rb->xfer->recv.spd = rb->spd;

	pkt = (struct fw_pkt *)rb->vec->iov_base;
	tinfo = &rb->fc->tcode[pkt->mode.hdr.tcode];

	/* Copy header */
	p = (u_char *)&rb->xfer->recv.hdr;
	bcopy(rb->vec->iov_base, p, tinfo->hdr_len);
	/* Advance the first iovec past the header bytes just consumed. */
	rb->vec->iov_base = (u_char *)rb->vec->iov_base + tinfo->hdr_len;
	rb->vec->iov_len -= tinfo->hdr_len;

	/* Copy payload */
	p = (u_char *)rb->xfer->recv.payload;
	res = rb->xfer->recv.pay_len;

	/* special handling for RRESQ: 4-byte datum lives in the header. */
	if (pkt->mode.hdr.tcode == FWTCODE_RRESQ &&
	    p != NULL && res >= sizeof(uint32_t)) {
		*(uint32_t *)p = pkt->mode.rresq.data;
		rb->xfer->recv.pay_len = sizeof(uint32_t);
		return;
	}

	/* Only block-type tcodes carry a payload to gather. */
	if ((tinfo->flag & FWTI_BLOCK_ASY) == 0)
		return;

	plen = pkt->mode.rresb.len;

	for (i = 0; i < rb->nvec; i++, rb->vec++) {
		len = MIN(rb->vec->iov_len, plen);
		if (res < len) {
			/* Receiver buffer too small; truncate the copy. */
			device_printf(rb->fc->bdev, "%s:"
			    " rcv buffer(%d) is %d bytes short.\n",
			    __func__, rb->xfer->recv.pay_len, len - res);
			len = res;
		}
		bcopy(rb->vec->iov_base, p, len);
		p += len;
		res -= len;
		plen -= len;
		if (res == 0 || plen == 0)
			break;
	}

	/* 'res' is the unused remainder of the receive buffer. */
	rb->xfer->recv.pay_len -= res;
}

/*
 * Generic packet receiving process.
 *
 * Responses are matched to their originating xfer via the transaction label
 * (fw_tl2xfer) and completed; requests are dispatched to the handler bound
 * to the destination address (fw_bindlookup), or answered with an
 * ADDRESS_ERROR response when nothing is bound there.
 */
void
fw_rcv(struct fw_rcv_buf *rb)
{
	struct fw_pkt *fp, *resfp;
	struct fw_bind *bind;
	int tcode;
	int oldstate;
#if 0
	int i, len;
	{
		uint32_t *qld;
		int i;
		qld = (uint32_t *)buf;
		printf("spd %d len:%d\n", spd, len);
		for (i = 0; i <= len && i < 32; i+= 4) {
			printf("0x%08x ", ntohl(qld[i/4]));
			if ((i % 16) == 15)
				printf("\n");
		}
		if ((i % 16) != 15)
			printf("\n");
	}
#endif
	fp = (struct fw_pkt *)rb->vec[0].iov_base;
	tcode = fp->mode.common.tcode;
	switch (tcode) {
	case FWTCODE_WRES:
	case FWTCODE_RRESQ:
	case FWTCODE_RRESB:
	case FWTCODE_LRES:
		/* A response: find the request it answers by (src, tlabel). */
		rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
		    fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tcode);
		if (rb->xfer == NULL) {
			device_printf(rb->fc->bdev, "%s: unknown response "
			    "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n",
			    __func__,
			    tcode_str[tcode], tcode,
			    fp->mode.hdr.src,
			    fp->mode.hdr.tlrt >> 2,
			    fp->mode.hdr.tlrt & 3,
			    fp->mode.rresq.data);
#if 0
			printf("try ad-hoc work around!!\n");
			rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
			    (fp->mode.hdr.tlrt >> 2)^3);
			if (rb->xfer == NULL) {
				printf("no use...\n");
				return;
			}
#else
			return;
#endif
		}
		fw_rcv_copy(rb);
		if (rb->xfer->recv.hdr.mode.wres.rtcode != RESP_CMP)
			rb->xfer->resp = EIO;
		else
			rb->xfer->resp = 0;
		/* make sure the packet is drained in AT queue */
		oldstate = rb->xfer->flag;
		rb->xfer->flag = FWXF_RCVD;
		switch (oldstate) {
		case FWXF_SENT:
			fw_xfer_done(rb->xfer);
			break;
		case FWXF_START:
			/*
			 * Response arrived before the AT-complete interrupt;
			 * completion is deferred to the sender path.
			 */
#if 0
			if (firewire_debug)
				printf("not sent yet tl=%x\n", rb->xfer->tl);
#endif
			break;
		default:
			device_printf(rb->fc->bdev, "%s: "
			    "unexpected flag 0x%02x\n", __func__,
			    rb->xfer->flag);
		}
		return;
	case FWTCODE_WREQQ:
	case FWTCODE_WREQB:
	case FWTCODE_RREQQ:
	case FWTCODE_RREQB:
	case FWTCODE_LREQ:
		/* A request: dispatch to whoever bound this CSR address. */
		bind = fw_bindlookup(rb->fc, fp->mode.rreqq.dest_hi,
		    fp->mode.rreqq.dest_lo);
		if (bind == NULL) {
			device_printf(rb->fc->bdev, "%s: "
			    "Unknown service addr 0x%04x:0x%08x %s(%x)"
			    " src=0x%x data=%x\n",
			    __func__,
			    fp->mode.wreqq.dest_hi,
			    fp->mode.wreqq.dest_lo,
			    tcode_str[tcode], tcode,
			    fp->mode.hdr.src,
			    ntohl(fp->mode.wreqq.data));
			if (rb->fc->status == FWBUSINIT) {
				device_printf(rb->fc->bdev,
				    "%s: cannot respond(bus reset)!\n",
				    __func__);
				return;
			}
			/* Build an ADDRESS_ERROR response packet. */
			rb->xfer = fw_xfer_alloc(M_FWXFER);
			if (rb->xfer == NULL) {
				return;
			}
			rb->xfer->send.spd = rb->spd;
			rb->xfer->send.pay_len = 0;
			resfp = &rb->xfer->send.hdr;
			switch (tcode) {
			case FWTCODE_WREQQ:
			case FWTCODE_WREQB:
				resfp->mode.hdr.tcode = FWTCODE_WRES;
				break;
			case FWTCODE_RREQQ:
				resfp->mode.hdr.tcode = FWTCODE_RRESQ;
				break;
			case FWTCODE_RREQB:
				resfp->mode.hdr.tcode = FWTCODE_RRESB;
				break;
			case FWTCODE_LREQ:
				resfp->mode.hdr.tcode = FWTCODE_LRES;
				break;
			}
			resfp->mode.hdr.dst = fp->mode.hdr.src;
			resfp->mode.hdr.tlrt = fp->mode.hdr.tlrt;
			resfp->mode.hdr.pri = fp->mode.hdr.pri;
			resfp->mode.rresb.rtcode = RESP_ADDRESS_ERROR;
			resfp->mode.rresb.extcode = 0;
			resfp->mode.rresb.len = 0;
/*
			rb->xfer->hand = fw_xferwake;
*/
			rb->xfer->hand = fw_xfer_free;
			if (fw_asyreq(rb->fc, -1, rb->xfer))
				fw_xfer_free(rb->xfer);
			return;
		}
#if 0
		len = 0;
		for (i = 0; i < rb->nvec; i++)
			len += rb->vec[i].iov_len;
#endif
		/* Hand the request to a pre-posted xfer from the bind list. */
		rb->xfer = STAILQ_FIRST(&bind->xferlist);
		if (rb->xfer == NULL) {
			device_printf(rb->fc->bdev, "%s: "
			    "Discard a packet for this bind.\n", __func__);
			return;
		}
		STAILQ_REMOVE_HEAD(&bind->xferlist, link);
		fw_rcv_copy(rb);
		rb->xfer->hand(rb->xfer);
		return;
#if 0 /* shouldn't happen ?? or for GASP */
	case FWTCODE_STREAM:
	{
		struct fw_xferq *xferq;

		xferq = rb->fc->ir[sub];
#if 0
		printf("stream rcv dma %d len %d off %d spd %d\n",
		    sub, len, off, spd);
#endif
		if (xferq->queued >= xferq->maxq) {
			printf("receive queue is full\n");
			return;
		}
		/* XXX get xfer from xfer queue, we don't need copy for
			per packet mode */
		rb->xfer = fw_xfer_alloc_buf(M_FWXFER, 0, /* XXX */
		    vec[0].iov_len);
		if (rb->xfer == NULL)
			return;
		fw_rcv_copy(rb)
		s = splfw();
		xferq->queued++;
		STAILQ_INSERT_TAIL(&xferq->q, rb->xfer, link);
		splx(s);
		sc = device_get_softc(rb->fc->bdev);
		if (SEL_WAITING(&xferq->rsel))
			selwakeuppri(&xferq->rsel, FWPRI);
		if (xferq->flag & FWXFERQ_WAKEUP) {
			xferq->flag &= ~FWXFERQ_WAKEUP;
			wakeup((caddr_t)xferq);
		}
		if (xferq->flag & FWXFERQ_HANDLER) {
			xferq->hand(xferq);
		}
		return;
		break;
	}
#endif
	default:
		device_printf(rb->fc->bdev,"%s: unknown tcode %d\n",
		    __func__, tcode);
		break;
	}
}

/*
 * Post process for Bus Manager election process.
 *
 * Completion handler for the compare-swap lock on BUS_MGR_ID issued by
 * fw_try_bmr().  On success the (possibly pre-existing) manager's id is
 * recorded in the CSR and fw_bmr() finishes the election.
 */
static void
fw_try_bmr_callback(struct fw_xfer *xfer)
{
	struct firewire_comm *fc;
	int bmr;

	if (xfer == NULL)
		return;
	fc = xfer->fc;
	if (xfer->resp != 0)
		goto error;
	if (xfer->recv.payload == NULL)
		goto error;
	if (xfer->recv.hdr.mode.lres.rtcode != FWRCODE_COMPLETE)
		goto error;

	/* Old value of BUS_MGR_ID: 0x3f means nobody held it, so we won. */
	bmr = ntohl(xfer->recv.payload[0]);
	if (bmr == 0x3f)
		bmr = fc->nodeid;

	CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, bmr & 0x3f);
	fw_xfer_free_buf(xfer);
	fw_bmr(fc);
	return;

error:
	device_printf(fc->bdev, "bus manager election failed\n");
	fw_xfer_free_buf(xfer);
}

/*
 * To candidate Bus Manager election process.
*/ static void fw_try_bmr(void *arg) { struct fw_xfer *xfer; struct firewire_comm *fc = arg; struct fw_pkt *fp; int err = 0; xfer = fw_xfer_alloc_buf(M_FWXFER, 8, 4); if (xfer == NULL) return; xfer->send.spd = 0; fc->status = FWBUSMGRELECT; fp = &xfer->send.hdr; fp->mode.lreq.dest_hi = 0xffff; fp->mode.lreq.tlrt = 0; fp->mode.lreq.tcode = FWTCODE_LREQ; fp->mode.lreq.pri = 0; fp->mode.lreq.src = 0; fp->mode.lreq.len = 8; fp->mode.lreq.extcode = EXTCODE_CMP_SWAP; fp->mode.lreq.dst = FWLOCALBUS | fc->irm; fp->mode.lreq.dest_lo = 0xf0000000 | BUS_MGR_ID; xfer->send.payload[0] = htonl(0x3f); xfer->send.payload[1] = htonl(fc->nodeid); xfer->hand = fw_try_bmr_callback; err = fw_asyreq(fc, -1, xfer); if (err) { fw_xfer_free_buf(xfer); return; } return; } #ifdef FW_VMACCESS /* * Software implementation for physical memory block access. * XXX:Too slow, useful for debug purpose only. */ static void fw_vmaccess(struct fw_xfer *xfer) { struct fw_pkt *rfp, *sfp = NULL; uint32_t *ld = (uint32_t *)xfer->recv.buf; printf("vmaccess spd:%2x len:%03x data:%08x %08x %08x %08x\n", xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); printf("vmaccess data:%08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); if (xfer->resp != 0) { fw_xfer_free(xfer); return; } if (xfer->recv.buf == NULL) { fw_xfer_free(xfer); return; } rfp = (struct fw_pkt *)xfer->recv.buf; switch (rfp->mode.hdr.tcode) { /* XXX need fix for 64bit arch */ case FWTCODE_WREQB: xfer->send.buf = malloc(12, M_FW, M_NOWAIT); xfer->send.len = 12; sfp = (struct fw_pkt *)xfer->send.buf; bcopy(rfp->mode.wreqb.payload, (caddr_t)ntohl(rfp->mode.wreqb.dest_lo),s ntohs(rfp->mode.wreqb.len)); sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = 0; break; case FWTCODE_WREQQ: xfer->send.buf = malloc(12, M_FW, M_NOWAIT); xfer->send.len = 12; sfp->mode.wres.tcode = FWTCODE_WRES; *((uint32_t *)(ntohl(rfp->mode.wreqb.dest_lo))) = rfp->mode.wreqq.data; sfp->mode.wres.rtcode = 0; 
break; case FWTCODE_RREQB: xfer->send.buf = malloc(16 + rfp->mode.rreqb.len, M_FW, M_NOWAIT); xfer->send.len = 16 + ntohs(rfp->mode.rreqb.len); sfp = (struct fw_pkt *)xfer->send.buf; bcopy((caddr_t)ntohl(rfp->mode.rreqb.dest_lo), sfp->mode.rresb.payload, ntohs(rfp->mode.rreqb.len)); sfp->mode.rresb.tcode = FWTCODE_RRESB; sfp->mode.rresb.len = rfp->mode.rreqb.len; sfp->mode.rresb.rtcode = 0; sfp->mode.rresb.extcode = 0; break; case FWTCODE_RREQQ: xfer->send.buf = malloc(16, M_FW, M_NOWAIT); xfer->send.len = 16; sfp = (struct fw_pkt *)xfer->send.buf; sfp->mode.rresq.data = *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo)); sfp->mode.wres.tcode = FWTCODE_RRESQ; sfp->mode.rresb.rtcode = 0; break; default: fw_xfer_free(xfer); return; } sfp->mode.hdr.dst = rfp->mode.hdr.src; xfer->dst = ntohs(rfp->mode.hdr.src); xfer->hand = fw_xfer_free; sfp->mode.hdr.tlrt = rfp->mode.hdr.tlrt; sfp->mode.hdr.pri = 0; fw_asyreq(xfer->fc, -1, xfer); /**/ return; } #endif /* * CRC16 check-sum for IEEE1394 register blocks. */ uint16_t fw_crc16(uint32_t *ptr, uint32_t len) { uint32_t i, sum, crc = 0; int shift; len = (len + 3) & ~3; for (i = 0; i < len; i += 4) { for (shift = 28; shift >= 0; shift -= 4) { sum = ((crc >> 12) ^ (ptr[i/4] >> shift)) & 0xf; crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum; } crc &= 0xffff; } return ((uint16_t) crc); } /* * Find the root node, if it is not * Cycle Master Capable, then we should * override this and become the Cycle * Master */ static int fw_bmr(struct firewire_comm *fc) { struct fw_device fwdev; union fw_self_id *self_id; int cmstr; uint32_t quad; /* Check to see if the current root node is cycle master capable */ self_id = fw_find_self_id(fc, fc->max_node); if (fc->max_node > 0) { /* XXX check cmc bit of businfo block rather than contender */ if (self_id->p0.link_active && self_id->p0.contender) cmstr = fc->max_node; else { device_printf(fc->bdev, "root node is not cycle master capable\n"); /* XXX shall we be the cycle master? 
*/ cmstr = fc->nodeid; /* XXX need bus reset */ } } else cmstr = -1; device_printf(fc->bdev, "bus manager %d %s\n", CSRARC(fc, BUS_MGR_ID), (CSRARC(fc, BUS_MGR_ID) != fc->nodeid) ? "(me)" : ""); if (CSRARC(fc, BUS_MGR_ID) != fc->nodeid) { /* We are not the bus manager */ return (0); } /* Optimize gapcount */ if (fc->max_hop <= MAX_GAPHOP) fw_phy_config(fc, cmstr, gap_cnt[fc->max_hop]); /* If we are the cycle master, nothing to do */ if (cmstr == fc->nodeid || cmstr == -1) return 0; /* Bus probe has not finished, make dummy fwdev for cmstr */ bzero(&fwdev, sizeof(fwdev)); fwdev.fc = fc; fwdev.dst = cmstr; fwdev.speed = 0; fwdev.maxrec = 8; /* 512 */ fwdev.status = FWDEVINIT; /* Set cmstr bit on the cycle master */ quad = htonl(1 << 8); fwmem_write_quad(&fwdev, NULL, 0/*spd*/, 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free); return 0; } int fw_open_isodma(struct firewire_comm *fc, int tx) { struct fw_xferq **xferqa; struct fw_xferq *xferq; int i; if (tx) xferqa = &fc->it[0]; else xferqa = &fc->ir[0]; FW_GLOCK(fc); for (i = 0; i < fc->nisodma; i++) { xferq = xferqa[i]; if ((xferq->flag & FWXFERQ_OPEN) == 0) { xferq->flag |= FWXFERQ_OPEN; break; } } if (i == fc->nisodma) { printf("no free dma channel (tx=%d)\n", tx); i = -1; } FW_GUNLOCK(fc); return (i); } static int fw_modevent(module_t mode, int type, void *data) { int err = 0; static eventhandler_tag fwdev_ehtag = NULL; switch (type) { case MOD_LOAD: firewire_devclass = devclass_create("firewire"); fwdev_ehtag = EVENTHANDLER_REGISTER(dev_clone, fwdev_clone, 0, 1000); break; case MOD_UNLOAD: if (fwdev_ehtag != NULL) EVENTHANDLER_DEREGISTER(dev_clone, fwdev_ehtag); break; case MOD_SHUTDOWN: break; default: return (EOPNOTSUPP); } return (err); } DRIVER_MODULE(firewire, fwohci, firewire_driver, fw_modevent, NULL); MODULE_VERSION(firewire, 1); diff --git a/sys/dev/firewire/sbp.c b/sys/dev/firewire/sbp.c index 5f4ebb747057..2a91f6987e69 100644 --- a/sys/dev/firewire/sbp.c +++ b/sys/dev/firewire/sbp.c @@ 
-1,2849 +1,2849 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ccb_sdev_ptr spriv_ptr0 #define ccb_sbp_ptr spriv_ptr1 #define SBP_NUM_TARGETS 8 /* MAX 64 */ /* * Scan_bus doesn't work for more than 8 LUNs * because of CAM_SCSI2_MAXLUN in cam_xpt.c */ #define SBP_NUM_LUNS 64 #define SBP_MAXPHYS (128 * 1024) #define SBP_DMA_SIZE PAGE_SIZE #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) /* * STATUS FIFO addressing * bit *----------------------- * 0- 1( 2): 0 (alignment) * 2- 7( 6): target * 8-15( 8): lun * 16-31( 8): reserved * 32-47(16): SBP_BIND_HI * 48-64(16): bus_id, node_id */ #define SBP_BIND_HI 0x1 #define SBP_DEV2ADDR(t, l) \ (((u_int64_t)SBP_BIND_HI << 32) \ | (((l) & 0xff) << 8) \ | (((t) & 0x3f) << 2)) #define SBP_ADDR2TRG(a) (((a) >> 2) & 0x3f) #define SBP_ADDR2LUN(a) (((a) >> 8) & 0xff) #define SBP_INITIATOR 7 static char *orb_fun_name[] = { ORB_FUN_NAMES }; static int debug = 0; static int auto_login = 1; static int max_speed = -1; static int sbp_cold = 1; static int ex_login = 1; static int login_delay = 1000; /* msec */ static int scan_delay = 500; /* msec */ static int use_doorbell = 0; static int sbp_tags = 0; SYSCTL_DECL(_hw_firewire); static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "SBP-II Subsystem"); SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RWTUN, &debug, 0, "SBP debug flag"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RWTUN, &auto_login, 0, "SBP perform login automatically"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RWTUN, &max_speed, 0, "SBP transfer max speed"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RWTUN, &ex_login, 0, "SBP enable exclusive login"); 
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RWTUN, &login_delay, 0, "SBP login delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RWTUN, &scan_delay, 0, "SBP scan delay in msec"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RWTUN, &use_doorbell, 0, "SBP use doorbell request"); SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RWTUN, &sbp_tags, 0, "SBP tagged queuing support"); #define NEED_RESPONSE 0 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) struct sbp_ocb { STAILQ_ENTRY(sbp_ocb) ocb; union ccb *ccb; bus_addr_t bus_addr; uint32_t orb[8]; #define IND_PTR_OFFSET (8*sizeof(uint32_t)) struct ind_ptr ind_ptr[SBP_IND_MAX]; struct sbp_dev *sdev; int flags; /* XXX should be removed */ bus_dmamap_t dmamap; struct callout timer; }; #define OCB_ACT_MGM 0 #define OCB_ACT_CMD 1 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) struct sbp_dev { #define SBP_DEV_RESET 0 /* accept login */ #define SBP_DEV_LOGIN 1 /* to login */ #if 0 #define SBP_DEV_RECONN 2 /* to reconnect */ #endif #define SBP_DEV_TOATTACH 3 /* to attach */ #define SBP_DEV_PROBE 4 /* scan lun */ #define SBP_DEV_ATTACHED 5 /* in operation */ #define SBP_DEV_DEAD 6 /* unavailable unit */ #define SBP_DEV_RETRY 7 /* unavailable unit */ uint8_t status:4, timeout:4; uint8_t type; uint16_t lun_id; uint16_t freeze; #define ORB_LINK_DEAD (1 << 0) #define VALID_LUN (1 << 1) #define ORB_POINTER_ACTIVE (1 << 2) #define ORB_POINTER_NEED (1 << 3) #define ORB_DOORBELL_ACTIVE (1 << 4) #define ORB_DOORBELL_NEED (1 << 5) #define ORB_SHORTAGE (1 << 6) uint16_t flags; struct cam_path *path; struct sbp_target *target; struct fwdma_alloc dma; struct sbp_login_res *login; struct callout login_callout; struct sbp_ocb *ocb; STAILQ_HEAD(, sbp_ocb) ocbs; STAILQ_HEAD(, sbp_ocb) free_ocbs; struct sbp_ocb *last_ocb; char vendor[32]; char product[32]; char revision[10]; char bustgtlun[32]; }; struct sbp_target { 
int target_id; int num_lun; struct sbp_dev **luns; struct sbp_softc *sbp; struct fw_device *fwdev; uint32_t mgm_hi, mgm_lo; struct sbp_ocb *mgm_ocb_cur; STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; struct callout mgm_ocb_timeout; struct callout scan_callout; STAILQ_HEAD(, fw_xfer) xferlist; int n_xfer; }; struct sbp_softc { struct firewire_dev_comm fd; struct cam_sim *sim; struct cam_path *path; struct sbp_target targets[SBP_NUM_TARGETS]; struct fw_bind fwb; bus_dma_tag_t dmat; struct timeval last_busreset; #define SIMQ_FREEZED 1 int flags; struct mtx mtx; }; #define SBP_LOCK(sbp) mtx_lock(&(sbp)->mtx) #define SBP_UNLOCK(sbp) mtx_unlock(&(sbp)->mtx) #define SBP_LOCK_ASSERT(sbp) mtx_assert(&(sbp)->mtx, MA_OWNED) static void sbp_post_explore (void *); static void sbp_recv (struct fw_xfer *); static void sbp_mgm_callback (struct fw_xfer *); #if 0 static void sbp_cmd_callback (struct fw_xfer *); #endif static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *); static void sbp_doorbell(struct sbp_dev *); static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int); static void sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *); static void sbp_abort_ocb (struct sbp_ocb *, int); static void sbp_abort_all_ocbs (struct sbp_dev *, int); static struct fw_xfer * sbp_write_cmd (struct sbp_dev *, int, int); static struct sbp_ocb * sbp_get_ocb (struct sbp_dev *); static struct sbp_ocb * sbp_enqueue_ocb (struct sbp_dev *, struct sbp_ocb *); static struct sbp_ocb * sbp_dequeue_ocb (struct sbp_dev *, struct sbp_status *); static void sbp_cam_detach_sdev(struct sbp_dev *); static void sbp_free_sdev(struct sbp_dev *); static void sbp_cam_detach_target (struct sbp_target *); static void sbp_free_target (struct sbp_target *); static void sbp_mgm_timeout (void *arg); static void sbp_timeout (void *arg); static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *); static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire"); /* cam related functions */ static void sbp_action(struct 
cam_sim *sim, union ccb *ccb); static void sbp_poll(struct cam_sim *sim); static void sbp_cam_scan_lun(struct cam_periph *, union ccb *); static void sbp_cam_scan_target(void *arg); static char *orb_status0[] = { /* 0 */ "No additional information to report", /* 1 */ "Request type not supported", /* 2 */ "Speed not supported", /* 3 */ "Page size not supported", /* 4 */ "Access denied", /* 5 */ "Logical unit not supported", /* 6 */ "Maximum payload too small", /* 7 */ "Reserved for future standardization", /* 8 */ "Resources unavailable", /* 9 */ "Function rejected", /* A */ "Login ID not recognized", /* B */ "Dummy ORB completed", /* C */ "Request aborted", /* FF */ "Unspecified error" #define MAX_ORB_STATUS0 0xd }; static char *orb_status1_object[] = { /* 0 */ "Operation request block (ORB)", /* 1 */ "Data buffer", /* 2 */ "Page table", /* 3 */ "Unable to specify" }; static char *orb_status1_serial_bus_error[] = { /* 0 */ "Missing acknowledge", /* 1 */ "Reserved; not to be used", /* 2 */ "Time-out error", /* 3 */ "Reserved; not to be used", /* 4 */ "Busy retry limit exceeded(X)", /* 5 */ "Busy retry limit exceeded(A)", /* 6 */ "Busy retry limit exceeded(B)", /* 7 */ "Reserved for future standardization", /* 8 */ "Reserved for future standardization", /* 9 */ "Reserved for future standardization", /* A */ "Reserved for future standardization", /* B */ "Tardy retry limit exceeded", /* C */ "Conflict error", /* D */ "Data error", /* E */ "Type error", /* F */ "Address error" }; static void sbp_identify(driver_t *driver, device_t parent) { SBP_DEBUG(0) printf("sbp_identify\n"); END_DEBUG if (device_find_child(parent, "sbp", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "sbp", -1); + BUS_ADD_CHILD(parent, 0, "sbp", DEVICE_UNIT_ANY); } /* * sbp_probe() */ static int sbp_probe(device_t dev) { SBP_DEBUG(0) printf("sbp_probe\n"); END_DEBUG device_set_desc(dev, "SBP-2/SCSI over FireWire"); #if 0 if (bootverbose) debug = bootverbose; #endif return (0); } /* * Display device 
characteristics on the console */ static void sbp_show_sdev_info(struct sbp_dev *sdev) { struct fw_device *fwdev; fwdev = sdev->target->fwdev; device_printf(sdev->target->sbp->fd.dev, "%s: %s: ordered:%d type:%d EUI:%08x%08x node:%d " "speed:%d maxrec:%d\n", __func__, sdev->bustgtlun, (sdev->type & 0x40) >> 6, (sdev->type & 0x1f), fwdev->eui.hi, fwdev->eui.lo, fwdev->dst, fwdev->speed, fwdev->maxrec); device_printf(sdev->target->sbp->fd.dev, "%s: %s '%s' '%s' '%s'\n", __func__, sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision); } static struct { int bus; int target; struct fw_eui64 eui; } wired[] = { /* Bus Target EUI64 */ #if 0 {0, 2, {0x00018ea0, 0x01fd0154}}, /* Logitec HDD */ {0, 0, {0x00018ea6, 0x00100682}}, /* Logitec DVD */ {0, 1, {0x00d03200, 0xa412006a}}, /* Yano HDD */ #endif {-1, -1, {0,0}} }; static int sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev) { int bus, i, target=-1; char w[SBP_NUM_TARGETS]; bzero(w, sizeof(w)); bus = device_get_unit(sbp->fd.dev); /* XXX wired-down configuration should be gotten from tunable or device hint */ for (i = 0; wired[i].bus >= 0; i++) { if (wired[i].bus == bus) { w[wired[i].target] = 1; if (wired[i].eui.hi == fwdev->eui.hi && wired[i].eui.lo == fwdev->eui.lo) target = wired[i].target; } } if (target >= 0) { if (target < SBP_NUM_TARGETS && sbp->targets[target].fwdev == NULL) return (target); device_printf(sbp->fd.dev, "target %d is not free for %08x:%08x\n", target, fwdev->eui.hi, fwdev->eui.lo); target = -1; } /* non-wired target */ for (i = 0; i < SBP_NUM_TARGETS; i++) if (sbp->targets[i].fwdev == NULL && w[i] == 0) { target = i; break; } return target; } static void sbp_alloc_lun(struct sbp_target *target) { struct crom_context cc; struct csrreg *reg; struct sbp_dev *sdev, **newluns; struct sbp_softc *sbp; int maxlun, lun, i; sbp = target->sbp; crom_init_context(&cc, target->fwdev->csrrom); /* XXX shoud parse appropriate unit directories only */ maxlun = -1; while (cc.depth >= 0) { reg = 
crom_search_key(&cc, CROM_LUN); if (reg == NULL) break; lun = reg->val & 0xffff; SBP_DEBUG(0) printf("target %d lun %d found\n", target->target_id, lun); END_DEBUG if (maxlun < lun) maxlun = lun; crom_next(&cc); } if (maxlun < 0) device_printf(target->sbp->fd.dev, "%d no LUN found\n", target->target_id); maxlun++; if (maxlun >= SBP_NUM_LUNS) maxlun = SBP_NUM_LUNS; /* Invalidiate stale devices */ for (lun = 0; lun < target->num_lun; lun++) { sdev = target->luns[lun]; if (sdev == NULL) continue; sdev->flags &= ~VALID_LUN; if (lun >= maxlun) { /* lost device */ sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[lun] = NULL; } } /* Reallocate */ if (maxlun != target->num_lun) { newluns = (struct sbp_dev **) realloc(target->luns, sizeof(struct sbp_dev *) * maxlun, M_SBP, M_NOWAIT | M_ZERO); if (newluns == NULL) { printf("%s: realloc failed\n", __func__); newluns = target->luns; maxlun = target->num_lun; } /* * We must zero the extended region for the case * realloc() doesn't allocate new buffer. 
*/ if (maxlun > target->num_lun) bzero(&newluns[target->num_lun], sizeof(struct sbp_dev *) * (maxlun - target->num_lun)); target->luns = newluns; target->num_lun = maxlun; } crom_init_context(&cc, target->fwdev->csrrom); while (cc.depth >= 0) { int new = 0; reg = crom_search_key(&cc, CROM_LUN); if (reg == NULL) break; lun = reg->val & 0xffff; if (lun >= SBP_NUM_LUNS) { printf("too large lun %d\n", lun); goto next; } sdev = target->luns[lun]; if (sdev == NULL) { sdev = malloc(sizeof(struct sbp_dev), M_SBP, M_NOWAIT | M_ZERO); if (sdev == NULL) { printf("%s: malloc failed\n", __func__); goto next; } target->luns[lun] = sdev; sdev->lun_id = lun; sdev->target = target; STAILQ_INIT(&sdev->ocbs); callout_init_mtx(&sdev->login_callout, &sbp->mtx, 0); sdev->status = SBP_DEV_RESET; new = 1; snprintf(sdev->bustgtlun, 32, "%s:%d:%d", device_get_nameunit(sdev->target->sbp->fd.dev), sdev->target->target_id, sdev->lun_id); } sdev->flags |= VALID_LUN; sdev->type = (reg->val & 0xff0000) >> 16; if (new == 0) goto next; fwdma_malloc(sbp->fd.fc, /* alignment */ sizeof(uint32_t), SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); if (sdev->dma.v_addr == NULL) { printf("%s: dma space allocation failed\n", __func__); free(sdev, M_SBP); target->luns[lun] = NULL; goto next; } sdev->login = (struct sbp_login_res *) sdev->dma.v_addr; sdev->ocb = (struct sbp_ocb *) ((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE); bzero((char *)sdev->ocb, sizeof(struct sbp_ocb) * SBP_QUEUE_LEN); STAILQ_INIT(&sdev->free_ocbs); for (i = 0; i < SBP_QUEUE_LEN; i++) { struct sbp_ocb *ocb; ocb = &sdev->ocb[i]; ocb->bus_addr = sdev->dma.bus_addr + SBP_LOGIN_SIZE + sizeof(struct sbp_ocb) * i + offsetof(struct sbp_ocb, orb[0]); if (bus_dmamap_create(sbp->dmat, 0, &ocb->dmamap)) { printf("sbp_attach: cannot create dmamap\n"); /* XXX */ goto next; } callout_init_mtx(&ocb->timer, &sbp->mtx, 0); SBP_LOCK(sbp); sbp_free_ocb(sdev, ocb); SBP_UNLOCK(sbp); } next: crom_next(&cc); } for (lun = 0; lun < target->num_lun; 
lun++) { sdev = target->luns[lun]; if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[lun] = NULL; } } } static struct sbp_target * sbp_alloc_target(struct sbp_softc *sbp, struct fw_device *fwdev) { int i; struct sbp_target *target; struct crom_context cc; struct csrreg *reg; SBP_DEBUG(1) printf("sbp_alloc_target\n"); END_DEBUG i = sbp_new_target(sbp, fwdev); if (i < 0) { device_printf(sbp->fd.dev, "increase SBP_NUM_TARGETS!\n"); return NULL; } /* new target */ target = &sbp->targets[i]; target->fwdev = fwdev; target->target_id = i; /* XXX we may want to reload mgm port after each bus reset */ /* XXX there might be multiple management agents */ crom_init_context(&cc, target->fwdev->csrrom); reg = crom_search_key(&cc, CROM_MGM); if (reg == NULL || reg->val == 0) { printf("NULL management address\n"); target->fwdev = NULL; return NULL; } target->mgm_hi = 0xffff; target->mgm_lo = 0xf0000000 | (reg->val << 2); target->mgm_ocb_cur = NULL; SBP_DEBUG(1) printf("target:%d mgm_port: %x\n", i, target->mgm_lo); END_DEBUG STAILQ_INIT(&target->xferlist); target->n_xfer = 0; STAILQ_INIT(&target->mgm_ocb_queue); callout_init_mtx(&target->mgm_ocb_timeout, &sbp->mtx, 0); callout_init_mtx(&target->scan_callout, &sbp->mtx, 0); target->luns = NULL; target->num_lun = 0; return target; } static void sbp_probe_lun(struct sbp_dev *sdev) { struct fw_device *fwdev; struct crom_context c, *cc = &c; struct csrreg *reg; bzero(sdev->vendor, sizeof(sdev->vendor)); bzero(sdev->product, sizeof(sdev->product)); fwdev = sdev->target->fwdev; crom_init_context(cc, fwdev->csrrom); /* get vendor string */ crom_search_key(cc, CSRKEY_VENDOR); crom_next(cc); crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); /* skip to the unit directory for SBP-2 */ while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { if (reg->val == CSRVAL_T10SBP2) break; crom_next(cc); } /* get firmware revision */ reg = crom_search_key(cc, CSRKEY_FIRM_VER); if 
(reg != NULL) snprintf(sdev->revision, sizeof(sdev->revision), "%06x", reg->val); /* get product string */ crom_search_key(cc, CSRKEY_MODEL); crom_next(cc); crom_parse_text(cc, sdev->product, sizeof(sdev->product)); } static void sbp_login_callout(void *arg) { struct sbp_dev *sdev = (struct sbp_dev *)arg; SBP_LOCK_ASSERT(sdev->target->sbp); sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); } static void sbp_login(struct sbp_dev *sdev) { struct timeval delta; struct timeval t; int ticks = 0; microtime(&delta); timevalsub(&delta, &sdev->target->sbp->last_busreset); t.tv_sec = login_delay / 1000; t.tv_usec = (login_delay % 1000) * 1000; timevalsub(&t, &delta); if (t.tv_sec >= 0 && t.tv_usec > 0) ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; SBP_DEBUG(0) printf("%s: sec = %jd usec = %ld ticks = %d\n", __func__, (intmax_t)t.tv_sec, t.tv_usec, ticks); END_DEBUG callout_reset(&sdev->login_callout, ticks, sbp_login_callout, (void *)(sdev)); } #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) static void sbp_probe_target(struct sbp_target *target) { struct sbp_softc *sbp = target->sbp; struct sbp_dev *sdev; int i, alive; alive = SBP_FWDEV_ALIVE(target->fwdev); SBP_DEBUG(1) device_printf(sbp->fd.dev, "%s %d%salive\n", __func__, target->target_id, (!alive) ? 
" not " : ""); END_DEBUG sbp_alloc_lun(target); /* XXX untimeout mgm_ocb and dequeue */ for (i=0; i < target->num_lun; i++) { sdev = target->luns[i]; if (sdev == NULL) continue; if (alive && (sdev->status != SBP_DEV_DEAD)) { if (sdev->path != NULL) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } sbp_probe_lun(sdev); sbp_show_sdev_info(sdev); SBP_LOCK(sbp); sbp_abort_all_ocbs(sdev, CAM_SCSI_BUS_RESET); SBP_UNLOCK(sbp); switch (sdev->status) { case SBP_DEV_RESET: /* new or revived target */ if (auto_login) sbp_login(sdev); break; case SBP_DEV_TOATTACH: case SBP_DEV_PROBE: case SBP_DEV_ATTACHED: case SBP_DEV_RETRY: default: sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); break; } } else { switch (sdev->status) { case SBP_DEV_ATTACHED: SBP_DEBUG(0) /* the device has gone */ device_printf(sbp->fd.dev, "%s: lost target\n", __func__); END_DEBUG if (sdev->path) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } sdev->status = SBP_DEV_RETRY; sbp_cam_detach_sdev(sdev); sbp_free_sdev(sdev); target->luns[i] = NULL; break; case SBP_DEV_PROBE: case SBP_DEV_TOATTACH: sdev->status = SBP_DEV_RESET; break; case SBP_DEV_RETRY: case SBP_DEV_RESET: case SBP_DEV_DEAD: break; } } } } static void sbp_post_busreset(void *arg) { struct sbp_softc *sbp; sbp = (struct sbp_softc *)arg; SBP_DEBUG(0) printf("sbp_post_busreset\n"); END_DEBUG SBP_LOCK(sbp); if ((sbp->flags & SIMQ_FREEZED) == 0) { xpt_freeze_simq(sbp->sim, /*count*/1); sbp->flags |= SIMQ_FREEZED; } microtime(&sbp->last_busreset); SBP_UNLOCK(sbp); } static void sbp_post_explore(void *arg) { struct sbp_softc *sbp = (struct sbp_softc *)arg; struct sbp_target *target; struct fw_device *fwdev; int i, alive; SBP_DEBUG(0) printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); END_DEBUG /* We need physical access */ if (!firewire_phydma_enable) return; if (sbp_cold > 0) sbp_cold--; SBP_LOCK(sbp); /* Garbage Collection */ for (i = 0; i < SBP_NUM_TARGETS; i++) { target = &sbp->targets[i]; if (target->fwdev == NULL) continue; STAILQ_FOREACH(fwdev, 
&sbp->fd.fc->devices, link) if (target->fwdev == fwdev) break; if (fwdev == NULL) { /* device has removed in lower driver */ sbp_cam_detach_target(target); sbp_free_target(target); } } /* traverse device list */ STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) { SBP_DEBUG(0) device_printf(sbp->fd.dev,"%s:: EUI:%08x%08x %s attached, state=%d\n", __func__, fwdev->eui.hi, fwdev->eui.lo, (fwdev->status != FWDEVATTACHED) ? "not" : "", fwdev->status); END_DEBUG alive = SBP_FWDEV_ALIVE(fwdev); for (i = 0; i < SBP_NUM_TARGETS; i++) { target = &sbp->targets[i]; if (target->fwdev == fwdev) { /* known target */ break; } } if (i == SBP_NUM_TARGETS) { if (alive) { /* new target */ target = sbp_alloc_target(sbp, fwdev); if (target == NULL) continue; } else { continue; } } /* * It is safe to drop the lock here as the target is already * reserved, so there should be no contenders for it. * And the target is not yet exposed, so there should not be * any other accesses to it. * Finally, the list being iterated is protected somewhere else. 
*/ SBP_UNLOCK(sbp); sbp_probe_target(target); SBP_LOCK(sbp); if (target->num_lun == 0) sbp_free_target(target); } if ((sbp->flags & SIMQ_FREEZED) != 0) { xpt_release_simq(sbp->sim, /*run queue*/TRUE); sbp->flags &= ~SIMQ_FREEZED; } SBP_UNLOCK(sbp); } #if NEED_RESPONSE static void sbp_loginres_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev,"%s\n", __func__); END_DEBUG /* recycle */ SBP_LOCK(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->target->sbp->fwb.xferlist, xfer, link); SBP_UNLOCK(sdev->target->sbp); return; } #endif static __inline void sbp_xfer_free(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; fw_xfer_unload(xfer); SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); } static void sbp_reset_start_callback(struct fw_xfer *xfer) { struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; struct sbp_target *target = sdev->target; int i; if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s: %s failed: resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } SBP_LOCK(target->sbp); for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) sbp_login(tsdev); } SBP_UNLOCK(target->sbp); } static void sbp_reset_start(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__,sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_reset_start_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); } static void sbp_mgm_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, 
	    sdev->bustgtlun);
END_DEBUG
	SBP_LOCK(sdev->target->sbp);
	sbp_xfer_free(xfer);
	SBP_UNLOCK(sdev->target->sbp);
}

/*
 * Return the first LUN at index >= lun whose state is SBP_DEV_PROBE,
 * or NULL if none remain.  Used to chain LUN scans one at a time.
 */
static struct sbp_dev *
sbp_next_dev(struct sbp_target *target, int lun)
{
	struct sbp_dev **sdevp;
	int i;

	for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun;
	    i++, sdevp++)
		if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE)
			return (*sdevp);
	return (NULL);
}

#define SCAN_PRI 1

/*
 * XPT_SCAN_LUN completion handler.  Marks the scanned LUN attached on
 * success, then either reuses the same ccb to scan the next LUN still
 * in PROBE state, or frees the ccb when the target is fully scanned.
 */
static void
sbp_cam_scan_lun(struct cam_periph *periph, union ccb *ccb)
{
	struct sbp_softc *sbp;
	struct sbp_target *target;
	struct sbp_dev *sdev;

	sdev = (struct sbp_dev *) ccb->ccb_h.ccb_sdev_ptr;
	target = sdev->target;
	sbp = target->sbp;

	SBP_LOCK(sbp);
SBP_DEBUG(0)
	device_printf(sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun);
END_DEBUG
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		sdev->status = SBP_DEV_ATTACHED;
	} else {
		device_printf(sbp->fd.dev, "%s:%s failed\n", __func__, sdev->bustgtlun);
	}
	sdev = sbp_next_dev(target, sdev->lun_id + 1);
	if (sdev == NULL) {
		SBP_UNLOCK(sbp);
		xpt_free_ccb(ccb);
		return;
	}
	/* reuse ccb */
	xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
	ccb->ccb_h.ccb_sdev_ptr = sdev;
	ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
	/* Drop the lock across xpt_action(); CAM may recurse. */
	SBP_UNLOCK(sbp);
	xpt_action(ccb);
	/* Collapse any accumulated freeze count down to a single one. */
	xpt_release_devq(sdev->path, sdev->freeze, TRUE);
	sdev->freeze = 1;
}

/*
 * Callout handler (see sbp_scan_dev) that kicks off the LUN-by-LUN
 * scan of a target: allocates one ccb and issues XPT_SCAN_LUN for the
 * first LUN in PROBE state; sbp_cam_scan_lun() chains the rest.
 */
static void
sbp_cam_scan_target(void *arg)
{
	struct sbp_target *target = (struct sbp_target *)arg;
	struct sbp_dev *sdev;
	union ccb *ccb;

	SBP_LOCK_ASSERT(target->sbp);
	sdev = sbp_next_dev(target, 0);
	if (sdev == NULL) {
		printf("sbp_cam_scan_target: nothing to do for target%d\n",
		    target->target_id);
		return;
	}
SBP_DEBUG(0)
	device_printf(sdev->target->sbp->fd.dev,
	    "%s:%s\n", __func__, sdev->bustgtlun);
END_DEBUG
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		printf("sbp_cam_scan_target: xpt_alloc_ccb_nowait() failed\n");
		return;
	}
	SBP_UNLOCK(target->sbp);
	xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = sbp_cam_scan_lun;
	ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
ccb->crcn.flags = CAM_FLAG_NONE; ccb->ccb_h.ccb_sdev_ptr = sdev; /* The scan is in progress now. */ xpt_action(ccb); SBP_LOCK(target->sbp); xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 1; } static __inline void sbp_scan_dev(struct sbp_dev *sdev) { sdev->status = SBP_DEV_PROBE; callout_reset_sbt(&sdev->target->scan_callout, SBT_1MS * scan_delay, 0, sbp_cam_scan_target, (void *)sdev->target, 0); } static void sbp_do_attach(struct fw_xfer *xfer) { struct sbp_dev *sdev; struct sbp_target *target; struct sbp_softc *sbp; sdev = (struct sbp_dev *)xfer->sc; target = sdev->target; sbp = target->sbp; SBP_LOCK(sbp); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG sbp_xfer_free(xfer); if (sdev->path == NULL) xpt_create_path(&sdev->path, NULL, cam_sim_path(target->sbp->sim), target->target_id, sdev->lun_id); /* * Let CAM scan the bus if we are in the boot process. * XXX xpt_scan_bus cannot detect LUN larger than 0 * if LUN 0 doesn't exist. 
*/ if (sbp_cold > 0) { sdev->status = SBP_DEV_ATTACHED; SBP_UNLOCK(sbp); return; } sbp_scan_dev(sdev); SBP_UNLOCK(sbp); } static void sbp_agent_reset_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { device_printf(sdev->target->sbp->fd.dev, "%s:%s resp=%d\n", __func__, sdev->bustgtlun, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); if (sdev->path) { xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; } SBP_UNLOCK(sdev->target->sbp); } static void sbp_agent_reset(struct sbp_dev *sdev) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_LOCK_ASSERT(sdev->target->sbp); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); if (xfer == NULL) return; if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) xfer->hand = sbp_agent_reset_callback; else xfer->hand = sbp_do_attach; fp = &xfer->send.hdr; fp->mode.wreqq.data = htonl(0xf); fw_asyreq(xfer->fc, -1, xfer); sbp_abort_all_ocbs(sdev, CAM_BDR_SENT); } static void sbp_busy_timeout_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sbp_agent_reset(sdev); SBP_UNLOCK(sdev->target->sbp); } static void sbp_busy_timeout(struct sbp_dev *sdev) { struct fw_pkt *fp; struct fw_xfer *xfer; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); xfer->hand = sbp_busy_timeout_callback; fp = &xfer->send.hdr; fp->mode.wreqq.dest_hi = 0xffff; fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; fp->mode.wreqq.data = htonl((1 << (13 + 12)) | 0xf); 
fw_asyreq(xfer->fc, -1, xfer); } static void sbp_orb_pointer_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(2) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ printf("%s: xfer->resp = %d\n", __func__, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sdev->flags &= ~ORB_POINTER_ACTIVE; if ((sdev->flags & ORB_POINTER_NEED) != 0) { struct sbp_ocb *ocb; sdev->flags &= ~ORB_POINTER_NEED; ocb = STAILQ_FIRST(&sdev->ocbs); if (ocb != NULL) sbp_orb_pointer(sdev, ocb); } SBP_UNLOCK(sdev->target->sbp); return; } static void sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) { struct fw_xfer *xfer; struct fw_pkt *fp; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); END_DEBUG SBP_LOCK_ASSERT(sdev->target->sbp); if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { SBP_DEBUG(0) printf("%s: orb pointer active\n", __func__); END_DEBUG sdev->flags |= ORB_POINTER_NEED; return; } sdev->flags |= ORB_POINTER_ACTIVE; xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08); if (xfer == NULL) return; xfer->hand = sbp_orb_pointer_callback; fp = &xfer->send.hdr; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS) << 16)); xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); if (fw_asyreq(xfer->fc, -1, xfer) != 0) { sbp_xfer_free(xfer); ocb->ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ocb->ccb); } } static void sbp_doorbell_callback(struct fw_xfer *xfer) { struct sbp_dev *sdev; sdev = (struct sbp_dev *)xfer->sc; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG if (xfer->resp != 0) { /* XXX */ device_printf(sdev->target->sbp->fd.dev, "%s: xfer->resp = %d\n", __func__, xfer->resp); } SBP_LOCK(sdev->target->sbp); sbp_xfer_free(xfer); sdev->flags &= 
	    ~ORB_DOORBELL_ACTIVE;
	/* Re-ring if another doorbell was requested while we were busy. */
	if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
		sdev->flags &= ~ORB_DOORBELL_NEED;
		sbp_doorbell(sdev);
	}
	SBP_UNLOCK(sdev->target->sbp);
}

/*
 * Ring the target's DOORBELL register (agent offset 0x10) to make it
 * refetch the ORB list.  Only one doorbell write is kept in flight;
 * concurrent requests are coalesced via ORB_DOORBELL_NEED and replayed
 * from sbp_doorbell_callback().
 */
static void
sbp_doorbell(struct sbp_dev *sdev)
{
	struct fw_xfer *xfer;
	struct fw_pkt *fp;
SBP_DEBUG(1)
	device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__,
	    sdev->bustgtlun);
END_DEBUG
	if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
		sdev->flags |= ORB_DOORBELL_NEED;
		return;
	}
	sdev->flags |= ORB_DOORBELL_ACTIVE;
	xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10);
	if (xfer == NULL)
		return;
	xfer->hand = sbp_doorbell_callback;
	fp = &xfer->send.hdr;
	fp->mode.wreqq.data = htonl(0xf);
	fw_asyreq(xfer->fc, -1, xfer);
}

/*
 * Get an fw_xfer for a write (quadlet or block, per tcode) to the
 * device's command block agent at the given register offset.
 *
 * Xfers are recycled from the per-target free list; at most 5 extra
 * xfers are allocated per target before we give up.  The header is
 * pre-filled from the login response (cmd_hi/cmd_lo + offset).
 *
 * Returns NULL on pool exhaustion or allocation failure — callers
 * must check.  Softc lock must be held.
 */
static struct fw_xfer *
sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
{
	struct fw_xfer *xfer;
	struct fw_pkt *fp;
	struct sbp_target *target;
	int new = 0;

	SBP_LOCK_ASSERT(sdev->target->sbp);
	target = sdev->target;
	xfer = STAILQ_FIRST(&target->xferlist);
	if (xfer == NULL) {
		if (target->n_xfer > 5 /* XXX */) {
			printf("sbp: no more xfer for this target\n");
			return (NULL);
		}
		xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
		if (xfer == NULL) {
			printf("sbp: fw_xfer_alloc_buf failed\n");
			return NULL;
		}
		target->n_xfer++;
		if (debug)
			printf("sbp: alloc %d xfer\n", target->n_xfer);
		new = 1;
	} else {
		STAILQ_REMOVE_HEAD(&target->xferlist, link);
	}

	/* Freshly allocated xfers need speed and bus context set once. */
	if (new) {
		xfer->recv.pay_len = 0;
		xfer->send.spd = min(sdev->target->fwdev->speed, max_speed);
		xfer->fc = sdev->target->sbp->fd.fc;
	}

	if (tcode == FWTCODE_WREQB)
		xfer->send.pay_len = 8;
	else
		xfer->send.pay_len = 0;

	xfer->sc = (caddr_t)sdev;
	fp = &xfer->send.hdr;
	fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
	fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
	fp->mode.wreqq.tlrt = 0;
	fp->mode.wreqq.tcode = tcode;
	fp->mode.wreqq.pri = 0;
	fp->mode.wreqq.dst = FWLOCALBUS | sdev->target->fwdev->dst;

	return xfer;
}

/*
 * Build and send a management ORB (login, logout, reconnect, reset,
 * ...) for the device.  Only one management ORB is outstanding per
 * target; others are queued on mgm_ocb_queue.
 */
static void
sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
{
	struct fw_xfer *xfer;
	struct fw_pkt *fp;
	struct sbp_ocb *ocb;
	struct
sbp_target *target; int nid; target = sdev->target; nid = target->sbp->fd.fc->nodeid | FWLOCALBUS; SBP_LOCK_ASSERT(target->sbp); if (func == ORB_FUN_RUNQUEUE) { ocb = STAILQ_FIRST(&target->mgm_ocb_queue); if (target->mgm_ocb_cur != NULL || ocb == NULL) { return; } STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); goto start; } if ((ocb = sbp_get_ocb(sdev)) == NULL) { /* XXX */ return; } ocb->flags = OCB_ACT_MGM; ocb->sdev = sdev; bzero((void *)ocb->orb, sizeof(ocb->orb)); ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); ocb->orb[7] = htonl(SBP_DEV2ADDR(target->target_id, sdev->lun_id)); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s %s\n", __func__,sdev->bustgtlun, orb_fun_name[(func >> 16) & 0xf]); END_DEBUG switch (func) { case ORB_FUN_LGI: ocb->orb[0] = ocb->orb[1] = 0; /* password */ ocb->orb[2] = htonl(nid << 16); ocb->orb[3] = htonl(sdev->dma.bus_addr); ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); if (ex_login) ocb->orb[4] |= htonl(ORB_EXV); ocb->orb[5] = htonl(SBP_LOGIN_SIZE); fwdma_sync(&sdev->dma, BUS_DMASYNC_PREREAD); break; case ORB_FUN_ATA: ocb->orb[0] = htonl((0 << 16) | 0); ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); /* fall through */ case ORB_FUN_RCN: case ORB_FUN_LGO: case ORB_FUN_LUR: case ORB_FUN_RST: case ORB_FUN_ATS: ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); break; } if (target->mgm_ocb_cur != NULL) { /* there is a standing ORB */ STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); return; } start: target->mgm_ocb_cur = ocb; callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, (caddr_t)ocb); xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); if (xfer == NULL) { return; } xfer->hand = sbp_mgm_callback; fp = &xfer->send.hdr; fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; fp->mode.wreqb.len = 8; fp->mode.wreqb.extcode = 0; xfer->send.payload[0] = htonl(nid << 16); xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); fw_asyreq(xfer->fc, -1, 
xfer); } static void sbp_print_scsi_cmd(struct sbp_ocb *ocb) { struct ccb_scsiio *csio; csio = &ocb->ccb->csio; printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(ocb->sdev->target->sbp->fd.dev), ocb->ccb->ccb_h.target_id, (uintmax_t)ocb->ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ocb->ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); } static void sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) { struct sbp_cmd_status *sbp_cmd_status; struct scsi_sense_data_fixed *sense; sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; sense = (struct scsi_sense_data_fixed *)&ocb->ccb->csio.sense_data; SBP_DEBUG(0) sbp_print_scsi_cmd(ocb); /* XXX need decode status */ printf("%s: SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", ocb->sdev->bustgtlun, sbp_cmd_status->status, sbp_cmd_status->sfmt, sbp_cmd_status->valid, sbp_cmd_status->s_key, sbp_cmd_status->s_code, sbp_cmd_status->s_qlfr, sbp_status->len); END_DEBUG switch (sbp_cmd_status->status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_BUSY: case SCSI_STATUS_CMD_TERMINATED: if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) { sense->error_code = SSD_CURRENT_ERROR; } else { sense->error_code = SSD_DEFERRED_ERROR; } if (sbp_cmd_status->valid) sense->error_code |= SSD_ERRCODE_VALID; sense->flags = sbp_cmd_status->s_key; if (sbp_cmd_status->mark) sense->flags |= SSD_FILEMARK; if (sbp_cmd_status->eom) sense->flags |= SSD_EOM; if (sbp_cmd_status->ill_len) sense->flags |= SSD_ILI; bcopy(&sbp_cmd_status->info, &sense->info[0], 4); if (sbp_status->len <= 1) /* XXX not scsi status. 
shouldn't be happened */ sense->extra_len = 0; else if (sbp_status->len <= 4) /* add_sense_code(_qual), info, cmd_spec_info */ sense->extra_len = 6; else /* fru, sense_key_spec */ sense->extra_len = 10; bcopy(&sbp_cmd_status->cdb, &sense->cmd_spec_info[0], 4); sense->add_sense_code = sbp_cmd_status->s_code; sense->add_sense_code_qual = sbp_cmd_status->s_qlfr; sense->fru = sbp_cmd_status->fru; bcopy(&sbp_cmd_status->s_keydep[0], &sense->sense_key_spec[0], 3); ocb->ccb->csio.scsi_status = sbp_cmd_status->status; ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; /* { uint8_t j, *tmp; tmp = sense; for (j = 0; j < 32; j += 8) { printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n", tmp[j], tmp[j + 1], tmp[j + 2], tmp[j + 3], tmp[j + 4], tmp[j + 5], tmp[j + 6], tmp[j + 7]); } } */ break; default: device_printf(ocb->sdev->target->sbp->fd.dev, "%s:%s unknown scsi status 0x%x\n", __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status); } } static void sbp_fix_inq_data(struct sbp_ocb *ocb) { union ccb *ccb; struct sbp_dev *sdev; struct scsi_inquiry_data *inq; ccb = ocb->ccb; sdev = ocb->sdev; if (ccb->csio.cdb_io.cdb_bytes[1] & SI_EVPD) return; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s\n", __func__, sdev->bustgtlun); END_DEBUG inq = (struct scsi_inquiry_data *) ccb->csio.data_ptr; switch (SID_TYPE(inq)) { case T_DIRECT: #if 0 /* * XXX Convert Direct Access device to RBC. * I've never seen FireWire DA devices which support READ_6. */ if (SID_TYPE(inq) == T_DIRECT) inq->device |= T_RBC; /* T_DIRECT == 0 */ #endif /* fall through */ case T_RBC: /* * Override vendor/product/revision information. * Some devices sometimes return strange strings. */ #if 1 bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor)); bcopy(sdev->product, inq->product, sizeof(inq->product)); bcopy(sdev->revision + 2, inq->revision, sizeof(inq->revision)); #endif break; } /* * Force to enable/disable tagged queuing. 
* XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page. */ if (sbp_tags > 0) inq->flags |= SID_CmdQue; else if (sbp_tags < 0) inq->flags &= ~SID_CmdQue; } static void sbp_recv1(struct fw_xfer *xfer) { struct fw_pkt *rfp; #if NEED_RESPONSE struct fw_pkt *sfp; #endif struct sbp_softc *sbp; struct sbp_dev *sdev; struct sbp_ocb *ocb; struct sbp_login_res *login_res = NULL; struct sbp_status *sbp_status; struct sbp_target *target; int orb_fun, status_valid0, status_valid, t, l, reset_agent = 0; uint32_t addr; /* uint32_t *ld; ld = xfer->recv.buf; printf("sbp %x %d %d %08x %08x %08x %08x\n", xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11])); */ sbp = (struct sbp_softc *)xfer->sc; SBP_LOCK_ASSERT(sbp); if (xfer->resp != 0) { printf("sbp_recv: xfer->resp = %d\n", xfer->resp); goto done0; } if (xfer->recv.payload == NULL) { printf("sbp_recv: xfer->recv.payload == NULL\n"); goto done0; } rfp = &xfer->recv.hdr; if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) { printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode); goto done0; } sbp_status = (struct sbp_status *)xfer->recv.payload; addr = rfp->mode.wreqb.dest_lo; SBP_DEBUG(2) printf("received address 0x%x\n", addr); END_DEBUG t = SBP_ADDR2TRG(addr); if (t >= SBP_NUM_TARGETS) { device_printf(sbp->fd.dev, "sbp_recv1: invalid target %d\n", t); goto done0; } target = &sbp->targets[t]; l = SBP_ADDR2LUN(addr); if (l >= target->num_lun || target->luns[l] == NULL) { device_printf(sbp->fd.dev, "sbp_recv1: invalid lun %d (target=%d)\n", l, t); goto done0; } sdev = target->luns[l]; ocb = NULL; switch (sbp_status->src) { case 0: case 1: /* check mgm_ocb_cur first */ ocb = target->mgm_ocb_cur; if (ocb != NULL) { if (OCB_MATCH(ocb, sbp_status)) { callout_stop(&target->mgm_ocb_timeout); 
target->mgm_ocb_cur = NULL; break; } } ocb = sbp_dequeue_ocb(sdev, sbp_status); if (ocb == NULL) { device_printf(sdev->target->sbp->fd.dev, "%s:%s No ocb(%x) on the queue\n", __func__,sdev->bustgtlun, ntohl(sbp_status->orb_lo)); } break; case 2: /* unsolicit */ device_printf(sdev->target->sbp->fd.dev, "%s:%s unsolicit status received\n", __func__, sdev->bustgtlun); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown sbp_status->src\n", __func__, sdev->bustgtlun); } status_valid0 = (sbp_status->src < 2 && sbp_status->resp == ORB_RES_CMPL && sbp_status->dead == 0); status_valid = (status_valid0 && sbp_status->status == 0); if (!status_valid0 || debug > 2) { int status; SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s ORB status src:%x resp:%x dead:%x" " len:%x stat:%x orb:%x%08x\n", __func__, sdev->bustgtlun, sbp_status->src, sbp_status->resp, sbp_status->dead, sbp_status->len, sbp_status->status, ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo)); END_DEBUG device_printf(sdev->target->sbp->fd.dev, "%s\n", sdev->bustgtlun); status = sbp_status->status; switch (sbp_status->resp) { case 0: if (status > MAX_ORB_STATUS0) printf("%s\n", orb_status0[MAX_ORB_STATUS0]); else printf("%s\n", orb_status0[status]); break; case 1: printf("Obj: %s, Error: %s\n", orb_status1_object[(status >> 6) & 3], orb_status1_serial_bus_error[status & 0xf]); break; case 2: printf("Illegal request\n"); break; case 3: printf("Vendor dependent\n"); break; default: printf("unknown respose code %d\n", sbp_status->resp); } } /* we have to reset the fetch agent if it's dead */ if (sbp_status->dead) { if (sdev->path) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } reset_agent = 1; } if (ocb == NULL) goto done; switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) { case ORB_FMT_NOP: break; case ORB_FMT_VED: break; case ORB_FMT_STD: switch (ocb->flags) { case OCB_ACT_MGM: orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK; reset_agent = 0; switch (orb_fun) { case ORB_FUN_LGI: 
fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD); login_res = sdev->login; login_res->len = ntohs(login_res->len); login_res->id = ntohs(login_res->id); login_res->cmd_hi = ntohs(login_res->cmd_hi); login_res->cmd_lo = ntohl(login_res->cmd_lo); if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s login: len %d, ID %d, cmd %08x%08x, recon_hold %d\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo, ntohs(login_res->recon_hold)); END_DEBUG sbp_busy_timeout(sdev); } else { /* forgot logout? */ device_printf(sdev->target->sbp->fd.dev, "%s:%s login failed\n", __func__, sdev->bustgtlun); sdev->status = SBP_DEV_RESET; } break; case ORB_FUN_RCN: login_res = sdev->login; if (status_valid) { SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect: len %d, ID %d, cmd %08x%08x\n", __func__, sdev->bustgtlun, login_res->len, login_res->id, login_res->cmd_hi, login_res->cmd_lo); END_DEBUG if (sdev->status == SBP_DEV_ATTACHED) sbp_scan_dev(sdev); else sbp_agent_reset(sdev); } else { /* reconnection hold time exceed? 
*/ SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s reconnect failed\n", __func__, sdev->bustgtlun); END_DEBUG sbp_login(sdev); } break; case ORB_FUN_LGO: sdev->status = SBP_DEV_RESET; break; case ORB_FUN_RST: sbp_busy_timeout(sdev); break; case ORB_FUN_LUR: case ORB_FUN_ATA: case ORB_FUN_ATS: sbp_agent_reset(sdev); break; default: device_printf(sdev->target->sbp->fd.dev, "%s:%s unknown function %d\n", __func__, sdev->bustgtlun, orb_fun); break; } sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); break; case OCB_ACT_CMD: sdev->timeout = 0; if (ocb->ccb != NULL) { union ccb *ccb; ccb = ocb->ccb; if (sbp_status->len > 1) { sbp_scsi_status(sbp_status, ocb); } else { if (sbp_status->resp != ORB_RES_CMPL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { ccb->ccb_h.status = CAM_REQ_CMP; } } /* fix up inq data */ if (ccb->csio.cdb_io.cdb_bytes[0] == INQUIRY) sbp_fix_inq_data(ocb); xpt_done(ccb); } break; default: break; } } if (!use_doorbell) sbp_free_ocb(sdev, ocb); done: if (reset_agent) sbp_agent_reset(sdev); done0: xfer->recv.pay_len = SBP_RECV_LEN; /* The received packet is usually small enough to be stored within * the buffer. In that case, the controller return ack_complete and * no respose is necessary. * * XXX fwohci.c and firewire.c should inform event_code such as * ack_complete or ack_pending to upper driver. 
*/ #if NEED_RESPONSE xfer->send.off = 0; sfp = (struct fw_pkt *)xfer->send.buf; sfp->mode.wres.dst = rfp->mode.wreqb.src; xfer->dst = sfp->mode.wres.dst; xfer->spd = min(sdev->target->fwdev->speed, max_speed); xfer->hand = sbp_loginres_callback; sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt; sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = 0; sfp->mode.wres.pri = 0; fw_asyreq(xfer->fc, -1, xfer); #else /* recycle */ STAILQ_INSERT_TAIL(&sbp->fwb.xferlist, xfer, link); #endif } static void sbp_recv(struct fw_xfer *xfer) { struct sbp_softc *sbp; sbp = (struct sbp_softc *)xfer->sc; SBP_LOCK(sbp); sbp_recv1(xfer); SBP_UNLOCK(sbp); } /* * sbp_attach() */ static int sbp_attach(device_t dev) { struct sbp_softc *sbp; struct cam_devq *devq; struct firewire_comm *fc; int i, error; if (DFLTPHYS > SBP_MAXPHYS) device_printf(dev, "Warning, DFLTPHYS(%dKB) is larger than " "SBP_MAXPHYS(%dKB).\n", DFLTPHYS / 1024, SBP_MAXPHYS / 1024); if (!firewire_phydma_enable) device_printf(dev, "Warning, hw.firewire.phydma_enable must be 1 " "for SBP over FireWire.\n"); SBP_DEBUG(0) printf("sbp_attach (cold=%d)\n", cold); END_DEBUG if (cold) sbp_cold++; sbp = device_get_softc(dev); sbp->fd.dev = dev; sbp->fd.fc = fc = device_get_ivars(dev); mtx_init(&sbp->mtx, "sbp", NULL, MTX_DEF); if (max_speed < 0) max_speed = fc->speed; error = bus_dma_tag_create(/*parent*/fc->dmat, /* XXX shoud be 4 for sane backend? 
*/ /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/0x100000, /*nsegments*/SBP_IND_MAX, /*maxsegsz*/SBP_SEG_MAX, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&sbp->mtx, &sbp->dmat); if (error != 0) { printf("sbp_attach: Could not allocate DMA tag " "- error %d\n", error); return (ENOMEM); } devq = cam_simq_alloc(/*maxopenings*/SBP_NUM_OCB); if (devq == NULL) return (ENXIO); for (i = 0; i < SBP_NUM_TARGETS; i++) { sbp->targets[i].fwdev = NULL; sbp->targets[i].luns = NULL; sbp->targets[i].sbp = sbp; } sbp->sim = cam_sim_alloc(sbp_action, sbp_poll, "sbp", sbp, device_get_unit(dev), &sbp->mtx, /*untagged*/ 1, /*tagged*/ SBP_QUEUE_LEN - 1, devq); if (sbp->sim == NULL) { cam_simq_free(devq); return (ENXIO); } SBP_LOCK(sbp); if (xpt_bus_register(sbp->sim, dev, /*bus*/0) != CAM_SUCCESS) goto fail; if (xpt_create_path(&sbp->path, NULL, cam_sim_path(sbp->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sbp->sim)); goto fail; } SBP_UNLOCK(sbp); /* We reserve 16 bit space (4 bytes X 64 targets X 256 luns) */ sbp->fwb.start = ((u_int64_t)SBP_BIND_HI << 32) | SBP_DEV2ADDR(0, 0); sbp->fwb.end = sbp->fwb.start + 0xffff; /* pre-allocate xfer */ STAILQ_INIT(&sbp->fwb.xferlist); fw_xferlist_add(&sbp->fwb.xferlist, M_SBP, /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB/2, fc, (void *)sbp, sbp_recv); fw_bindadd(fc, &sbp->fwb); sbp->fd.post_busreset = sbp_post_busreset; sbp->fd.post_explore = sbp_post_explore; if (fc->status != -1) { sbp_post_busreset(sbp); sbp_post_explore(sbp); } SBP_LOCK(sbp); xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL); SBP_UNLOCK(sbp); return (0); fail: SBP_UNLOCK(sbp); cam_sim_free(sbp->sim, /*free_devq*/TRUE); return (ENXIO); } static int sbp_logout_all(struct sbp_softc *sbp) { struct sbp_target *target; struct sbp_dev *sdev; int i, j; SBP_DEBUG(0) printf("sbp_logout_all\n"); END_DEBUG 
	SBP_LOCK_ASSERT(sbp);
	/* Send a LOGOUT management ORB to every LUN that is logged in. */
	for (i = 0; i < SBP_NUM_TARGETS; i++) {
		target = &sbp->targets[i];
		if (target->luns == NULL)
			continue;
		for (j = 0; j < target->num_lun; j++) {
			sdev = target->luns[j];
			if (sdev == NULL)
				continue;
			callout_stop(&sdev->login_callout);
			if (sdev->status >= SBP_DEV_TOATTACH &&
			    sdev->status <= SBP_DEV_ATTACHED)
				sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
		}
	}

	return 0;
}

/*
 * Shutdown method: log out of every device so targets are left in a
 * clean state.
 */
static int
sbp_shutdown(device_t dev)
{
	struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));

	SBP_LOCK(sbp);
	sbp_logout_all(sbp);
	SBP_UNLOCK(sbp);
	return (0);
}

/*
 * Free one LUN device: drain its callouts, destroy per-OCB DMA maps
 * and the login-response DMA buffer, then free the structure.  The
 * lock is dropped around callout_drain()/free operations that must
 * not be called with it held.
 */
static void
sbp_free_sdev(struct sbp_dev *sdev)
{
	struct sbp_softc *sbp;
	int i;

	if (sdev == NULL)
		return;
	sbp = sdev->target->sbp;
	SBP_UNLOCK(sbp);
	callout_drain(&sdev->login_callout);
	for (i = 0; i < SBP_QUEUE_LEN; i++) {
		callout_drain(&sdev->ocb[i].timer);
		bus_dmamap_destroy(sbp->dmat, sdev->ocb[i].dmamap);
	}
	fwdma_free(sbp->fd.fc, &sdev->dma);
	free(sdev, M_SBP);
	SBP_LOCK(sbp);
}

/*
 * Tear down a whole target: drain its callouts, free each LUN, release
 * the pre-allocated write-command xfers and the LUN array, and mark
 * the slot unused (fwdev = NULL) so it can be reassigned.
 */
static void
sbp_free_target(struct sbp_target *target)
{
	struct sbp_softc *sbp;
	struct fw_xfer *xfer, *next;
	int i;

	if (target->luns == NULL)
		return;
	sbp = target->sbp;
	SBP_LOCK_ASSERT(sbp);
	/* Drop the lock only for the callout_drain() calls. */
	SBP_UNLOCK(sbp);
	callout_drain(&target->mgm_ocb_timeout);
	callout_drain(&target->scan_callout);
	SBP_LOCK(sbp);
	for (i = 0; i < target->num_lun; i++)
		sbp_free_sdev(target->luns[i]);
	STAILQ_FOREACH_SAFE(xfer, &target->xferlist, link, next) {
		fw_xfer_free_buf(xfer);
	}
	STAILQ_INIT(&target->xferlist);
	free(target->luns, M_SBP);
	target->num_lun = 0;
	target->luns = NULL;
	target->fwdev = NULL;
}

/*
 * Detach method: unhook from CAM (async notification, path, bus, SIM),
 * log out of all devices, wait briefly for logouts to complete, then
 * free all targets and the FireWire binding/DMA resources.
 */
static int
sbp_detach(device_t dev)
{
	struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));
	struct firewire_comm *fc = sbp->fd.fc;
	int i;

SBP_DEBUG(0)
	printf("sbp_detach\n");
END_DEBUG

	SBP_LOCK(sbp);
	for (i = 0; i < SBP_NUM_TARGETS; i++)
		sbp_cam_detach_target(&sbp->targets[i]);
	xpt_async(AC_LOST_DEVICE, sbp->path, NULL);
	xpt_free_path(sbp->path);
	xpt_bus_deregister(cam_sim_path(sbp->sim));
	cam_sim_free(sbp->sim, /*free_devq*/ TRUE);

	sbp_logout_all(sbp);
SBP_UNLOCK(sbp); /* XXX wait for logout completion */ pause("sbpdtc", hz/2); SBP_LOCK(sbp); for (i = 0; i < SBP_NUM_TARGETS; i++) sbp_free_target(&sbp->targets[i]); SBP_UNLOCK(sbp); fw_bindremove(fc, &sbp->fwb); fw_xferlist_remove(&sbp->fwb.xferlist); bus_dma_tag_destroy(sbp->dmat); mtx_destroy(&sbp->mtx); return (0); } static void sbp_cam_detach_sdev(struct sbp_dev *sdev) { if (sdev == NULL) return; if (sdev->status == SBP_DEV_DEAD) return; if (sdev->status == SBP_DEV_RESET) return; SBP_LOCK_ASSERT(sdev->target->sbp); sbp_abort_all_ocbs(sdev, CAM_DEV_NOT_THERE); if (sdev->path) { xpt_release_devq(sdev->path, sdev->freeze, TRUE); sdev->freeze = 0; xpt_async(AC_LOST_DEVICE, sdev->path, NULL); xpt_free_path(sdev->path); sdev->path = NULL; } } static void sbp_cam_detach_target(struct sbp_target *target) { int i; SBP_LOCK_ASSERT(target->sbp); if (target->luns != NULL) { SBP_DEBUG(0) printf("sbp_detach_target %d\n", target->target_id); END_DEBUG callout_stop(&target->scan_callout); for (i = 0; i < target->num_lun; i++) sbp_cam_detach_sdev(target->luns[i]); } } static void sbp_target_reset(struct sbp_dev *sdev, int method) { int i; struct sbp_target *target = sdev->target; struct sbp_dev *tsdev; SBP_LOCK_ASSERT(target->sbp); for (i = 0; i < target->num_lun; i++) { tsdev = target->luns[i]; if (tsdev == NULL) continue; if (tsdev->status == SBP_DEV_DEAD) continue; if (tsdev->status == SBP_DEV_RESET) continue; xpt_freeze_devq(tsdev->path, 1); tsdev->freeze++; sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT); if (method == 2) tsdev->status = SBP_DEV_LOGIN; } switch (method) { case 1: printf("target reset\n"); sbp_mgm_orb(sdev, ORB_FUN_RST, NULL); break; case 2: printf("reset start\n"); sbp_reset_start(sdev); break; } } static void sbp_mgm_timeout(void *arg) { struct sbp_ocb *ocb = (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; struct sbp_target *target = sdev->target; SBP_LOCK_ASSERT(target->sbp); device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(mgm 
orb:0x%08x)\n", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); target->mgm_ocb_cur = NULL; sbp_free_ocb(sdev, ocb); #if 0 /* XXX */ printf("run next request\n"); sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); #endif device_printf(sdev->target->sbp->fd.dev, "%s:%s reset start\n", __func__, sdev->bustgtlun); sbp_reset_start(sdev); } static void sbp_timeout(void *arg) { struct sbp_ocb *ocb = (struct sbp_ocb *)arg; struct sbp_dev *sdev = ocb->sdev; device_printf(sdev->target->sbp->fd.dev, "%s:%s request timeout(cmd orb:0x%08x) ... ", __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); SBP_LOCK_ASSERT(sdev->target->sbp); sdev->timeout++; switch (sdev->timeout) { case 1: printf("agent reset\n"); xpt_freeze_devq(sdev->path, 1); sdev->freeze++; sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT); sbp_agent_reset(sdev); break; case 2: case 3: sbp_target_reset(sdev, sdev->timeout - 1); break; #if 0 default: /* XXX give up */ sbp_cam_detach_target(target); if (target->luns != NULL) free(target->luns, M_SBP); target->num_lun = 0; target->luns = NULL; target->fwdev = NULL; #endif } } static void sbp_action(struct cam_sim *sim, union ccb *ccb) { struct sbp_softc *sbp = cam_sim_softc(sim); struct sbp_target *target = NULL; struct sbp_dev *sdev = NULL; if (sbp != NULL) SBP_LOCK_ASSERT(sbp); /* target:lun -> sdev mapping */ if (sbp != NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.target_id < SBP_NUM_TARGETS) { target = &sbp->targets[ccb->ccb_h.target_id]; if (target->fwdev != NULL && ccb->ccb_h.target_lun != CAM_LUN_WILDCARD && ccb->ccb_h.target_lun < target->num_lun) { sdev = target->luns[ccb->ccb_h.target_lun]; if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED && sdev->status != SBP_DEV_PROBE) sdev = NULL; } } SBP_DEBUG(1) if (sdev == NULL) printf("invalid target %d lun %jx\n", ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: case XPT_RESET_DEV: case XPT_GET_TRAN_SETTINGS: case 
XPT_SET_TRAN_SETTINGS: case XPT_CALC_GEOMETRY: if (sdev == NULL) { SBP_DEBUG(1) printf("%s:%d:%jx:func_code 0x%04x: " "Invalid target (target needed)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; case XPT_PATH_INQ: case XPT_NOOP: /* The opcodes sometimes aimed at a target (sc is valid), * sometimes aimed at the SIM (sc is invalid and target is * CAM_TARGET_WILDCARD) */ if (sbp == NULL && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { SBP_DEBUG(0) printf("%s:%d:%jx func_code 0x%04x: " "Invalid target (no wildcard)\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); END_DEBUG ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } break; default: /* XXX Hm, we should check the input parameters */ break; } switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; struct sbp_ocb *ocb; int speed; void *cdb; csio = &ccb->csio; mtx_assert(sim->mtx, MA_OWNED); SBP_DEBUG(2) printf("%s:%d:%jx XPT_SCSI_IO: " "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x" ", flags: 0x%02x, " "%db cmd/%db data/%db sense\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->cdb_io.cdb_bytes[1], csio->cdb_io.cdb_bytes[2], csio->cdb_io.cdb_bytes[3], csio->cdb_io.cdb_bytes[4], csio->cdb_io.cdb_bytes[5], csio->cdb_io.cdb_bytes[6], csio->cdb_io.cdb_bytes[7], csio->cdb_io.cdb_bytes[8], csio->cdb_io.cdb_bytes[9], ccb->ccb_h.flags & CAM_DIR_MASK, csio->cdb_len, csio->dxfer_len, csio->sense_len); END_DEBUG if (sdev == NULL) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (csio->cdb_len > sizeof(ocb->orb) - 5 * sizeof(uint32_t)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #if 0 /* if we are in probe stage, pass only probe commands */ if 
(sdev->status == SBP_DEV_PROBE) { char *name; name = xpt_path_periph(ccb->ccb_h.path)->periph_name; printf("probe stage, periph name: %s\n", name); if (strcmp(name, "probe") != 0) { ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } } #endif if ((ocb = sbp_get_ocb(sdev)) == NULL) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; if (sdev->freeze == 0) { xpt_freeze_devq(sdev->path, 1); sdev->freeze++; } xpt_done(ccb); return; } ocb->flags = OCB_ACT_CMD; ocb->sdev = sdev; ocb->ccb = ccb; ccb->ccb_h.ccb_sdev_ptr = sdev; ocb->orb[0] = htonl(1U << 31); ocb->orb[1] = 0; ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS) << 16)); ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET); speed = min(target->fwdev->speed, max_speed); ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7)); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ocb->orb[4] |= htonl(ORB_CMD_IN); } if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = (void *)csio->cdb_io.cdb_ptr; else cdb = (void *)&csio->cdb_io.cdb_bytes; bcopy(cdb, (void *)&ocb->orb[5], csio->cdb_len); /* printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3])); printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7])); */ if (ccb->csio.dxfer_len > 0) { int error; error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat, /*dma map*/ocb->dmamap, ccb, sbp_execute_ocb, ocb, /*flags*/0); if (error) printf("sbp: bus_dmamap_load error %d\n", error); } else sbp_execute_ocb(ocb, NULL, 0, 0); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { printf("sbp_action: block_size is 0.\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } SBP_DEBUG(1) printf("%s:%d:%d:%jx:XPT_CALC_GEOMETRY: " "Volume size = %jd\n", device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, 
(uintmax_t)ccg->volume_size); END_DEBUG cam_calc_geometry(ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { SBP_DEBUG(1) printf("%s:%d:XPT_RESET_BUS: \n", device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim)); END_DEBUG ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; SBP_DEBUG(1) printf("%s:%d:%jx XPT_PATH_INQ:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; cpi->max_target = SBP_NUM_TARGETS - 1; cpi->max_lun = SBP_NUM_LUNS - 1; cpi->initiator_id = SBP_INITIATOR; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 400 * 1000 / 8; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "SBP", HBA_IDLEN); strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->transport = XPORT_SPI; /* XX should have a FireWire */ cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; /* should have a FireWire */ cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; SBP_DEBUG(1) printf("%s:%d:%jx XPT_GET_TRAN_SETTINGS:.\n", device_get_nameunit(sbp->fd.dev), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); END_DEBUG cts->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; 
} case XPT_ABORT: ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX */ default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } return; } static void sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error) { int i; struct sbp_ocb *ocb; struct sbp_ocb *prev; bus_dma_segment_t *s; if (error) printf("sbp_execute_ocb: error=%d\n", error); ocb = (struct sbp_ocb *)arg; SBP_DEBUG(2) printf("sbp_execute_ocb: seg %d", seg); for (i = 0; i < seg; i++) printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr, (uintmax_t)segments[i].ds_len); printf("\n"); END_DEBUG if (seg == 1) { /* direct pointer */ s = &segments[0]; if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->orb[3] = htonl(s->ds_addr); ocb->orb[4] |= htonl(s->ds_len); } else if (seg > 1) { /* page table */ for (i = 0; i < seg; i++) { s = &segments[i]; SBP_DEBUG(0) /* XXX LSI Logic "< 16 byte" bug might be hit */ if (s->ds_len < 16) printf("sbp_execute_ocb: warning, " "segment length(%zd) is less than 16." "(seg=%d/%d)\n", (size_t)s->ds_len, i + 1, seg); END_DEBUG if (s->ds_len > SBP_SEG_MAX) panic("ds_len > SBP_SEG_MAX, fix busdma code"); ocb->ind_ptr[i].hi = htonl(s->ds_len << 16); ocb->ind_ptr[i].lo = htonl(s->ds_addr); } ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg); } if (seg > 0) bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); prev = sbp_enqueue_ocb(ocb->sdev, ocb); fwdma_sync(&ocb->sdev->dma, BUS_DMASYNC_PREWRITE); if (use_doorbell) { if (prev == NULL) { if (ocb->sdev->last_ocb != NULL) sbp_doorbell(ocb->sdev); else sbp_orb_pointer(ocb->sdev, ocb); } } else { if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) { ocb->sdev->flags &= ~ORB_LINK_DEAD; sbp_orb_pointer(ocb->sdev, ocb); } } } static void sbp_poll(struct cam_sim *sim) { struct sbp_softc *sbp; struct firewire_comm *fc; sbp = cam_sim_softc(sim); fc = sbp->fd.fc; fc->poll(fc, 0, -1); return; } static struct sbp_ocb * sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status) { struct sbp_ocb *ocb; struct sbp_ocb *next; int order = 0; SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%08x src %d\n", __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo), sbp_status->src); END_DEBUG SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_FOREACH_SAFE(ocb, &sdev->ocbs, ocb, next) { if (OCB_MATCH(ocb, sbp_status)) { /* found */ STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb); if (ocb->ccb != NULL) callout_stop(&ocb->timer); if (ntohl(ocb->orb[4]) & 0xffff) { bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap); } if (!use_doorbell) { if (sbp_status->src == SRC_NO_NEXT) { if (next != NULL) sbp_orb_pointer(sdev, next); else if (order > 0) { /* * Unordered execution * We need to send pointer for * next ORB */ sdev->flags |= ORB_LINK_DEAD; } } } else { /* * XXX this is not correct for unordered * execution. 
*/ if (sdev->last_ocb != NULL) { sbp_free_ocb(sdev, sdev->last_ocb); } sdev->last_ocb = ocb; if (next != NULL && sbp_status->src == SRC_NO_NEXT) sbp_doorbell(sdev); } break; } else order++; } SBP_DEBUG(0) if (ocb && order > 0) { device_printf(sdev->target->sbp->fd.dev, "%s:%s unordered execution order:%d\n", __func__, sdev->bustgtlun, order); } END_DEBUG return (ocb); } static struct sbp_ocb * sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) { struct sbp_ocb *prev, *prev2; SBP_LOCK_ASSERT(sdev->target->sbp); SBP_DEBUG(1) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%08jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); END_DEBUG prev2 = prev = STAILQ_LAST(&sdev->ocbs, sbp_ocb, ocb); STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb); if (ocb->ccb != NULL) { callout_reset_sbt(&ocb->timer, SBT_1MS * ocb->ccb->ccb_h.timeout, 0, sbp_timeout, ocb, 0); } if (use_doorbell && prev == NULL) prev2 = sdev->last_ocb; if (prev2 != NULL && (ocb->sdev->flags & ORB_LINK_DEAD) == 0) { SBP_DEBUG(1) printf("linking chain 0x%jx -> 0x%jx\n", (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr); END_DEBUG /* * Suppress compiler optimization so that orb[1] must be written first. * XXX We may need an explicit memory barrier for other architectures * other than i386/amd64. 
*/ *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr); *(volatile uint32_t *)&prev2->orb[0] = 0; } return prev; } static struct sbp_ocb * sbp_get_ocb(struct sbp_dev *sdev) { struct sbp_ocb *ocb; SBP_LOCK_ASSERT(sdev->target->sbp); ocb = STAILQ_FIRST(&sdev->free_ocbs); if (ocb == NULL) { sdev->flags |= ORB_SHORTAGE; printf("ocb shortage!!!\n"); return NULL; } STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb); ocb->ccb = NULL; return (ocb); } static void sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) { ocb->flags = 0; ocb->ccb = NULL; SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb); if ((sdev->flags & ORB_SHORTAGE) != 0) { int count; sdev->flags &= ~ORB_SHORTAGE; count = sdev->freeze; sdev->freeze = 0; xpt_release_devq(sdev->path, count, TRUE); } } static void sbp_abort_ocb(struct sbp_ocb *ocb, int status) { struct sbp_dev *sdev; sdev = ocb->sdev; SBP_LOCK_ASSERT(sdev->target->sbp); SBP_DEBUG(0) device_printf(sdev->target->sbp->fd.dev, "%s:%s 0x%jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); END_DEBUG SBP_DEBUG(1) if (ocb->ccb != NULL) sbp_print_scsi_cmd(ocb); END_DEBUG if (ntohl(ocb->orb[4]) & 0xffff) { bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap, (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap); } if (ocb->ccb != NULL) { callout_stop(&ocb->timer); ocb->ccb->ccb_h.status = status; xpt_done(ocb->ccb); } sbp_free_ocb(sdev, ocb); } static void sbp_abort_all_ocbs(struct sbp_dev *sdev, int status) { struct sbp_ocb *ocb, *next; STAILQ_HEAD(, sbp_ocb) temp; STAILQ_INIT(&temp); SBP_LOCK_ASSERT(sdev->target->sbp); STAILQ_CONCAT(&temp, &sdev->ocbs); STAILQ_INIT(&sdev->ocbs); STAILQ_FOREACH_SAFE(ocb, &temp, ocb, next) { sbp_abort_ocb(ocb, status); } if (sdev->last_ocb != NULL) { sbp_free_ocb(sdev, sdev->last_ocb); sdev->last_ocb = NULL; } } static device_method_t sbp_methods[] = { /* device interface */ DEVMETHOD(device_identify, sbp_identify), DEVMETHOD(device_probe, sbp_probe), DEVMETHOD(device_attach, sbp_attach), DEVMETHOD(device_detach, sbp_detach), DEVMETHOD(device_shutdown, sbp_shutdown), { 0, 0 } }; static driver_t sbp_driver = { "sbp", sbp_methods, sizeof(struct sbp_softc), }; DRIVER_MODULE(sbp, firewire, sbp_driver, 0, 0); MODULE_VERSION(sbp, 1); MODULE_DEPEND(sbp, firewire, 1, 1, 1); MODULE_DEPEND(sbp, cam, 1, 1, 1); diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c index 003017c0f3ea..548027b0320a 100644 --- a/sys/dev/hid/hidbus.c +++ b/sys/dev/hid/hidbus.c @@ -1,976 +1,976 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019-2020 Vladimir Kondratyev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define HID_DEBUG_VAR hid_debug #include #include #include #include "hid_if.h" #define INPUT_EPOCH global_epoch_preempt #define HID_RSIZE_MAX 1024 static hid_intr_t hidbus_intr; static device_probe_t hidbus_probe; static device_attach_t hidbus_attach; static device_detach_t hidbus_detach; struct hidbus_ivars { int32_t usage; uint8_t index; uint32_t flags; uintptr_t driver_info; /* for internal use */ struct mtx *mtx; /* child intr mtx */ hid_intr_t *intr_handler; /* executed under mtx*/ void *intr_ctx; unsigned int refcnt; /* protected by mtx */ struct epoch_context epoch_ctx; CK_STAILQ_ENTRY(hidbus_ivars) link; }; struct hidbus_softc { device_t dev; struct sx sx; struct mtx mtx; bool nowrite; struct hid_rdesc_info rdesc; bool overloaded; int nest; /* Child attach nesting lvl */ int nauto; /* Number of autochildren */ CK_STAILQ_HEAD(, hidbus_ivars) tlcs; }; static int hidbus_fill_rdesc_info(struct hid_rdesc_info *hri, const void *data, hid_size_t len) { int error = 0; hri->data = __DECONST(void *, data); hri->len = len; /* * If report descriptor is not available yet, set maximal * 
report sizes high enough to allow hidraw to work. */ hri->isize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_input, &hri->iid); hri->osize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_output, &hri->oid); hri->fsize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_feature, &hri->fid); if (hri->isize > HID_RSIZE_MAX) { DPRINTF("input size is too large, %u bytes (truncating)\n", hri->isize); hri->isize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->osize > HID_RSIZE_MAX) { DPRINTF("output size is too large, %u bytes (truncating)\n", hri->osize); hri->osize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->fsize > HID_RSIZE_MAX) { DPRINTF("feature size is too large, %u bytes (truncating)\n", hri->fsize); hri->fsize = HID_RSIZE_MAX; error = EOVERFLOW; } return (error); } int hidbus_locate(const void *desc, hid_size_t size, int32_t u, enum hid_kind k, uint8_t tlc_index, uint8_t index, struct hid_location *loc, uint32_t *flags, uint8_t *id, struct hid_absinfo *ai) { struct hid_data *d; struct hid_item h; int i; d = hid_start_parse(desc, size, 1 << k); HIDBUS_FOREACH_ITEM(d, &h, tlc_index) { for (i = 0; i < h.nusages; i++) { if (h.kind == k && h.usages[i] == u) { if (index--) break; if (loc != NULL) *loc = h.loc; if (flags != NULL) *flags = h.flags; if (id != NULL) *id = h.report_ID; if (ai != NULL && (h.flags&HIO_RELATIVE) == 0) *ai = (struct hid_absinfo) { .max = h.logical_maximum, .min = h.logical_minimum, .res = hid_item_resolution(&h), }; hid_end_parse(d); return (1); } } } if (loc != NULL) loc->size = 0; if (flags != NULL) *flags = 0; if (id != NULL) *id = 0; hid_end_parse(d); return (0); } bool hidbus_is_collection(const void *desc, hid_size_t size, int32_t usage, uint8_t tlc_index) { struct hid_data *d; struct hid_item h; bool ret = false; d = hid_start_parse(desc, size, 0); HIDBUS_FOREACH_ITEM(d, &h, tlc_index) { if (h.kind == hid_collection && h.usage == usage) { ret = true; break; } } hid_end_parse(d); return 
(ret); } static device_t hidbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct hidbus_softc *sc = device_get_softc(dev); struct hidbus_ivars *tlc; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); tlc = malloc(sizeof(struct hidbus_ivars), M_DEVBUF, M_WAITOK | M_ZERO); tlc->mtx = &sc->mtx; device_set_ivars(child, tlc); sx_xlock(&sc->sx); CK_STAILQ_INSERT_TAIL(&sc->tlcs, tlc, link); sx_unlock(&sc->sx); return (child); } static int hidbus_enumerate_children(device_t dev, const void* data, hid_size_t len) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_data *hd; struct hid_item hi; device_t child; uint8_t index = 0; if (data == NULL || len == 0) return (ENXIO); /* Add a child for each top level collection */ hd = hid_start_parse(data, len, 1 << hid_input); while (hid_get_item(hd, &hi)) { if (hi.kind != hid_collection || hi.collevel != 1) continue; - child = BUS_ADD_CHILD(dev, 0, NULL, -1); + child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "Could not add HID device\n"); continue; } hidbus_set_index(child, index); hidbus_set_usage(child, hi.usage); hidbus_set_flags(child, HIDBUS_FLAG_AUTOCHILD); index++; DPRINTF("Add child TLC: 0x%04x:0x%04x\n", HID_GET_USAGE_PAGE(hi.usage), HID_GET_USAGE(hi.usage)); } hid_end_parse(hd); if (index == 0) return (ENXIO); sc->nauto = index; return (0); } static int hidbus_attach_children(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); int error; HID_INTR_SETUP(device_get_parent(dev), dev, hidbus_intr, sc, &sc->rdesc); error = hidbus_enumerate_children(dev, sc->rdesc.data, sc->rdesc.len); if (error != 0) DPRINTF("failed to enumerate children: error %d\n", error); /* * hidbus_attach_children() can recurse through device_identify-> * hid_set_report_descr() call sequence. Do not perform children * attach twice in that case. 
*/ sc->nest++; bus_generic_probe(dev); sc->nest--; if (sc->nest != 0) return (0); if (hid_is_keyboard(sc->rdesc.data, sc->rdesc.len) != 0) error = bus_generic_attach(dev); else error = bus_delayed_attach_children(dev); if (error != 0) device_printf(dev, "failed to attach child: error %d\n", error); return (error); } static int hidbus_detach_children(device_t dev) { device_t *children, bus; bool is_bus; int i, error; error = 0; is_bus = device_get_devclass(dev) == devclass_find("hidbus"); bus = is_bus ? dev : device_get_parent(dev); KASSERT(device_get_devclass(bus) == devclass_find("hidbus"), ("Device is not hidbus or it's child")); if (is_bus) { /* If hidbus is passed, delete all children. */ bus_generic_detach(bus); device_delete_children(bus); } else { /* * If hidbus child is passed, delete all hidbus children * except caller. Deleting the caller may result in deadlock. */ error = device_get_children(bus, &children, &i); if (error != 0) return (error); while (i-- > 0) { if (children[i] == dev) continue; DPRINTF("Delete child. index=%d (%s)\n", hidbus_get_index(children[i]), device_get_nameunit(children[i])); error = device_delete_child(bus, children[i]); if (error) { DPRINTF("Failed deleting %s\n", device_get_nameunit(children[i])); break; } } free(children, M_TEMP); } HID_INTR_UNSETUP(device_get_parent(bus), bus); return (error); } static int hidbus_probe(device_t dev) { device_set_desc(dev, "HID bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } static int hidbus_attach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_device_info *devinfo = device_get_ivars(dev); void *d_ptr = NULL; hid_size_t d_len; int error; sc->dev = dev; CK_STAILQ_INIT(&sc->tlcs); mtx_init(&sc->mtx, "hidbus ivar lock", NULL, MTX_DEF); sx_init(&sc->sx, "hidbus ivar list lock"); /* * Ignore error. It is possible for non-HID device e.g. XBox360 gamepad * to emulate HID through overloading of report descriptor. 
*/ d_len = devinfo->rdescsize; if (d_len != 0) { d_ptr = malloc(d_len, M_DEVBUF, M_ZERO | M_WAITOK); error = hid_get_rdesc(dev, d_ptr, d_len); if (error != 0) { free(d_ptr, M_DEVBUF); d_len = 0; d_ptr = NULL; } } hidbus_fill_rdesc_info(&sc->rdesc, d_ptr, d_len); sc->nowrite = hid_test_quirk(devinfo, HQ_NOWRITE); error = hidbus_attach_children(dev); if (error != 0) { hidbus_detach(dev); return (ENXIO); } return (0); } static int hidbus_detach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); hidbus_detach_children(dev); sx_destroy(&sc->sx); mtx_destroy(&sc->mtx); free(sc->rdesc.data, M_DEVBUF); return (0); } static void hidbus_child_detached(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); KASSERT(tlc->refcnt == 0, ("Child device is running")); tlc->mtx = &sc->mtx; tlc->intr_handler = NULL; tlc->flags &= ~HIDBUS_FLAG_CAN_POLL; } /* * Epoch callback indicating tlc is safe to destroy */ static void hidbus_ivar_dtor(epoch_context_t ctx) { struct hidbus_ivars *tlc; tlc = __containerof(ctx, struct hidbus_ivars, epoch_ctx); free(tlc, M_DEVBUF); } static void hidbus_child_deleted(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); sx_xlock(&sc->sx); KASSERT(tlc->refcnt == 0, ("Child device is running")); CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link); sx_unlock(&sc->sx); epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx); } static int hidbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: *result = tlc->index; break; case HIDBUS_IVAR_USAGE: *result = tlc->usage; break; case HIDBUS_IVAR_FLAGS: *result = tlc->flags; break; case HIDBUS_IVAR_DRIVER_INFO: *result = tlc->driver_info; break; case HIDBUS_IVAR_LOCK: *result = 
(uintptr_t)(tlc->mtx == &sc->mtx ? NULL : tlc->mtx); break; default: return (EINVAL); } return (0); } static int hidbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: tlc->index = value; break; case HIDBUS_IVAR_USAGE: tlc->usage = value; break; case HIDBUS_IVAR_FLAGS: tlc->flags = value; if ((value & HIDBUS_FLAG_CAN_POLL) != 0) HID_INTR_SETUP( device_get_parent(bus), bus, NULL, NULL, NULL); break; case HIDBUS_IVAR_DRIVER_INFO: tlc->driver_info = value; break; case HIDBUS_IVAR_LOCK: tlc->mtx = (struct mtx *)value == NULL ? &sc->mtx : (struct mtx *)value; break; default: return (EINVAL); } return (0); } /* Location hint for devctl(8) */ static int hidbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct hidbus_ivars *tlc = device_get_ivars(child); sbuf_printf(sb, "index=%hhu", tlc->index); return (0); } /* PnP information for devctl(8) */ static int hidbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct hidbus_ivars *tlc = device_get_ivars(child); struct hid_device_info *devinfo = device_get_ivars(bus); sbuf_printf(sb, "page=0x%04x usage=0x%04x bus=0x%02hx " "vendor=0x%04hx product=0x%04hx version=0x%04hx%s%s", HID_GET_USAGE_PAGE(tlc->usage), HID_GET_USAGE(tlc->usage), devinfo->idBus, devinfo->idVendor, devinfo->idProduct, devinfo->idVersion, devinfo->idPnP[0] == '\0' ? "" : " _HID=", devinfo->idPnP[0] == '\0' ? "" : devinfo->idPnP); return (0); } void hidbus_set_desc(device_t child, const char *suffix) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); struct hid_device_info *devinfo = device_get_ivars(bus); struct hidbus_ivars *tlc = device_get_ivars(child); /* Do not add NULL suffix or if device name already contains it. 
*/ if (suffix != NULL && strcasestr(devinfo->name, suffix) == NULL && (sc->nauto > 1 || (tlc->flags & HIDBUS_FLAG_AUTOCHILD) == 0)) device_set_descf(child, "%s %s", devinfo->name, suffix); else device_set_desc(child, devinfo->name); } device_t hidbus_find_child(device_t bus, int32_t usage) { device_t *children, child; int ccount, i; bus_topo_assert(); /* Get a list of all hidbus children */ if (device_get_children(bus, &children, &ccount) != 0) return (NULL); /* Scan through to find required TLC */ for (i = 0, child = NULL; i < ccount; i++) { if (hidbus_get_usage(children[i]) == usage) { child = children[i]; break; } } free(children, M_TEMP); return (child); } void hidbus_intr(void *context, void *buf, hid_size_t len) { struct hidbus_softc *sc = context; struct hidbus_ivars *tlc; struct epoch_tracker et; /* * Broadcast input report to all subscribers. * TODO: Add check for input report ID. * * Relock mutex on every TLC item as we can't hold any locks over whole * TLC list here due to LOR with open()/close() handlers. 
*/ if (!HID_IN_POLLING_MODE()) epoch_enter_preempt(INPUT_EPOCH, &et); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc->refcnt == 0 || tlc->intr_handler == NULL) continue; if (HID_IN_POLLING_MODE()) { if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0) tlc->intr_handler(tlc->intr_ctx, buf, len); } else { mtx_lock(tlc->mtx); tlc->intr_handler(tlc->intr_ctx, buf, len); mtx_unlock(tlc->mtx); } } if (!HID_IN_POLLING_MODE()) epoch_exit_preempt(INPUT_EPOCH, &et); } void hidbus_set_intr(device_t child, hid_intr_t *handler, void *context) { struct hidbus_ivars *tlc = device_get_ivars(child); tlc->intr_handler = handler; tlc->intr_ctx = context; } static int hidbus_intr_start(device_t bus, device_t child) { MPASS(bus == device_get_parent(child)); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; bool refcnted = false; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { refcnted |= (tlc->refcnt != 0); if (tlc == ivar) { mtx_lock(tlc->mtx); ++tlc->refcnt; mtx_unlock(tlc->mtx); } } error = refcnted ? 0 : hid_intr_start(bus); sx_unlock(&sc->sx); return (error); } static int hidbus_intr_stop(device_t bus, device_t child) { MPASS(bus == device_get_parent(child)); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; bool refcnted = false; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc == ivar) { mtx_lock(tlc->mtx); MPASS(tlc->refcnt != 0); --tlc->refcnt; mtx_unlock(tlc->mtx); } refcnted |= (tlc->refcnt != 0); } error = refcnted ? 
0 : hid_intr_stop(bus); sx_unlock(&sc->sx); return (error); } static void hidbus_intr_poll(device_t bus, device_t child __unused) { hid_intr_poll(bus); } struct hid_rdesc_info * hidbus_get_rdesc_info(device_t child) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); return (&sc->rdesc); } /* * HID interface. * * Hidbus as well as any hidbus child can be passed as first arg. */ /* Read cached report descriptor */ int hid_get_report_descr(device_t dev, void **data, hid_size_t *len) { device_t bus; struct hidbus_softc *sc; bus = device_get_devclass(dev) == devclass_find("hidbus") ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not send request to a transport backend. * Use cached report descriptor instead of it. */ if (sc->rdesc.data == NULL || sc->rdesc.len == 0) return (ENXIO); if (data != NULL) *data = sc->rdesc.data; if (len != NULL) *len = sc->rdesc.len; return (0); } /* * Replace cached report descriptor with top level driver provided one. * * It deletes all hidbus children except caller and enumerates them again after * new descriptor has been registered. Currently it can not be called from * autoenumerated (by report's TLC) child device context as it results in child * duplication. To overcome this limitation hid_set_report_descr() should be * called from device_identify driver's handler with hidbus itself passed as * 'device_t dev' parameter. */ int hid_set_report_descr(device_t dev, const void *data, hid_size_t len) { struct hid_rdesc_info rdesc; device_t bus; struct hidbus_softc *sc; bool is_bus; int error; bus_topo_assert(); is_bus = device_get_devclass(dev) == devclass_find("hidbus"); bus = is_bus ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not overload already overloaded report descriptor in * device_identify handler. It causes infinite recursion loop. 
*/ if (is_bus && sc->overloaded) return(0); DPRINTFN(5, "len=%d\n", len); DPRINTFN(5, "data = %*D\n", len, data, " "); error = hidbus_fill_rdesc_info(&rdesc, data, len); if (error != 0) return (error); error = hidbus_detach_children(dev); if (error != 0) return(error); /* Make private copy to handle a case of dynamicaly allocated data. */ rdesc.data = malloc(len, M_DEVBUF, M_ZERO | M_WAITOK); bcopy(data, rdesc.data, len); sc->overloaded = true; free(sc->rdesc.data, M_DEVBUF); bcopy(&rdesc, &sc->rdesc, sizeof(struct hid_rdesc_info)); error = hidbus_attach_children(bus); return (error); } static int hidbus_get_rdesc(device_t dev, device_t child __unused, void *data, hid_size_t len) { return (hid_get_rdesc(dev, data, len)); } static int hidbus_read(device_t dev, device_t child __unused, void *data, hid_size_t maxlen, hid_size_t *actlen) { return (hid_read(dev, data, maxlen, actlen)); } static int hidbus_write(device_t dev, device_t child __unused, const void *data, hid_size_t len) { struct hidbus_softc *sc; uint8_t id; sc = device_get_softc(dev); /* * Output interrupt endpoint is often optional. If HID device * does not provide it, send reports via control pipe. */ if (sc->nowrite) { /* try to extract the ID byte */ id = (sc->rdesc.oid & (len > 0)) ? 
*(const uint8_t*)data : 0; return (hid_set_report(dev, data, len, HID_OUTPUT_REPORT, id)); } return (hid_write(dev, data, len)); } static int hidbus_get_report(device_t dev, device_t child __unused, void *data, hid_size_t maxlen, hid_size_t *actlen, uint8_t type, uint8_t id) { return (hid_get_report(dev, data, maxlen, actlen, type, id)); } static int hidbus_set_report(device_t dev, device_t child __unused, const void *data, hid_size_t len, uint8_t type, uint8_t id) { return (hid_set_report(dev, data, len, type, id)); } static int hidbus_set_idle(device_t dev, device_t child __unused, uint16_t duration, uint8_t id) { return (hid_set_idle(dev, duration, id)); } static int hidbus_set_protocol(device_t dev, device_t child __unused, uint16_t protocol) { return (hid_set_protocol(dev, protocol)); } static int hidbus_ioctl(device_t dev, device_t child __unused, unsigned long cmd, uintptr_t data) { return (hid_ioctl(dev, cmd, data)); } /*------------------------------------------------------------------------* * hidbus_lookup_id * * This functions takes an array of "struct hid_device_id" and tries * to match the entries with the information in "struct hid_device_info". * * Return values: * NULL: No match found. * Else: Pointer to matching entry. 
*------------------------------------------------------------------------*/ const struct hid_device_id * hidbus_lookup_id(device_t dev, const struct hid_device_id *id, int nitems_id) { const struct hid_device_id *id_end; const struct hid_device_info *info; int32_t usage; bool is_child; if (id == NULL) { goto done; } id_end = id + nitems_id; info = hid_get_device_info(dev); is_child = device_get_devclass(dev) != devclass_find("hidbus"); if (is_child) usage = hidbus_get_usage(dev); /* * Keep on matching array entries until we find a match or * until we reach the end of the matching array: */ for (; id != id_end; id++) { if (is_child && (id->match_flag_page) && (id->page != HID_GET_USAGE_PAGE(usage))) { continue; } if (is_child && (id->match_flag_usage) && (id->usage != HID_GET_USAGE(usage))) { continue; } if ((id->match_flag_bus) && (id->idBus != info->idBus)) { continue; } if ((id->match_flag_vendor) && (id->idVendor != info->idVendor)) { continue; } if ((id->match_flag_product) && (id->idProduct != info->idProduct)) { continue; } if ((id->match_flag_ver_lo) && (id->idVersion_lo > info->idVersion)) { continue; } if ((id->match_flag_ver_hi) && (id->idVersion_hi < info->idVersion)) { continue; } if (id->match_flag_pnp && strncmp(id->idPnP, info->idPnP, HID_PNP_ID_SIZE) != 0) { continue; } /* We found a match! 
*/ return (id); } done: return (NULL); } /*------------------------------------------------------------------------* * hidbus_lookup_driver_info - factored out code * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ int hidbus_lookup_driver_info(device_t child, const struct hid_device_id *id, int nitems_id) { id = hidbus_lookup_id(child, id, nitems_id); if (id) { /* copy driver info */ hidbus_set_driver_info(child, id->driver_info); return (0); } return (ENXIO); } const struct hid_device_info * hid_get_device_info(device_t dev) { device_t bus; bus = device_get_devclass(dev) == devclass_find("hidbus") ? dev : device_get_parent(dev); return (device_get_ivars(bus)); } static device_method_t hidbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, hidbus_probe), DEVMETHOD(device_attach, hidbus_attach), DEVMETHOD(device_detach, hidbus_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus interface */ DEVMETHOD(bus_add_child, hidbus_add_child), DEVMETHOD(bus_child_detached, hidbus_child_detached), DEVMETHOD(bus_child_deleted, hidbus_child_deleted), DEVMETHOD(bus_read_ivar, hidbus_read_ivar), DEVMETHOD(bus_write_ivar, hidbus_write_ivar), DEVMETHOD(bus_child_pnpinfo, hidbus_child_pnpinfo), DEVMETHOD(bus_child_location, hidbus_child_location), /* hid interface */ DEVMETHOD(hid_intr_start, hidbus_intr_start), DEVMETHOD(hid_intr_stop, hidbus_intr_stop), DEVMETHOD(hid_intr_poll, hidbus_intr_poll), DEVMETHOD(hid_get_rdesc, hidbus_get_rdesc), DEVMETHOD(hid_read, hidbus_read), DEVMETHOD(hid_write, hidbus_write), DEVMETHOD(hid_get_report, hidbus_get_report), DEVMETHOD(hid_set_report, hidbus_set_report), DEVMETHOD(hid_set_idle, hidbus_set_idle), DEVMETHOD(hid_set_protocol, hidbus_set_protocol), DEVMETHOD(hid_ioctl, hidbus_ioctl), DEVMETHOD_END }; driver_t hidbus_driver = { "hidbus", hidbus_methods, sizeof(struct hidbus_softc), }; 
MODULE_DEPEND(hidbus, hid, 1, 1, 1); MODULE_VERSION(hidbus, 1); DRIVER_MODULE(hidbus, atopcase, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, hvhid, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, iichid, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, usbhid, hidbus_driver, 0, 0); diff --git a/sys/dev/iicbus/acpi_iicbus.c b/sys/dev/iicbus/acpi_iicbus.c index 4b61387b0ed6..eb5f31d6132f 100644 --- a/sys/dev/iicbus/acpi_iicbus.c +++ b/sys/dev/iicbus/acpi_iicbus.c @@ -1,775 +1,775 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019-2020 Vladimir Kondratyev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

/*
 * NOTE(review): the header names after these #include directives were lost
 * during text extraction — restore from upstream sys/dev/iicbus/acpi_iicbus.c
 * before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define	ACPI_IICBUS_LOCAL_BUFSIZE	32	/* Fits max SMBUS block size */

/*
 * Make a copy of ACPI_RESOURCE_I2C_SERIALBUS type and replace "pointer to ACPI
 * object name string" field with pointer to ACPI object itself.
 * This saves us extra strdup()/free() pair on acpi_iicbus_get_i2cres call.
 */
typedef	ACPI_RESOURCE_I2C_SERIALBUS	ACPI_IICBUS_RESOURCE_I2C_SERIALBUS;
#define	ResourceSource_Handle	ResourceSource.StringPtr

/* Hooks for the ACPI CA debugging infrastructure. */
#define	_COMPONENT	ACPI_BUS
ACPI_MODULE_NAME("IIC")

/* In-memory layout of the GenericSerialBus region buffer passed by ACPICA. */
struct gsb_buffer {
	UINT8 status;	/* transfer result, written back by the handler */
	UINT8 len;	/* block-transfer length */
	UINT8 data[];	/* payload bytes */
} __packed;

struct acpi_iicbus_softc {
	struct iicbus_softc super_sc;		/* must be first: base class */
	ACPI_CONNECTION_INFO space_handler_info;
	bool space_handler_installed;
};

struct acpi_iicbus_ivars {
	struct iicbus_ivar super_ivar;		/* must be first: base class */
	ACPI_HANDLE handle;			/* ACPI handle of the child */
};

/* Opt-in tunable: install the GSBUS address-space handler at attach. */
static int install_space_handler = 0;
TUNABLE_INT("hw.iicbus.enable_acpi_space_handler", &install_space_handler);

/* True if the resource describes an I2C-type serial bus connection. */
static inline bool
acpi_resource_is_i2c_serialbus(ACPI_RESOURCE *res)
{

	return (res->Type == ACPI_RESOURCE_TYPE_SERIAL_BUS &&
	    res->Data.CommonSerialBus.Type == ACPI_RESOURCE_SERIAL_TYPE_I2C);
}

/*
 * IICBUS Address space handler
 */

/* Write a single byte to the slave (SMBus "send byte"). */
static int
acpi_iicbus_sendb(device_t dev, u_char slave, char byte)
{
	struct iic_msg msgs[] = {
	    { slave, IIC_M_WR, 1, &byte },
	};

	return (iicbus_transfer(dev, msgs, nitems(msgs)));
}

/*
 * Read a single byte from the slave (SMBus "receive byte").
 * *byte is updated only on success.
 */
static int
acpi_iicbus_recvb(device_t dev, u_char slave, char *byte)
{
	char buf;
	struct iic_msg msgs[] = {
	    { slave, IIC_M_RD, 1, &buf },
	};
	int error;

	error = iicbus_transfer(dev, msgs, nitems(msgs));
	if (error == 0)
		*byte = buf;

	return (error);
}

/*
 * Write buflen bytes at register/command `cmd` (SMBus write-style:
 * command byte then data, in one transaction without a repeated stop).
 */
static int
acpi_iicbus_write(device_t dev, u_char slave, char cmd, void *buf,
    uint16_t buflen)
{
	struct iic_msg msgs[] = {
	    { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd },
	    { slave, IIC_M_WR | IIC_M_NOSTART, buflen, buf },
	};

	return (iicbus_transfer(dev, msgs, nitems(msgs)));
}
/*
 * Read buflen bytes at register/command `cmd` (write command, repeated
 * start, then read).  A stack buffer is used for small transfers to
 * avoid a malloc in the common case.
 */
static int
acpi_iicbus_read(device_t dev, u_char slave, char cmd, void *buf,
    uint16_t buflen)
{
	uint8_t local_buffer[ACPI_IICBUS_LOCAL_BUFSIZE];
	struct iic_msg msgs[] = {
	    { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd },
	    { slave, IIC_M_RD, buflen, NULL },
	};
	int error;

	if (buflen <= sizeof(local_buffer))
		msgs[1].buf = local_buffer;
	else
		msgs[1].buf = malloc(buflen, M_DEVBUF, M_WAITOK);
	error = iicbus_transfer(dev, msgs, nitems(msgs));
	if (error == 0)
		memcpy(buf, msgs[1].buf, buflen);
	if (msgs[1].buf != local_buffer)
		free(msgs[1].buf, M_DEVBUF);

	return (error);
}

/*
 * SMBus-style block write: command byte, count byte, then `count` data
 * bytes in a single transaction.  Zero-length blocks are rejected.
 */
static int
acpi_iicbus_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf)
{
	uint8_t bytes[2] = { cmd, count };
	struct iic_msg msgs[] = {
	    { slave, IIC_M_WR | IIC_M_NOSTOP, nitems(bytes), bytes },
	    { slave, IIC_M_WR | IIC_M_NOSTART, count, buf },
	};

	if (count == 0)
		return (errno2iic(EINVAL));

	return (iicbus_transfer(dev, msgs, nitems(msgs)));
}

/*
 * SMBus-style block read: write the command, read the length byte, then
 * read that many data bytes.  The bus is held across both transfers
 * because the slave-supplied length determines the second one.
 * On success *count and buf are filled in.
 */
static int
acpi_iicbus_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf)
{
	uint8_t local_buffer[ACPI_IICBUS_LOCAL_BUFSIZE];
	u_char len;
	struct iic_msg msgs[] = {
	    { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd },
	    { slave, IIC_M_RD | IIC_M_NOSTOP, 1, &len },
	};
	struct iic_msg block_msg[] = {
	    { slave, IIC_M_RD | IIC_M_NOSTART, 0, NULL },
	};
	device_t parent = device_get_parent(dev);
	int error;

	/* Have to do this because the command is split in two transfers. */
	error = iicbus_request_bus(parent, dev, IIC_WAIT);
	if (error == 0)
		error = iicbus_transfer(dev, msgs, nitems(msgs));
	if (error == 0) {
		/*
		 * If the slave offers an empty reply,
		 * read one byte to generate the stop or abort.
		 */
		if (len == 0)
			block_msg[0].len = 1;
		else
			block_msg[0].len = len;
		if (len <= sizeof(local_buffer))
			block_msg[0].buf = local_buffer;
		else
			block_msg[0].buf = malloc(len, M_DEVBUF, M_WAITOK);
		error = iicbus_transfer(dev, block_msg, nitems(block_msg));
		/* An empty block reply is still a protocol error. */
		if (len == 0)
			error = errno2iic(EBADMSG);
		if (error == 0) {
			*count = len;
			memcpy(buf, block_msg[0].buf, len);
		}
		if (block_msg[0].buf != local_buffer)
			free(block_msg[0].buf, M_DEVBUF);
	}
	(void)iicbus_release_bus(parent, dev);
	return (error);
}

/*
 * ACPICA GenericSerialBus operation-region handler.  Decodes the AML
 * field attribute + direction encoded in Function and dispatches to the
 * matching SMBus-style helper above.  *Value points at a gsb_buffer;
 * the transfer result code is written to its status byte.
 */
static ACPI_STATUS
acpi_iicbus_space_handler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address,
    UINT32 BitWidth, UINT64 *Value, void *HandlerContext, void *RegionContext)
{
	struct gsb_buffer *gsb;
	struct acpi_iicbus_softc *sc;
	device_t dev;
	ACPI_CONNECTION_INFO *info;
	ACPI_RESOURCE_I2C_SERIALBUS *sb;
	ACPI_RESOURCE *res;
	ACPI_STATUS s;
	int val;

	gsb = (struct gsb_buffer *)Value;
	if (gsb == NULL)
		return (AE_BAD_PARAMETER);

	/* Recover the connection's I2C resource descriptor. */
	info = HandlerContext;
	s = AcpiBufferToResource(info->Connection, info->Length, &res);
	if (ACPI_FAILURE(s))
		return (s);
	if (!acpi_resource_is_i2c_serialbus(res)) {
		s = AE_BAD_PARAMETER;
		goto err;
	}

	sb = &res->Data.I2cSerialBus;

	/* XXX Ignore 10bit addressing for now */
	if (sb->AccessMode == ACPI_I2C_10BIT_MODE) {
		s = AE_BAD_PARAMETER;
		goto err;
	}

#define	AML_FIELD_ATTRIB_MASK		0x0F
#define	AML_FIELD_ATTRIO(attr, io)	(((attr) << 16) | (io))

	/* Keep only the access attribute and the read/write direction. */
	Function &= AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_MASK, ACPI_IO_MASK);
	sc = __containerof(info, struct acpi_iicbus_softc, space_handler_info);
	dev = sc->super_sc.dev;

	/* the address is expected to need shifting */
	sb->SlaveAddress <<= 1;

	switch (Function) {
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_SEND_RECEIVE, ACPI_READ):
		val = acpi_iicbus_recvb(dev, sb->SlaveAddress, gsb->data);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_SEND_RECEIVE, ACPI_WRITE):
		val = acpi_iicbus_sendb(dev, sb->SlaveAddress, gsb->data[0]);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTE, ACPI_READ):
		val = acpi_iicbus_read(dev, sb->SlaveAddress, Address,
		    gsb->data, 1);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTE, ACPI_WRITE):
		val = acpi_iicbus_write(dev, sb->SlaveAddress, Address,
		    gsb->data, 1);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_WORD, ACPI_READ):
		val = acpi_iicbus_read(dev, sb->SlaveAddress, Address,
		    gsb->data, 2);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_WORD, ACPI_WRITE):
		val = acpi_iicbus_write(dev, sb->SlaveAddress, Address,
		    gsb->data, 2);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BLOCK, ACPI_READ):
		val = acpi_iicbus_bread(dev, sb->SlaveAddress, Address,
		    &gsb->len, gsb->data);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BLOCK, ACPI_WRITE):
		val = acpi_iicbus_bwrite(dev, sb->SlaveAddress, Address,
		    gsb->len, gsb->data);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTES, ACPI_READ):
		val = acpi_iicbus_read(dev, sb->SlaveAddress, Address,
		    gsb->data, info->AccessLength);
		break;
	case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTES, ACPI_WRITE):
		val = acpi_iicbus_write(dev, sb->SlaveAddress, Address,
		    gsb->data, info->AccessLength);
		break;
	default:
		device_printf(dev, "protocol(0x%04x) is not supported.\n",
		    Function);
		s = AE_BAD_PARAMETER;
		goto err;
	}

	gsb->status = val;

err:
	ACPI_FREE(res);

	return (s);
}

/*
 * Register acpi_iicbus_space_handler for the GSBUS address space on the
 * controller's ACPI node.  Returns 0 on success, ENXIO on failure.
 */
static int
acpi_iicbus_install_address_space_handler(struct acpi_iicbus_softc *sc)
{
	ACPI_HANDLE handle;
	ACPI_STATUS s;

	handle = acpi_get_handle(device_get_parent(sc->super_sc.dev));
	s = AcpiInstallAddressSpaceHandler(handle, ACPI_ADR_SPACE_GSBUS,
	    &acpi_iicbus_space_handler, NULL, &sc->space_handler_info);
	if (ACPI_FAILURE(s)) {
		device_printf(sc->super_sc.dev,
		    "Failed to install GSBUS Address Space Handler in ACPI\n");
		return (ENXIO);
	}

	return (0);
}

/* Undo acpi_iicbus_install_address_space_handler(). */
static int
acpi_iicbus_remove_address_space_handler(struct acpi_iicbus_softc *sc)
{
	ACPI_HANDLE handle;
	ACPI_STATUS s;

	handle = acpi_get_handle(device_get_parent(sc->super_sc.dev));
	s = AcpiRemoveAddressSpaceHandler(handle, ACPI_ADR_SPACE_GSBUS,
	    &acpi_iicbus_space_handler);
	if (ACPI_FAILURE(s)) {
		device_printf(sc->super_sc.dev,
		    "Failed to remove GSBUS Address Space Handler from ACPI\n");
		return (ENXIO);
	}

	return (0);
}

/*
 * _CRS walker callback: copy the first I2cSerialBus descriptor into the
 * caller's buffer, resolving the ResourceSource name to an ACPI handle.
 * Returns AE_NOT_FOUND if the resource list ends without one.
 */
static ACPI_STATUS
acpi_iicbus_get_i2cres_cb(ACPI_RESOURCE *res, void *context)
{
	ACPI_IICBUS_RESOURCE_I2C_SERIALBUS *sb = context;
	ACPI_STATUS status;
	ACPI_HANDLE handle;

	if (acpi_resource_is_i2c_serialbus(res)) {
		status = AcpiGetHandle(ACPI_ROOT_OBJECT,
		    res->Data.I2cSerialBus.ResourceSource.StringPtr, &handle);
		if (ACPI_FAILURE(status))
			return (status);
		memcpy(sb, &res->Data.I2cSerialBus,
		    sizeof(ACPI_IICBUS_RESOURCE_I2C_SERIALBUS));
		/*
		 * replace "pointer to ACPI object name string" field
		 * with pointer to ACPI object itself.
		 */
		sb->ResourceSource_Handle = handle;
		return (AE_CTRL_TERMINATE);
	} else if (res->Type == ACPI_RESOURCE_TYPE_END_TAG)
		return (AE_NOT_FOUND);

	return (AE_OK);
}

/* Extract the I2C serial-bus descriptor from the node's _CRS. */
static ACPI_STATUS
acpi_iicbus_get_i2cres(ACPI_HANDLE handle, ACPI_RESOURCE_I2C_SERIALBUS *sb)
{

	return (AcpiWalkResources(handle, "_CRS",
	    acpi_iicbus_get_i2cres_cb, sb));
}

/*
 * _CRS walker callback: record the child's first interrupt resource in
 * its resource list.  GPIO-based interrupts are recognized but not yet
 * supported.
 */
static ACPI_STATUS
acpi_iicbus_parse_resources_cb(ACPI_RESOURCE *res, void *context)
{
	device_t dev = context;
	struct iicbus_ivar *super_devi = device_get_ivars(dev);
	struct resource_list *rl = &super_devi->rl;
	int irq, gpio_pin;

	switch(res->Type) {
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (res->Data.ExtendedIrq.InterruptCount > 0) {
			irq = res->Data.ExtendedIrq.Interrupts[0];
			if (bootverbose)
				printf(" IRQ: %d\n", irq);
			resource_list_add_next(rl, SYS_RES_IRQ, irq, irq, 1);
			return (AE_CTRL_TERMINATE);
		}
		break;
	case ACPI_RESOURCE_TYPE_GPIO:
		if (res->Data.Gpio.ConnectionType ==
		    ACPI_RESOURCE_GPIO_TYPE_INT) {
			/* Not supported by FreeBSD yet */
			gpio_pin = res->Data.Gpio.PinTable[0];
			if (bootverbose)
				printf(" GPIO IRQ pin: %d\n", gpio_pin);
			return (AE_CTRL_TERMINATE);
		}
		break;
	default:
		break;
	}

	return (AE_OK);
}

/* Populate the child's resource list from its _CRS. */
static ACPI_STATUS
acpi_iicbus_parse_resources(ACPI_HANDLE handle, device_t dev)
{

	return (AcpiWalkResources(handle, "_CRS",
	    acpi_iicbus_parse_resources_cb, dev));
}

static void
acpi_iicbus_dump_res(device_t dev,
ACPI_IICBUS_RESOURCE_I2C_SERIALBUS *sb) { device_printf(dev, "found ACPI child\n"); printf(" SlaveAddress: 0x%04hx\n", sb->SlaveAddress); printf(" ConnectionSpeed: %uHz\n", sb->ConnectionSpeed); printf(" SlaveMode: %s\n", sb->SlaveMode == ACPI_CONTROLLER_INITIATED ? "ControllerInitiated" : "DeviceInitiated"); printf(" AddressingMode: %uBit\n", sb->AccessMode == 0 ? 7 : 10); printf(" ConnectionSharing: %s\n", sb->ConnectionSharing == 0 ? "Exclusive" : "Shared"); } static device_t acpi_iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { return (iicbus_add_child_common( dev, order, name, unit, sizeof(struct acpi_iicbus_ivars))); } static ACPI_STATUS acpi_iicbus_enumerate_child(ACPI_HANDLE handle, UINT32 level, void *context, void **result) { device_t iicbus, child, acpi_child, acpi0; struct iicbus_softc *super_sc; ACPI_IICBUS_RESOURCE_I2C_SERIALBUS sb; ACPI_STATUS status; UINT32 sta; iicbus = context; super_sc = device_get_softc(iicbus); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (!ACPI_FAILURE(acpi_GetInteger(handle, "_STA", &sta)) && !ACPI_DEVICE_PRESENT(sta)) return (AE_OK); if (!acpi_has_hid(handle)) return (AE_OK); /* * Read "I2C Serial Bus Connection Resource Descriptor" * described in p.19.6.57 of ACPI specification. 
*/ bzero(&sb, sizeof(ACPI_IICBUS_RESOURCE_I2C_SERIALBUS)); if (ACPI_FAILURE(acpi_iicbus_get_i2cres(handle, &sb)) || sb.SlaveAddress == 0) return (AE_OK); if (sb.ResourceSource_Handle != acpi_get_handle(device_get_parent(iicbus))) return (AE_OK); if (bootverbose) acpi_iicbus_dump_res(iicbus, &sb); /* Find out speed of the slowest slave */ if (super_sc->bus_freq == 0 || super_sc->bus_freq > sb.ConnectionSpeed) super_sc->bus_freq = sb.ConnectionSpeed; /* Delete existing child of acpi bus */ acpi_child = acpi_get_device(handle); if (acpi_child != NULL) { acpi0 = devclass_get_device(devclass_find("acpi"), 0); if (device_get_parent(acpi_child) != acpi0) return (AE_OK); if (device_is_attached(acpi_child)) return (AE_OK); if (device_delete_child(acpi0, acpi_child) != 0) return (AE_OK); } - child = BUS_ADD_CHILD(iicbus, 0, NULL, -1); + child = BUS_ADD_CHILD(iicbus, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(iicbus, "add child failed\n"); return (AE_OK); } iicbus_set_addr(child, sb.SlaveAddress); acpi_set_handle(child, handle); (void)acpi_iicbus_parse_resources(handle, child); /* * Update ACPI-CA to use the IIC enumerated device_t for this handle. 
 */
	status = AcpiAttachData(handle, acpi_fake_objhandler, child);
	if (ACPI_FAILURE(status))
		printf("WARNING: Unable to attach object data to %s - %s\n",
		    acpi_name(handle), AcpiFormatException(status));

	return (AE_OK);
}

/* Walk the whole namespace, creating iicbus children for I2C devices. */
static ACPI_STATUS
acpi_iicbus_enumerate_children(device_t dev)
{

	return (AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
	    ACPI_UINT32_MAX, acpi_iicbus_enumerate_child, NULL, dev, NULL));
}

/*
 * Set the ACPI power state of the bus's children.  When all_children is
 * false only children with an attached driver are touched.
 */
static void
acpi_iicbus_set_power_children(device_t dev, int state, bool all_children)
{
	device_t *devlist;
	int i, numdevs;

	if (device_get_children(dev, &devlist, &numdevs) != 0)
		return;

	for (i = 0; i < numdevs; i++)
		if (all_children || device_is_attached(devlist[i]) != 0)
			acpi_set_powerstate(devlist[i], state);

	free(devlist, M_TEMP);
}

/* Probe: succeed only when the parent controller has an ACPI handle. */
static int
acpi_iicbus_probe(device_t dev)
{
	ACPI_HANDLE handle;
	device_t controller;

	if (acpi_disabled("iicbus"))
		return (ENXIO);

	controller = device_get_parent(dev);
	if (controller == NULL)
		return (ENXIO);

	handle = acpi_get_handle(controller);
	if (handle == NULL)
		return (ENXIO);

	device_set_desc(dev, "Philips I2C bus (ACPI-hinted)");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: enumerate ACPI children, power them up, then do the common
 * iicbus attach.  The GSBUS handler is installed only when enabled by
 * the hw.iicbus.enable_acpi_space_handler tunable.
 */
static int
acpi_iicbus_attach(device_t dev)
{
	struct acpi_iicbus_softc *sc = device_get_softc(dev);
	int error;

	if (ACPI_FAILURE(acpi_iicbus_enumerate_children(dev)))
		device_printf(dev, "children enumeration failed\n");

	acpi_iicbus_set_power_children(dev, ACPI_STATE_D0, true);
	error = iicbus_attach_common(dev, sc->super_sc.bus_freq);
	if (error == 0 && install_space_handler != 0 &&
	    acpi_iicbus_install_address_space_handler(sc) == 0)
		sc->space_handler_installed = true;

	return (error);
}

/* Detach: remove the GSBUS handler (if any), power children down. */
static int
acpi_iicbus_detach(device_t dev)
{
	struct acpi_iicbus_softc *sc = device_get_softc(dev);

	if (sc->space_handler_installed)
		acpi_iicbus_remove_address_space_handler(sc);
	acpi_iicbus_set_power_children(dev, ACPI_STATE_D3, false);

	return (iicbus_detach(dev));
}

/* Suspend: power down attached children after generic suspend. */
static int
acpi_iicbus_suspend(device_t dev)
{
	int error;

	error = bus_generic_suspend(dev);
	if (error == 0)
		acpi_iicbus_set_power_children(dev, ACPI_STATE_D3, false);

	return (error);
}

/* Resume: power up attached children before generic resume. */
static int
acpi_iicbus_resume(device_t dev)
{

	acpi_iicbus_set_power_children(dev, ACPI_STATE_D0, false);

	return (bus_generic_resume(dev));
}

/*
 * If this device is an ACPI child but no one claimed it, attempt
 * to power it off. We'll power it back up when a driver is added.
 */
static void
acpi_iicbus_probe_nomatch(device_t bus, device_t child)
{

	iicbus_probe_nomatch(bus, child);
	acpi_set_powerstate(child, ACPI_STATE_D3);
}

/*
 * If a new driver has a chance to probe a child, first power it up.
 */
static void
acpi_iicbus_driver_added(device_t dev, driver_t *driver)
{
	device_t child, *devlist;
	int i, numdevs;

	DEVICE_IDENTIFY(driver, dev);

	if (device_get_children(dev, &devlist, &numdevs) != 0)
		return;

	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		if (device_get_state(child) == DS_NOTPRESENT) {
			acpi_set_powerstate(child, ACPI_STATE_D0);
			/* Power back down if nothing claimed the child. */
			if (device_probe_and_attach(child) != 0)
				acpi_set_powerstate(child, ACPI_STATE_D3);
		}
	}

	free(devlist, M_TEMP);
}

/* Drop the ACPI-CA back-pointer installed at enumeration time. */
static void
acpi_iicbus_child_deleted(device_t bus, device_t child)
{
	struct acpi_iicbus_ivars *devi = device_get_ivars(child);

	if (acpi_get_device(devi->handle) == child)
		AcpiDetachData(devi->handle, acpi_fake_objhandler);
}

/* Serve ACPI_IVAR_HANDLE locally; defer everything else to iicbus. */
static int
acpi_iicbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *res)
{
	struct acpi_iicbus_ivars *devi = device_get_ivars(child);

	switch (which) {
	case ACPI_IVAR_HANDLE:
		*res = (uintptr_t)devi->handle;
		break;
	default:
		return (iicbus_read_ivar(bus, child, which, res));
	}

	return (0);
}

/* ACPI_IVAR_HANDLE is write-once; defer everything else to iicbus. */
static int
acpi_iicbus_write_ivar(device_t bus, device_t child, int which, uintptr_t val)
{
	struct acpi_iicbus_ivars *devi = device_get_ivars(child);

	switch (which) {
	case ACPI_IVAR_HANDLE:
		if (devi->handle != NULL)
			return (EINVAL);
		devi->handle = (ACPI_HANDLE)val;
		break;
	default:
		return (iicbus_write_ivar(bus, child, which, val));
	}

	return (0);
}

/* Location hint for devctl(8). Concatenate IIC and ACPI hints.
*/ static int acpi_iicbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); int error; /* read IIC location hint string into the buffer. */ error = iicbus_child_location(bus, child, sb); if (error != 0) return (error); /* Place ACPI string right after IIC one's terminating NUL. */ if (devi->handle != NULL) sbuf_printf(sb, " handle=%s", acpi_name(devi->handle)); return (0); } /* PnP information for devctl(8). Concatenate IIC and ACPI info strings. */ static int acpi_iicbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); int error; /* read IIC PnP string into the buffer. */ error = iicbus_child_pnpinfo(bus, child, sb); if (error != 0) return (error); if (devi->handle == NULL) return (0); error = acpi_pnpinfo(devi->handle, sb); return (error); } static device_method_t acpi_iicbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_iicbus_probe), DEVMETHOD(device_attach, acpi_iicbus_attach), DEVMETHOD(device_detach, acpi_iicbus_detach), DEVMETHOD(device_suspend, acpi_iicbus_suspend), DEVMETHOD(device_resume, acpi_iicbus_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_iicbus_add_child), DEVMETHOD(bus_probe_nomatch, acpi_iicbus_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_iicbus_driver_added), DEVMETHOD(bus_child_deleted, acpi_iicbus_child_deleted), DEVMETHOD(bus_read_ivar, acpi_iicbus_read_ivar), DEVMETHOD(bus_write_ivar, acpi_iicbus_write_ivar), DEVMETHOD(bus_child_location, acpi_iicbus_child_location), DEVMETHOD(bus_child_pnpinfo, acpi_iicbus_child_pnpinfo), DEVMETHOD(bus_get_device_path, acpi_get_acpi_device_path), DEVMETHOD_END, }; DEFINE_CLASS_1(iicbus, acpi_iicbus_driver, acpi_iicbus_methods, sizeof(struct acpi_iicbus_softc), iicbus_driver); MODULE_VERSION(acpi_iicbus, 1); MODULE_DEPEND(acpi_iicbus, acpi, 1, 1, 1); diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c index 
0d65bdea5782..ec37a6e19342 100644 --- a/sys/dev/iicbus/iic.c +++ b/sys/dev/iicbus/iic.c @@ -1,611 +1,611 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * Copyright (c) 2023 Juniper Networks, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" struct iic_softc { device_t sc_dev; struct cdev *sc_devnode; }; struct iic_cdevpriv { struct sx lock; struct iic_softc *sc; bool started; uint8_t addr; }; #ifdef COMPAT_FREEBSD32 struct iic_msg32 { uint16_t slave; uint16_t flags; uint16_t len; uint32_t buf; }; struct iiccmd32 { u_char slave; uint32_t count; uint32_t last; uint32_t buf; }; struct iic_rdwr_data32 { uint32_t msgs; uint32_t nmsgs; }; #define I2CWRITE32 _IOW('i', 4, struct iiccmd32) #define I2CREAD32 _IOW('i', 5, struct iiccmd32) #define I2CRDWR32 _IOW('i', 6, struct iic_rdwr_data32) #endif #define IIC_LOCK(cdp) sx_xlock(&(cdp)->lock) #define IIC_UNLOCK(cdp) sx_xunlock(&(cdp)->lock) static MALLOC_DEFINE(M_IIC, "iic", "I2C device data"); static int iic_probe(device_t); static int iic_attach(device_t); static int iic_detach(device_t); static void iic_identify(driver_t *driver, device_t parent); static void iicdtor(void *data); static int iicuio_move(struct iic_cdevpriv *priv, struct uio *uio, int last); static int iicuio(struct cdev *dev, struct uio *uio, int ioflag); static int iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags, bool compat32); static device_method_t iic_methods[] = { /* device interface */ DEVMETHOD(device_identify, iic_identify), DEVMETHOD(device_probe, iic_probe), DEVMETHOD(device_attach, iic_attach), DEVMETHOD(device_detach, iic_detach), /* iicbus interface */ DEVMETHOD(iicbus_intr, iicbus_generic_intr), { 0, 0 } }; static driver_t iic_driver = { "iic", iic_methods, sizeof(struct iic_softc), }; static d_open_t iicopen; static d_ioctl_t iicioctl; static struct cdevsw iic_cdevsw = { .d_version = D_VERSION, .d_open = iicopen, .d_read = iicuio, .d_write = iicuio, .d_ioctl = iicioctl, .d_name = "iic", }; static void iic_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "iic", 
-1) == NULL) - BUS_ADD_CHILD(parent, 0, "iic", -1); + BUS_ADD_CHILD(parent, 0, "iic", DEVICE_UNIT_ANY); } static int iic_probe(device_t dev) { if (iicbus_get_addr(dev) > 0) return (ENXIO); device_set_desc(dev, "I2C generic I/O"); return (0); } static int iic_attach(device_t dev) { struct iic_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_devnode = make_dev(&iic_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "iic%d", device_get_unit(dev)); if (sc->sc_devnode == NULL) { device_printf(dev, "failed to create character device\n"); return (ENXIO); } sc->sc_devnode->si_drv1 = sc; return (0); } static int iic_detach(device_t dev) { struct iic_softc *sc; sc = device_get_softc(dev); if (sc->sc_devnode) destroy_dev(sc->sc_devnode); return (0); } static int iicopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct iic_cdevpriv *priv; int error; priv = malloc(sizeof(*priv), M_IIC, M_WAITOK | M_ZERO); sx_init(&priv->lock, "iic"); priv->sc = dev->si_drv1; error = devfs_set_cdevpriv(priv, iicdtor); if (error != 0) free(priv, M_IIC); return (error); } static void iicdtor(void *data) { device_t iicdev, parent; struct iic_cdevpriv *priv; priv = data; KASSERT(priv != NULL, ("iic cdevpriv should not be NULL!")); iicdev = priv->sc->sc_dev; parent = device_get_parent(iicdev); if (priv->started) { iicbus_stop(parent); iicbus_reset(parent, IIC_UNKNOWN, 0, NULL); iicbus_release_bus(parent, iicdev); } sx_destroy(&priv->lock); free(priv, M_IIC); } static int iicuio_move(struct iic_cdevpriv *priv, struct uio *uio, int last) { device_t parent; int error, num_bytes, transferred_bytes, written_bytes; char buffer[128]; parent = device_get_parent(priv->sc->sc_dev); error = 0; /* * We can only transfer up to sizeof(buffer) bytes in 1 shot, so loop until * everything has been transferred. 
*/ while ((error == 0) && (uio->uio_resid > 0)) { num_bytes = MIN(uio->uio_resid, sizeof(buffer)); transferred_bytes = 0; switch (uio->uio_rw) { case UIO_WRITE: error = uiomove(buffer, num_bytes, uio); while ((error == 0) && (transferred_bytes < num_bytes)) { written_bytes = 0; error = iicbus_write(parent, &buffer[transferred_bytes], num_bytes - transferred_bytes, &written_bytes, 0); transferred_bytes += written_bytes; } break; case UIO_READ: error = iicbus_read(parent, buffer, num_bytes, &transferred_bytes, ((uio->uio_resid <= sizeof(buffer)) ? last : 0), 0); if (error == 0) error = uiomove(buffer, transferred_bytes, uio); break; } } return (error); } static int iicuio(struct cdev *dev, struct uio *uio, int ioflag) { device_t parent; struct iic_cdevpriv *priv; int error; uint8_t addr; priv = NULL; error = devfs_get_cdevpriv((void**)&priv); if (error != 0) return (error); KASSERT(priv != NULL, ("iic cdevpriv should not be NULL!")); IIC_LOCK(priv); if (priv->started || (priv->addr == 0)) { IIC_UNLOCK(priv); return (ENXIO); } parent = device_get_parent(priv->sc->sc_dev); error = iicbus_request_bus(parent, priv->sc->sc_dev, (ioflag & O_NONBLOCK) ? 
IIC_DONTWAIT : (IIC_WAIT | IIC_INTR)); if (error != 0) { IIC_UNLOCK(priv); return (error); } switch (uio->uio_rw) { case UIO_READ: addr = priv->addr | LSB; break; case UIO_WRITE: addr = priv->addr & ~LSB; break; } error = iicbus_start(parent, addr, 0); if (error != 0) { iicbus_release_bus(parent, priv->sc->sc_dev); IIC_UNLOCK(priv); return (error); } error = iicuio_move(priv, uio, IIC_LAST_READ); iicbus_stop(parent); iicbus_release_bus(parent, priv->sc->sc_dev); IIC_UNLOCK(priv); return (error); } #ifdef COMPAT_FREEBSD32 static int iic_copyinmsgs32(struct iic_rdwr_data *d, struct iic_msg *buf) { struct iic_msg32 msg32; struct iic_msg32 *m32; int error, i; m32 = (struct iic_msg32 *)d->msgs; for (i = 0; i < d->nmsgs; i++) { error = copyin(&m32[i], &msg32, sizeof(msg32)); if (error != 0) return (error); CP(msg32, buf[i], slave); CP(msg32, buf[i], flags); CP(msg32, buf[i], len); PTRIN_CP(msg32, buf[i], buf); } return (0); } #endif static int iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags, bool compat32 __unused) { #ifdef COMPAT_FREEBSD32 struct iic_rdwr_data dswab; struct iic_rdwr_data32 *d32; #endif struct iic_msg *buf, *m; void **usrbufs; device_t iicdev, parent; int error; uint32_t i; iicdev = priv->sc->sc_dev; parent = device_get_parent(iicdev); error = 0; #ifdef COMPAT_FREEBSD32 if (compat32) { d32 = (struct iic_rdwr_data32 *)d; PTRIN_CP(*d32, dswab, msgs); CP(*d32, dswab, nmsgs); d = &dswab; } #endif if (d->nmsgs > IIC_RDRW_MAX_MSGS) return (EINVAL); buf = malloc(sizeof(*d->msgs) * d->nmsgs, M_IIC, M_WAITOK); #ifdef COMPAT_FREEBSD32 if (compat32) error = iic_copyinmsgs32(d, buf); else #endif error = copyin(d->msgs, buf, sizeof(*d->msgs) * d->nmsgs); if (error != 0) { free(buf, M_IIC); return (error); } /* Alloc kernel buffers for userland data, copyin write data */ usrbufs = malloc(sizeof(void *) * d->nmsgs, M_IIC, M_WAITOK | M_ZERO); for (i = 0; i < d->nmsgs; i++) { m = &(buf[i]); usrbufs[i] = m->buf; /* * At least init the buffer to NULL 
so we can safely free() it later. * If the copyin() to buf failed, don't try to malloc bogus m->len. */ m->buf = NULL; if (error != 0) continue; /* m->len is uint16_t, so allocation size is capped at 64K. */ m->buf = malloc(m->len, M_IIC, M_WAITOK); if (!(m->flags & IIC_M_RD)) error = copyin(usrbufs[i], m->buf, m->len); } if (error == 0) error = iicbus_request_bus(parent, iicdev, (flags & O_NONBLOCK) ? IIC_DONTWAIT : (IIC_WAIT | IIC_INTR)); if (error == 0) { error = iicbus_transfer(iicdev, buf, d->nmsgs); iicbus_release_bus(parent, iicdev); } /* Copyout all read segments, free up kernel buffers */ for (i = 0; i < d->nmsgs; i++) { m = &(buf[i]); if ((error == 0) && (m->flags & IIC_M_RD)) error = copyout(m->buf, usrbufs[i], m->len); free(m->buf, M_IIC); } free(usrbufs, M_IIC); free(buf, M_IIC); return (error); } static int iicioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { #ifdef COMPAT_FREEBSD32 struct iiccmd iicswab; #endif device_t parent, iicdev; struct iiccmd *s; #ifdef COMPAT_FREEBSD32 struct iiccmd32 *s32; #endif struct uio ubuf; struct iovec uvec; struct iic_cdevpriv *priv; int error; bool compat32; s = (struct iiccmd *)data; #ifdef COMPAT_FREEBSD32 s32 = (struct iiccmd32 *)data; #endif error = devfs_get_cdevpriv((void**)&priv); if (error != 0) return (error); KASSERT(priv != NULL, ("iic cdevpriv should not be NULL!")); iicdev = priv->sc->sc_dev; parent = device_get_parent(iicdev); IIC_LOCK(priv); #ifdef COMPAT_FREEBSD32 switch (cmd) { case I2CWRITE32: case I2CREAD32: CP(*s32, iicswab, slave); CP(*s32, iicswab, count); CP(*s32, iicswab, last); PTRIN_CP(*s32, iicswab, buf); s = &iicswab; break; default: break; } #endif switch (cmd) { case I2CSTART: if (priv->started) { error = EINVAL; break; } error = iicbus_request_bus(parent, iicdev, (flags & O_NONBLOCK) ? 
IIC_DONTWAIT : (IIC_WAIT | IIC_INTR)); if (error == 0) error = iicbus_start(parent, s->slave, 0); if (error == 0) { priv->addr = s->slave; priv->started = true; } else iicbus_release_bus(parent, iicdev); break; case I2CSTOP: if (priv->started) { error = iicbus_stop(parent); iicbus_release_bus(parent, iicdev); priv->started = false; } break; case I2CRSTCARD: /* * Bus should be owned before we reset it. * We allow the bus to be already owned as the result of an in-progress * sequence; however, bus reset will always be followed by release * (a new start is presumably needed for I/O anyway). */ if (!priv->started) error = iicbus_request_bus(parent, iicdev, (flags & O_NONBLOCK) ? IIC_DONTWAIT : (IIC_WAIT | IIC_INTR)); if (error == 0) { error = iicbus_reset(parent, IIC_UNKNOWN, 0, NULL); /* * Ignore IIC_ENOADDR as it only means we have a master-only * controller. */ if (error == IIC_ENOADDR) error = 0; iicbus_release_bus(parent, iicdev); priv->started = false; } break; case I2CWRITE: #ifdef COMPAT_FREEBSD32 case I2CWRITE32: #endif if (!priv->started) { error = EINVAL; break; } uvec.iov_base = s->buf; uvec.iov_len = s->count; ubuf.uio_iov = &uvec; ubuf.uio_iovcnt = 1; ubuf.uio_segflg = UIO_USERSPACE; ubuf.uio_td = td; ubuf.uio_resid = s->count; ubuf.uio_offset = 0; ubuf.uio_rw = UIO_WRITE; error = iicuio_move(priv, &ubuf, 0); break; case I2CREAD: #ifdef COMPAT_FREEBSD32 case I2CREAD32: #endif if (!priv->started) { error = EINVAL; break; } uvec.iov_base = s->buf; uvec.iov_len = s->count; ubuf.uio_iov = &uvec; ubuf.uio_iovcnt = 1; ubuf.uio_segflg = UIO_USERSPACE; ubuf.uio_td = td; ubuf.uio_resid = s->count; ubuf.uio_offset = 0; ubuf.uio_rw = UIO_READ; error = iicuio_move(priv, &ubuf, s->last); break; #ifdef COMPAT_FREEBSD32 case I2CRDWR32: #endif case I2CRDWR: /* * The rdwr list should be a self-contained set of * transactions. Fail if another transaction is in progress. 
*/ if (priv->started) { error = EINVAL; break; } #ifdef COMPAT_FREEBSD32 compat32 = (cmd == I2CRDWR32); #else compat32 = false; #endif error = iicrdwr(priv, (struct iic_rdwr_data *)data, flags, compat32); break; case I2CRPTSTART: if (!priv->started) { error = EINVAL; break; } error = iicbus_repeated_start(parent, s->slave, 0); break; case I2CSADDR: priv->addr = *((uint8_t*)data); break; default: error = ENOTTY; } IIC_UNLOCK(priv); return (error); } DRIVER_MODULE(iic, iicbus, iic_driver, 0, 0); MODULE_DEPEND(iic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); MODULE_VERSION(iic, 1); diff --git a/sys/dev/iicbus/iicsmb.c b/sys/dev/iicbus/iicsmb.c index 8576491b08a8..3e7e06a85f09 100644 --- a/sys/dev/iicbus/iicsmb.c +++ b/sys/dev/iicbus/iicsmb.c @@ -1,478 +1,478 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * I2C to SMB bridge * * Example: * * smb bttv * \ / * smbus * / \ * iicsmb bti2c * | * iicbus * / | \ * iicbb pcf ... * | * lpbb */ #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include "smbus_if.h" struct iicsmb_softc { #define SMB_WAITING_ADDR 0x0 #define SMB_WAITING_LOW 0x1 #define SMB_WAITING_HIGH 0x2 #define SMB_DONE 0x3 int state; u_char devaddr; /* slave device address */ char low; /* low byte received first */ char high; /* high byte */ struct mtx lock; device_t smbus; }; static int iicsmb_probe(device_t); static int iicsmb_attach(device_t); static int iicsmb_detach(device_t); static void iicsmb_identify(driver_t *driver, device_t parent); static int iicsmb_intr(device_t dev, int event, char *buf); static int iicsmb_callback(device_t dev, int index, void *data); static int iicsmb_quick(device_t dev, u_char slave, int how); static int iicsmb_sendb(device_t dev, u_char slave, char byte); static int iicsmb_recvb(device_t dev, u_char slave, char *byte); static int iicsmb_writeb(device_t dev, u_char slave, char cmd, char byte); static int iicsmb_writew(device_t dev, u_char slave, char cmd, short word); static int iicsmb_readb(device_t dev, u_char slave, char cmd, char *byte); static int iicsmb_readw(device_t dev, u_char slave, char cmd, short *word); static int iicsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata); static int 
iicsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf); static int iicsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf); static device_method_t iicsmb_methods[] = { /* device interface */ DEVMETHOD(device_identify, iicsmb_identify), DEVMETHOD(device_probe, iicsmb_probe), DEVMETHOD(device_attach, iicsmb_attach), DEVMETHOD(device_detach, iicsmb_detach), /* iicbus interface */ DEVMETHOD(iicbus_intr, iicsmb_intr), /* smbus interface */ DEVMETHOD(smbus_callback, iicsmb_callback), DEVMETHOD(smbus_quick, iicsmb_quick), DEVMETHOD(smbus_sendb, iicsmb_sendb), DEVMETHOD(smbus_recvb, iicsmb_recvb), DEVMETHOD(smbus_writeb, iicsmb_writeb), DEVMETHOD(smbus_writew, iicsmb_writew), DEVMETHOD(smbus_readb, iicsmb_readb), DEVMETHOD(smbus_readw, iicsmb_readw), DEVMETHOD(smbus_pcall, iicsmb_pcall), DEVMETHOD(smbus_bwrite, iicsmb_bwrite), DEVMETHOD(smbus_bread, iicsmb_bread), DEVMETHOD_END }; static driver_t iicsmb_driver = { "iicsmb", iicsmb_methods, sizeof(struct iicsmb_softc), }; static void iicsmb_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "iicsmb", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "iicsmb", -1); + BUS_ADD_CHILD(parent, 0, "iicsmb", DEVICE_UNIT_ANY); } static int iicsmb_probe(device_t dev) { device_set_desc(dev, "SMBus over I2C bridge"); return (BUS_PROBE_NOWILDCARD); } static int iicsmb_attach(device_t dev) { struct iicsmb_softc *sc = (struct iicsmb_softc *)device_get_softc(dev); mtx_init(&sc->lock, "iicsmb", NULL, MTX_DEF); sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); /* probe and attach the smbus */ bus_generic_attach(dev); return (0); } static int iicsmb_detach(device_t dev) { struct iicsmb_softc *sc = (struct iicsmb_softc *)device_get_softc(dev); bus_generic_detach(dev); device_delete_children(dev); mtx_destroy(&sc->lock); return (0); } /* * iicsmb_intr() * * iicbus interrupt handler */ static int iicsmb_intr(device_t dev, int event, char *buf) { struct iicsmb_softc *sc = 
(struct iicsmb_softc *)device_get_softc(dev); mtx_lock(&sc->lock); switch (event) { case INTR_GENERAL: case INTR_START: sc->state = SMB_WAITING_ADDR; break; case INTR_STOP: /* call smbus intr handler */ smbus_intr(sc->smbus, sc->devaddr, sc->low, sc->high, SMB_ENOERR); break; case INTR_RECEIVE: switch (sc->state) { case SMB_DONE: /* XXX too much data, discard */ printf("%s: too much data from 0x%x\n", __func__, sc->devaddr & 0xff); goto end; case SMB_WAITING_ADDR: sc->devaddr = (u_char)*buf; sc->state = SMB_WAITING_LOW; break; case SMB_WAITING_LOW: sc->low = *buf; sc->state = SMB_WAITING_HIGH; break; case SMB_WAITING_HIGH: sc->high = *buf; sc->state = SMB_DONE; break; } end: break; case INTR_TRANSMIT: case INTR_NOACK: break; case INTR_ERROR: switch (*buf) { case IIC_EBUSERR: smbus_intr(sc->smbus, sc->devaddr, 0, 0, SMB_EBUSERR); break; default: printf("%s unknown error 0x%x!\n", __func__, (int)*buf); break; } break; default: panic("%s: unknown event (%d)!", __func__, event); } mtx_unlock(&sc->lock); return (0); } static int iicsmb_callback(device_t dev, int index, void *data) { device_t parent = device_get_parent(dev); int error = 0; int how; switch (index) { case SMB_REQUEST_BUS: /* request underlying iicbus */ how = *(int *)data; error = iicbus_request_bus(parent, dev, how); break; case SMB_RELEASE_BUS: /* release underlying iicbus */ error = iicbus_release_bus(parent, dev); break; default: error = EINVAL; } return (error); } static int iic2smb_error(int error) { switch (error) { case IIC_NOERR: return (SMB_ENOERR); case IIC_EBUSERR: return (SMB_EBUSERR); case IIC_ENOACK: return (SMB_ENOACK); case IIC_ETIMEOUT: return (SMB_ETIMEOUT); case IIC_EBUSBSY: return (SMB_EBUSY); case IIC_ESTATUS: return (SMB_EBUSERR); case IIC_EUNDERFLOW: return (SMB_EBUSERR); case IIC_EOVERFLOW: return (SMB_EBUSERR); case IIC_ENOTSUPP: return (SMB_ENOTSUPP); case IIC_ENOADDR: return (SMB_EBUSERR); case IIC_ERESOURCE: return (SMB_EBUSERR); default: return (SMB_EBUSERR); } } #define 
TRANSFER_MSGS(dev, msgs) iicbus_transfer(dev, msgs, nitems(msgs)) static int iicsmb_quick(device_t dev, u_char slave, int how) { struct iic_msg msgs[] = { { slave, how == SMB_QWRITE ? IIC_M_WR : IIC_M_RD, 0, NULL }, }; int error; switch (how) { case SMB_QWRITE: case SMB_QREAD: break; default: return (SMB_EINVAL); } error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_sendb(device_t dev, u_char slave, char byte) { struct iic_msg msgs[] = { { slave, IIC_M_WR, 1, &byte }, }; int error; error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_recvb(device_t dev, u_char slave, char *byte) { struct iic_msg msgs[] = { { slave, IIC_M_RD, 1, byte }, }; int error; error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_writeb(device_t dev, u_char slave, char cmd, char byte) { uint8_t bytes[] = { cmd, byte }; struct iic_msg msgs[] = { { slave, IIC_M_WR, nitems(bytes), bytes }, }; int error; error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_writew(device_t dev, u_char slave, char cmd, short word) { uint8_t bytes[] = { cmd, word & 0xff, word >> 8 }; struct iic_msg msgs[] = { { slave, IIC_M_WR, nitems(bytes), bytes }, }; int error; error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_readb(device_t dev, u_char slave, char cmd, char *byte) { struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_RD, 1, byte }, }; int error; error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_readw(device_t dev, u_char slave, char cmd, short *word) { uint8_t buf[2]; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_RD, nitems(buf), buf }, }; int error; error = TRANSFER_MSGS(dev, msgs); if (error == 0) *word = ((uint16_t)buf[1] << 8) | buf[0]; return (iic2smb_error(error)); } static int iicsmb_pcall(device_t dev, u_char slave, char cmd, short 
sdata, short *rdata) { uint8_t in[3] = { cmd, sdata & 0xff, sdata >> 8 }; uint8_t out[2]; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, nitems(in), in }, { slave, IIC_M_RD, nitems(out), out }, }; int error; error = TRANSFER_MSGS(dev, msgs); if (error == 0) *rdata = ((uint16_t)out[1] << 8) | out[0]; return (iic2smb_error(error)); } static int iicsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { uint8_t bytes[2] = { cmd, count }; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, nitems(bytes), bytes }, { slave, IIC_M_WR | IIC_M_NOSTART, count, buf }, }; int error; if (count > SMB_MAXBLOCKSIZE || count == 0) return (SMB_EINVAL); error = TRANSFER_MSGS(dev, msgs); return (iic2smb_error(error)); } static int iicsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_RD | IIC_M_NOSTOP, 1, count }, }; struct iic_msg block_msg[] = { { slave, IIC_M_RD | IIC_M_NOSTART, 0, buf }, }; device_t parent = device_get_parent(dev); int error; /* Have to do this because the command is split in two transfers. */ error = iicbus_request_bus(parent, dev, IIC_WAIT | IIC_RECURSIVE); if (error == 0) error = TRANSFER_MSGS(dev, msgs); if (error == 0) { /* * If the slave offers an empty or a too long reply, * read one byte to generate the stop or abort. 
*/ if (*count > SMB_MAXBLOCKSIZE || *count == 0) block_msg[0].len = 1; else block_msg[0].len = *count; error = TRANSFER_MSGS(dev, block_msg); if (*count > SMB_MAXBLOCKSIZE || *count == 0) error = SMB_EINVAL; } (void)iicbus_release_bus(parent, dev); return (iic2smb_error(error)); } DRIVER_MODULE(iicsmb, iicbus, iicsmb_driver, 0, 0); DRIVER_MODULE(smbus, iicsmb, smbus_driver, 0, 0); MODULE_DEPEND(iicsmb, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); MODULE_DEPEND(iicsmb, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(iicsmb, 1); diff --git a/sys/dev/ipmi/ipmi_isa.c b/sys/dev/ipmi/ipmi_isa.c index 432c63b327ee..7ae55baf2f8f 100644 --- a/sys/dev/ipmi/ipmi_isa.c +++ b/sys/dev/ipmi/ipmi_isa.c @@ -1,292 +1,292 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 IronPort Systems Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef LOCAL_MODULE #include #include #else #include #include #endif static void ipmi_isa_identify(driver_t *driver, device_t parent) { struct ipmi_get_info info; uint32_t devid; if (ipmi_smbios_identify(&info) && info.iface_type != SSIF_MODE && device_find_child(parent, "ipmi", -1) == NULL) { /* * XXX: Hack alert. On some broken systems, the IPMI * interface is described via SMBIOS, but the actual * IO resource is in a PCI device BAR, so we have to let * the PCI device attach ipmi instead. In that case don't * create an isa ipmi device. For now we hardcode the list * of bus, device, function tuples. */ devid = pci_cfgregread(0, 0, 4, 2, PCIR_DEVVENDOR, 4); if (devid != 0xffffffff && ipmi_pci_match(devid & 0xffff, devid >> 16) != NULL) return; - BUS_ADD_CHILD(parent, 0, "ipmi", -1); + BUS_ADD_CHILD(parent, 0, "ipmi", DEVICE_UNIT_ANY); } } static int ipmi_isa_probe(device_t dev) { /* * Give other drivers precedence. Unfortunately, this doesn't * work if we have an SMBIOS table that duplicates a PCI device * that's later on the bus than the PCI-ISA bridge. */ if (ipmi_attached) return (ENXIO); /* Skip any PNP devices. 
*/ if (isa_get_logicalid(dev) != 0) return (ENXIO); device_set_desc(dev, "IPMI System Interface"); return (BUS_PROBE_DEFAULT); } static int ipmi_hint_identify(device_t dev, struct ipmi_get_info *info) { const char *mode, *name; int i, unit, val; /* We require at least a "mode" hint. */ name = device_get_name(dev); unit = device_get_unit(dev); if (resource_string_value(name, unit, "mode", &mode) != 0) return (0); /* Set the mode and default I/O resources for each mode. */ bzero(info, sizeof(struct ipmi_get_info)); if (strcasecmp(mode, "KCS") == 0) { info->iface_type = KCS_MODE; info->address = 0xca2; info->io_mode = 1; info->offset = 1; } else if (strcasecmp(mode, "SMIC") == 0) { info->iface_type = SMIC_MODE; info->address = 0xca9; info->io_mode = 1; info->offset = 1; } else if (strcasecmp(mode, "BT") == 0) { info->iface_type = BT_MODE; info->address = 0xe4; info->io_mode = 1; info->offset = 1; } else { device_printf(dev, "Invalid mode %s\n", mode); return (0); } /* * Kill any resources that isahint.c might have setup for us * since it will conflict with how we do resources. */ for (i = 0; i < 2; i++) { bus_delete_resource(dev, SYS_RES_MEMORY, i); bus_delete_resource(dev, SYS_RES_IOPORT, i); } /* Allow the I/O address to be overridden via hints. */ if (resource_int_value(name, unit, "port", &val) == 0 && val != 0) { info->address = val; info->io_mode = 1; } else if (resource_int_value(name, unit, "maddr", &val) == 0 && val != 0) { info->address = val; info->io_mode = 0; } /* Allow the spacing to be overridden. 
*/ if (resource_int_value(name, unit, "spacing", &val) == 0) { switch (val) { case 8: info->offset = 1; break; case 16: info->offset = 2; break; case 32: info->offset = 4; break; default: device_printf(dev, "Invalid register spacing\n"); return (0); } } return (1); } static int ipmi_isa_attach(device_t dev) { struct ipmi_softc *sc = device_get_softc(dev); struct ipmi_get_info info; const char *mode; int count, error, i, type; /* * Pull info out of the SMBIOS table. If that doesn't work, use * hints to enumerate a device. */ if (!ipmi_smbios_identify(&info) && !ipmi_hint_identify(dev, &info)) return (ENXIO); switch (info.iface_type) { case KCS_MODE: count = IPMI_IF_KCS_NRES; mode = "KCS"; break; case SMIC_MODE: count = IPMI_IF_SMIC_NRES; mode = "SMIC"; break; case BT_MODE: count = IPMI_IF_BT_NRES; mode = "BT"; break; default: return (ENXIO); } error = 0; sc->ipmi_dev = dev; device_printf(dev, "%s mode found at %s 0x%jx alignment 0x%x on %s\n", mode, info.io_mode ? "io" : "mem", (uintmax_t)info.address, info.offset, device_get_name(device_get_parent(dev))); if (info.io_mode) type = SYS_RES_IOPORT; else type = SYS_RES_MEMORY; sc->ipmi_io_type = type; sc->ipmi_io_spacing = info.offset; if (info.offset == 1) { sc->ipmi_io_rid = 0; sc->ipmi_io_res[0] = bus_alloc_resource(dev, type, &sc->ipmi_io_rid, info.address, info.address + count - 1, count, RF_ACTIVE); if (sc->ipmi_io_res[0] == NULL) { device_printf(dev, "couldn't configure I/O resource\n"); return (ENXIO); } } else { for (i = 0; i < count; i++) { sc->ipmi_io_rid = i; sc->ipmi_io_res[i] = bus_alloc_resource(dev, type, &sc->ipmi_io_rid, info.address + i * info.offset, info.address + i * info.offset, 1, RF_ACTIVE); if (sc->ipmi_io_res[i] == NULL) { device_printf(dev, "couldn't configure I/O resource\n"); error = ENXIO; sc->ipmi_io_rid = 0; goto bad; } } sc->ipmi_io_rid = 0; } if (info.irq != 0) { sc->ipmi_irq_rid = 0; sc->ipmi_irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->ipmi_irq_rid, info.irq, info.irq, 1, 
RF_SHAREABLE | RF_ACTIVE); } error = ENXIO; switch (info.iface_type) { case KCS_MODE: error = ipmi_kcs_attach(sc); break; case SMIC_MODE: error = ipmi_smic_attach(sc); break; case BT_MODE: error = ipmi_bt_attach(sc); break; } if (error) goto bad; error = ipmi_attach(dev); if (error) goto bad; return (0); bad: ipmi_release_resources(dev); return (error); } static device_method_t ipmi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ipmi_isa_identify), DEVMETHOD(device_probe, ipmi_isa_probe), DEVMETHOD(device_attach, ipmi_isa_attach), DEVMETHOD(device_detach, ipmi_detach), { 0, 0 } }; static driver_t ipmi_isa_driver = { "ipmi", ipmi_methods, sizeof(struct ipmi_softc), }; DRIVER_MODULE(ipmi_isa, isa, ipmi_isa_driver, 0, 0); #ifdef ARCH_MAY_USE_EFI MODULE_DEPEND(ipmi_isa, efirt, 1, 1, 1); #endif diff --git a/sys/dev/ipmi/ipmi_smbus.c b/sys/dev/ipmi/ipmi_smbus.c index 59b8dd76675a..1772d9313892 100644 --- a/sys/dev/ipmi/ipmi_smbus.c +++ b/sys/dev/ipmi/ipmi_smbus.c @@ -1,134 +1,134 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 IronPort Systems Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #ifdef LOCAL_MODULE #include #else #include #endif static void ipmi_smbus_identify(driver_t *driver, device_t parent); static int ipmi_smbus_probe(device_t dev); static int ipmi_smbus_attach(device_t dev); static void ipmi_smbus_identify(driver_t *driver, device_t parent) { struct ipmi_get_info info; if (ipmi_smbios_identify(&info) && info.iface_type == SSIF_MODE && device_find_child(parent, "ipmi", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "ipmi", -1); + BUS_ADD_CHILD(parent, 0, "ipmi", DEVICE_UNIT_ANY); } static int ipmi_smbus_probe(device_t dev) { device_set_desc(dev, "IPMI System Interface"); return (BUS_PROBE_DEFAULT); } static int ipmi_smbus_attach(device_t dev) { struct ipmi_softc *sc = device_get_softc(dev); struct ipmi_get_info info; int error; /* This should never fail. 
*/ if (!ipmi_smbios_identify(&info)) return (ENXIO); if (info.iface_type != SSIF_MODE) { device_printf(dev, "No SSIF IPMI interface found\n"); return (ENXIO); } sc->ipmi_dev = dev; if (info.irq != 0) { sc->ipmi_irq_rid = 0; sc->ipmi_irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->ipmi_irq_rid, info.irq, info.irq, 1, RF_SHAREABLE | RF_ACTIVE); } device_printf(dev, "SSIF mode found at address 0x%llx on %s\n", (long long)info.address, device_get_name(device_get_parent(dev))); error = ipmi_ssif_attach(sc, device_get_parent(dev), info.address); if (error) goto bad; error = ipmi_attach(dev); if (error) goto bad; return (0); bad: ipmi_release_resources(dev); return (error); } static device_method_t ipmi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ipmi_smbus_identify), DEVMETHOD(device_probe, ipmi_smbus_probe), DEVMETHOD(device_attach, ipmi_smbus_attach), DEVMETHOD(device_detach, ipmi_detach), { 0, 0 } }; static driver_t ipmi_smbus_driver = { "ipmi", ipmi_methods, sizeof(struct ipmi_softc) }; DRIVER_MODULE(ipmi_smbus, smbus, ipmi_smbus_driver, 0, 0); MODULE_DEPEND(ipmi_smbus, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); #ifdef ARCH_MAY_USE_EFI MODULE_DEPEND(ipmi_smbus, efirt, 1, 1, 1); #endif diff --git a/sys/dev/isl/isl.c b/sys/dev/isl/isl.c index 558fc88975fa..009c02ad2b35 100644 --- a/sys/dev/isl/isl.c +++ b/sys/dev/isl/isl.c @@ -1,340 +1,340 @@ /*- * Copyright (c) 2015 Michael Gmelin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Driver for intersil I2C ISL29018 Digital Ambient Light Sensor and Proximity * Sensor with Interrupt Function, only tested connected over SMBus (ig4iic). * * Datasheet: * http://www.intersil.com/en/products/optoelectronics/ambient-light-and-proximity-sensors/light-to-digital-sensors/ISL29018.html * http://www.intersil.com/content/dam/Intersil/documents/isl2/isl29018.pdf */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include "bus_if.h" #include "device_if.h" #define ISL_METHOD_ALS 0x10 #define ISL_METHOD_IR 0x11 #define ISL_METHOD_PROX 0x12 #define ISL_METHOD_RESOLUTION 0x13 #define ISL_METHOD_RANGE 0x14 struct isl_softc { device_t dev; struct sx isl_sx; }; /* Returns < 0 on problem. 
*/ static int isl_read_sensor(device_t dev, uint8_t cmd_mask); static int isl_read_byte(device_t dev, uint8_t reg, uint8_t *val) { uint16_t addr = iicbus_get_addr(dev); struct iic_msg msgs[] = { { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &reg }, { addr, IIC_M_RD, 1, val }, }; return (iicbus_transfer(dev, msgs, nitems(msgs))); } static int isl_write_byte(device_t dev, uint8_t reg, uint8_t val) { uint16_t addr = iicbus_get_addr(dev); uint8_t bytes[] = { reg, val }; struct iic_msg msgs[] = { { addr, IIC_M_WR, nitems(bytes), bytes }, }; return (iicbus_transfer(dev, msgs, nitems(msgs))); } /* * Initialize the device */ static int init_device(device_t dev, int probe) { int error; /* * init procedure: send 0x00 to test ref and cmd reg 1 */ error = isl_write_byte(dev, REG_TEST, 0); if (error) goto done; error = isl_write_byte(dev, REG_CMD1, 0); if (error) goto done; pause("islinit", hz/100); done: if (error && !probe) device_printf(dev, "Unable to initialize\n"); return (error); } static int isl_probe(device_t); static int isl_attach(device_t); static int isl_detach(device_t); static int isl_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t isl_methods[] = { /* device interface */ DEVMETHOD(device_probe, isl_probe), DEVMETHOD(device_attach, isl_attach), DEVMETHOD(device_detach, isl_detach), DEVMETHOD_END }; static driver_t isl_driver = { "isl", isl_methods, sizeof(struct isl_softc), }; #if 0 static void isl_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "asl", -1)) { if (bootverbose) printf("asl: device(s) already created\n"); return; } /* Check if we can communicate to our slave. 
*/ if (init_device(dev, 0x88, 1) == 0) - BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "isl", -1); + BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "isl", DEVICE_UNIT_ANY); } #endif static int isl_probe(device_t dev) { uint32_t addr = iicbus_get_addr(dev); if (addr != 0x88) return (ENXIO); if (init_device(dev, 1) != 0) return (ENXIO); device_set_desc(dev, "ISL Digital Ambient Light Sensor"); return (BUS_PROBE_VENDOR); } static int isl_attach(device_t dev) { struct isl_softc *sc; struct sysctl_ctx_list *sysctl_ctx; struct sysctl_oid *sysctl_tree; int use_als; int use_ir; int use_prox; sc = device_get_softc(dev); sc->dev = dev; if (init_device(dev, 0) != 0) return (ENXIO); sx_init(&sc->isl_sx, "ISL read lock"); sysctl_ctx = device_get_sysctl_ctx(dev); sysctl_tree = device_get_sysctl_tree(dev); use_als = isl_read_sensor(dev, CMD1_MASK_ALS_ONCE) >= 0; use_ir = isl_read_sensor(dev, CMD1_MASK_IR_ONCE) >= 0; use_prox = isl_read_sensor(dev, CMD1_MASK_PROX_ONCE) >= 0; if (use_als) { SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "als", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, ISL_METHOD_ALS, isl_sysctl, "I", "Current ALS sensor read-out"); } if (use_ir) { SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "ir", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, ISL_METHOD_IR, isl_sysctl, "I", "Current IR sensor read-out"); } if (use_prox) { SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prox", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, ISL_METHOD_PROX, isl_sysctl, "I", "Current proximity sensor read-out"); } SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "resolution", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, ISL_METHOD_RESOLUTION, isl_sysctl, "I", "Current proximity sensor resolution"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "range", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, ISL_METHOD_RANGE, isl_sysctl, "I", "Current proximity sensor 
range"); return (0); } static int isl_detach(device_t dev) { struct isl_softc *sc; sc = device_get_softc(dev); sx_destroy(&sc->isl_sx); return (0); } static int isl_sysctl(SYSCTL_HANDLER_ARGS) { static int resolutions[] = { 16, 12, 8, 4}; static int ranges[] = { 1000, 4000, 16000, 64000}; struct isl_softc *sc; uint8_t rbyte; int arg; int resolution; int range; sc = (struct isl_softc *)oidp->oid_arg1; arg = -1; sx_xlock(&sc->isl_sx); if (isl_read_byte(sc->dev, REG_CMD2, &rbyte) != 0) { sx_xunlock(&sc->isl_sx); return (-1); } resolution = resolutions[(rbyte & CMD2_MASK_RESOLUTION) >> CMD2_SHIFT_RESOLUTION]; range = ranges[(rbyte & CMD2_MASK_RANGE) >> CMD2_SHIFT_RANGE]; switch (oidp->oid_arg2) { case ISL_METHOD_ALS: arg = (isl_read_sensor(sc->dev, CMD1_MASK_ALS_ONCE) * range) >> resolution; break; case ISL_METHOD_IR: arg = isl_read_sensor(sc->dev, CMD1_MASK_IR_ONCE); break; case ISL_METHOD_PROX: arg = isl_read_sensor(sc->dev, CMD1_MASK_PROX_ONCE); break; case ISL_METHOD_RESOLUTION: arg = (1 << resolution); break; case ISL_METHOD_RANGE: arg = range; break; } sx_xunlock(&sc->isl_sx); SYSCTL_OUT(req, &arg, sizeof(arg)); return (0); } static int isl_read_sensor(device_t dev, uint8_t cmd_mask) { uint8_t rbyte; uint8_t cmd; int ret; if (isl_read_byte(dev, REG_CMD1, &rbyte) != 0) { device_printf(dev, "Couldn't read first byte before issuing command %d\n", cmd_mask); return (-1); } cmd = (rbyte & 0x1f) | cmd_mask; if (isl_write_byte(dev, REG_CMD1, cmd) != 0) { device_printf(dev, "Couldn't write command %d\n", cmd_mask); return (-1); } pause("islconv", hz/10); if (isl_read_byte(dev, REG_DATA1, &rbyte) != 0) { device_printf(dev, "Couldn't read first byte after command %d\n", cmd_mask); return (-1); } ret = rbyte; if (isl_read_byte(dev, REG_DATA2, &rbyte) != 0) { device_printf(dev, "Couldn't read second byte after command %d\n", cmd_mask); return (-1); } ret += rbyte << 8; return (ret); } DRIVER_MODULE(isl, iicbus, isl_driver, NULL, NULL); MODULE_DEPEND(isl, iicbus, 
IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); MODULE_VERSION(isl, 1); diff --git a/sys/dev/mdio/mdio.c b/sys/dev/mdio/mdio.c index 55122edd16cc..c34493254e0f 100644 --- a/sys/dev/mdio/mdio.c +++ b/sys/dev/mdio/mdio.c @@ -1,133 +1,133 @@ /*- * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include "mdio_if.h" static void mdio_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, mdio_driver.name, -1) == NULL) - BUS_ADD_CHILD(parent, 0, mdio_driver.name, -1); + BUS_ADD_CHILD(parent, 0, mdio_driver.name, DEVICE_UNIT_ANY); } static int mdio_probe(device_t dev) { device_set_desc(dev, "MDIO"); return (BUS_PROBE_SPECIFIC); } static int mdio_attach(device_t dev) { bus_generic_probe(dev); bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } static int mdio_detach(device_t dev) { bus_generic_detach(dev); return (0); } static int mdio_readreg(device_t dev, int phy, int reg) { return (MDIO_READREG(device_get_parent(dev), phy, reg)); } static int mdio_writereg(device_t dev, int phy, int reg, int val) { return (MDIO_WRITEREG(device_get_parent(dev), phy, reg, val)); } static int mdio_readextreg(device_t dev, int phy, int devad, int reg) { return (MDIO_READEXTREG(device_get_parent(dev), phy, devad, reg)); } static int mdio_writeextreg(device_t dev, int phy, int devad, int reg, int val) { return (MDIO_WRITEEXTREG(device_get_parent(dev), phy, devad, reg, val)); } static void mdio_hinted_child(device_t dev, const char *name, int unit) { device_add_child(dev, name, unit); } static device_method_t mdio_methods[] = { /* device interface */ DEVMETHOD(device_identify, mdio_identify), DEVMETHOD(device_probe, mdio_probe), DEVMETHOD(device_attach, mdio_attach), DEVMETHOD(device_detach, mdio_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_hinted_child, mdio_hinted_child), /* MDIO access */ DEVMETHOD(mdio_readreg, mdio_readreg), DEVMETHOD(mdio_writereg, mdio_writereg), DEVMETHOD(mdio_readextreg, mdio_readextreg), DEVMETHOD(mdio_writeextreg, mdio_writeextreg), DEVMETHOD_END }; driver_t mdio_driver = { "mdio", mdio_methods, 0 }; MODULE_VERSION(mdio, 1); diff --git a/sys/dev/nvdimm/nvdimm_acpi.c 
b/sys/dev/nvdimm/nvdimm_acpi.c index 995e60e8b4ee..a8e5db0542f2 100644 --- a/sys/dev/nvdimm/nvdimm_acpi.c +++ b/sys/dev/nvdimm/nvdimm_acpi.c @@ -1,281 +1,281 @@ /*- * Copyright (c) 2017 The FreeBSD Foundation * Copyright (c) 2018, 2019 Intel Corporation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_acpi.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define _COMPONENT ACPI_OEM ACPI_MODULE_NAME("NVDIMM_ACPI") struct nvdimm_root_dev { SLIST_HEAD(, SPA_mapping) spas; }; static MALLOC_DEFINE(M_NVDIMM_ACPI, "nvdimm_acpi", "NVDIMM ACPI bus memory"); static ACPI_STATUS find_dimm(ACPI_HANDLE handle, UINT32 nesting_level, void *context, void **return_value) { ACPI_DEVICE_INFO *device_info; ACPI_STATUS status; device_info = NULL; status = AcpiGetObjectInfo(handle, &device_info); if (ACPI_FAILURE(status)) return_ACPI_STATUS(AE_ERROR); if (device_info->Address == (uintptr_t)context) { *(ACPI_HANDLE *)return_value = handle; status = AE_CTRL_TERMINATE; } else status = AE_OK; AcpiOsFree(device_info); return_ACPI_STATUS(status); } static ACPI_HANDLE get_dimm_acpi_handle(ACPI_HANDLE root_handle, nfit_handle_t adr) { ACPI_HANDLE res; ACPI_STATUS status; res = NULL; status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, root_handle, 1, find_dimm, NULL, (void *)(uintptr_t)adr, &res); if (ACPI_FAILURE(status)) res = NULL; return (res); } static int nvdimm_root_create_devs(device_t dev, ACPI_TABLE_NFIT *nfitbl) { ACPI_HANDLE root_handle, dimm_handle; device_t child; nfit_handle_t *dimm_ids, *dimm; uintptr_t *ivars; int num_dimm_ids; root_handle = acpi_get_handle(dev); acpi_nfit_get_dimm_ids(nfitbl, &dimm_ids, &num_dimm_ids); for (dimm = dimm_ids; dimm < dimm_ids + num_dimm_ids; dimm++) { dimm_handle = get_dimm_acpi_handle(root_handle, *dimm); if (dimm_handle == NULL) continue; - child = BUS_ADD_CHILD(dev, 100, "nvdimm", -1); + child = BUS_ADD_CHILD(dev, 100, "nvdimm", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "failed to create nvdimm\n"); return (ENXIO); } ivars = mallocarray(NVDIMM_ROOT_IVAR_MAX, sizeof(uintptr_t), M_NVDIMM_ACPI, M_ZERO | M_WAITOK); device_set_ivars(child, ivars); nvdimm_root_set_acpi_handle(child, dimm_handle); 
nvdimm_root_set_device_handle(child, *dimm); } free(dimm_ids, M_NVDIMM_ACPI); return (0); } static int nvdimm_root_create_spas(struct nvdimm_root_dev *dev, ACPI_TABLE_NFIT *nfitbl) { ACPI_NFIT_SYSTEM_ADDRESS **spas, **spa; struct SPA_mapping *spa_mapping; enum SPA_mapping_type spa_type; int error, num_spas; error = 0; acpi_nfit_get_spa_ranges(nfitbl, &spas, &num_spas); for (spa = spas; spa < spas + num_spas; spa++) { spa_type = nvdimm_spa_type_from_uuid( (struct uuid *)(*spa)->RangeGuid); if (spa_type == SPA_TYPE_UNKNOWN) continue; spa_mapping = malloc(sizeof(struct SPA_mapping), M_NVDIMM_ACPI, M_WAITOK | M_ZERO); error = nvdimm_spa_init(spa_mapping, *spa, spa_type); if (error != 0) { nvdimm_spa_fini(spa_mapping); free(spa_mapping, M_NVDIMM_ACPI); break; } if (nvdimm_spa_type_user_accessible(spa_type) && spa_type != SPA_TYPE_CONTROL_REGION) nvdimm_create_namespaces(spa_mapping, nfitbl); SLIST_INSERT_HEAD(&dev->spas, spa_mapping, link); } free(spas, M_NVDIMM_ACPI); return (error); } static char *nvdimm_root_id[] = {"ACPI0012", NULL}; static int nvdimm_root_probe(device_t dev) { int rv; if (acpi_disabled("nvdimm")) return (ENXIO); rv = ACPI_ID_PROBE(device_get_parent(dev), dev, nvdimm_root_id, NULL); if (rv <= 0) device_set_desc(dev, "ACPI NVDIMM root device"); return (rv); } static int nvdimm_root_attach(device_t dev) { struct nvdimm_root_dev *root; ACPI_TABLE_NFIT *nfitbl; ACPI_STATUS status; int error; status = AcpiGetTable(ACPI_SIG_NFIT, 1, (ACPI_TABLE_HEADER **)&nfitbl); if (ACPI_FAILURE(status)) { device_printf(dev, "cannot get NFIT\n"); return (ENXIO); } error = nvdimm_root_create_devs(dev, nfitbl); if (error != 0) return (error); error = bus_generic_attach(dev); if (error != 0) return (error); root = device_get_softc(dev); error = nvdimm_root_create_spas(root, nfitbl); AcpiPutTable(&nfitbl->Header); return (error); } static int nvdimm_root_detach(device_t dev) { struct nvdimm_root_dev *root; struct SPA_mapping *spa, *next; device_t *children; int i, error, 
num_children; root = device_get_softc(dev); SLIST_FOREACH_SAFE(spa, &root->spas, link, next) { nvdimm_destroy_namespaces(spa); nvdimm_spa_fini(spa); SLIST_REMOVE_HEAD(&root->spas, link); free(spa, M_NVDIMM_ACPI); } error = bus_generic_detach(dev); if (error != 0) return (error); error = device_get_children(dev, &children, &num_children); if (error != 0) return (error); for (i = 0; i < num_children; i++) free(device_get_ivars(children[i]), M_NVDIMM_ACPI); free(children, M_TEMP); error = device_delete_children(dev); return (error); } static int nvdimm_root_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { if (index < 0 || index >= NVDIMM_ROOT_IVAR_MAX) return (ENOENT); *result = ((uintptr_t *)device_get_ivars(child))[index]; return (0); } static int nvdimm_root_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { if (index < 0 || index >= NVDIMM_ROOT_IVAR_MAX) return (ENOENT); ((uintptr_t *)device_get_ivars(child))[index] = value; return (0); } static int nvdimm_root_child_location(device_t dev, device_t child, struct sbuf *sb) { ACPI_HANDLE handle; handle = nvdimm_root_get_acpi_handle(child); if (handle != NULL) sbuf_printf(sb, "handle=%s", acpi_name(handle)); return (0); } static device_method_t nvdimm_acpi_methods[] = { DEVMETHOD(device_probe, nvdimm_root_probe), DEVMETHOD(device_attach, nvdimm_root_attach), DEVMETHOD(device_detach, nvdimm_root_detach), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, nvdimm_root_read_ivar), DEVMETHOD(bus_write_ivar, nvdimm_root_write_ivar), DEVMETHOD(bus_child_location, nvdimm_root_child_location), DEVMETHOD(bus_get_device_path, acpi_get_acpi_device_path), DEVMETHOD_END }; static driver_t nvdimm_acpi_driver = { "nvdimm_acpi_root", nvdimm_acpi_methods, sizeof(struct nvdimm_root_dev), }; DRIVER_MODULE(nvdimm_acpi_root, acpi, nvdimm_acpi_driver, NULL, NULL); MODULE_DEPEND(nvdimm_acpi_root, acpi, 1, 1, 1); diff --git a/sys/dev/nvdimm/nvdimm_e820.c 
b/sys/dev/nvdimm/nvdimm_e820.c index 3fbe2df31209..2a6f29ebad21 100644 --- a/sys/dev/nvdimm/nvdimm_e820.c +++ b/sys/dev/nvdimm/nvdimm_e820.c @@ -1,389 +1,389 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Dell EMC Isilon * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct nvdimm_e820_bus { SLIST_HEAD(, SPA_mapping) spas; }; #define NVDIMM_E820 "nvdimm_e820" static MALLOC_DEFINE(M_NVDIMM_E820, NVDIMM_E820, "NVDIMM e820 bus memory"); static const struct bios_smap *smapbase; static struct { vm_paddr_t start; vm_paddr_t size; } pram_segments[VM_PHYSSEG_MAX]; static unsigned pram_nreg; static void nvdimm_e820_dump_prams(device_t dev, const char *func, int hintunit) { char buffer[256]; struct sbuf sb; bool printed = false; unsigned i; sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, NULL); sbuf_printf(&sb, "%s: %s: ", device_get_nameunit(dev), func); if (hintunit < 0) sbuf_cat(&sb, "Found BIOS PRAM regions: "); else sbuf_printf(&sb, "Remaining unallocated PRAM regions after " "hint %d: ", hintunit); for (i = 0; i < pram_nreg; i++) { if (pram_segments[i].size == 0) continue; if (printed) sbuf_putc(&sb, ','); else printed = true; sbuf_printf(&sb, "0x%jx-0x%jx", (uintmax_t)pram_segments[i].start, (uintmax_t)pram_segments[i].start + pram_segments[i].size - 1); } if (!printed) sbuf_cat(&sb, ""); sbuf_putc(&sb, '\n'); sbuf_finish(&sb); sbuf_delete(&sb); } static int nvdimm_e820_create_spas(device_t dev) { static const vm_size_t HINT_ALL = (vm_size_t)-1; ACPI_NFIT_SYSTEM_ADDRESS nfit_sa; struct SPA_mapping *spa_mapping; enum SPA_mapping_type spa_type; struct nvdimm_e820_bus *sc; const char *hinttype; long hintaddrl, hintsizel; vm_paddr_t hintaddr; vm_size_t hintsize; unsigned i, j; int error; sc = device_get_softc(dev); error = 0; nfit_sa = (ACPI_NFIT_SYSTEM_ADDRESS) { 0 }; if (bootverbose) nvdimm_e820_dump_prams(dev, __func__, -1); for (i = 0; resource_long_value("nvdimm_spa", i, "maddr", &hintaddrl) == 0; i++) { if (resource_long_value("nvdimm_spa", i, "msize", &hintsizel) != 0) { device_printf(dev, "hint.nvdimm_spa.%u missing msize\n", i); 
continue; } hintaddr = (vm_paddr_t)hintaddrl; hintsize = (vm_size_t)hintsizel; if ((hintaddr & PAGE_MASK) != 0 || ((hintsize & PAGE_MASK) != 0 && hintsize != HINT_ALL)) { device_printf(dev, "hint.nvdimm_spa.%u addr or size " "not page aligned\n", i); continue; } if (resource_string_value("nvdimm_spa", i, "type", &hinttype) != 0) { device_printf(dev, "hint.nvdimm_spa.%u missing type\n", i); continue; } spa_type = nvdimm_spa_type_from_name(hinttype); if (spa_type == SPA_TYPE_UNKNOWN) { device_printf(dev, "hint.nvdimm_spa%u.type does not " "match any known SPA types\n", i); continue; } for (j = 0; j < pram_nreg; j++) { if (pram_segments[j].start <= hintaddr && (hintsize == HINT_ALL || (pram_segments[j].start + pram_segments[j].size) >= (hintaddr + hintsize))) break; } if (j == pram_nreg) { device_printf(dev, "hint.nvdimm_spa%u hint does not " "match any region\n", i); continue; } /* Carve off "SPA" from available regions. */ if (pram_segments[j].start == hintaddr) { /* Easy case first: beginning of segment. */ if (hintsize == HINT_ALL) hintsize = pram_segments[j].size; pram_segments[j].start += hintsize; pram_segments[j].size -= hintsize; /* We might leave an empty segment; who cares. */ } else if (hintsize == HINT_ALL || (pram_segments[j].start + pram_segments[j].size) == (hintaddr + hintsize)) { /* 2nd easy case: end of segment. */ if (hintsize == HINT_ALL) hintsize = pram_segments[j].size - (hintaddr - pram_segments[j].start); pram_segments[j].size -= hintsize; } else { /* Hard case: mid segment. */ if (pram_nreg == nitems(pram_segments)) { /* Improbable, but handle gracefully. 
*/ device_printf(dev, "Ran out of %zu segments\n", nitems(pram_segments)); error = ENOBUFS; break; } if (j != pram_nreg - 1) { memmove(&pram_segments[j + 2], &pram_segments[j + 1], (pram_nreg - 1 - j) * sizeof(pram_segments[0])); } pram_nreg++; pram_segments[j + 1].start = hintaddr + hintsize; pram_segments[j + 1].size = (pram_segments[j].start + pram_segments[j].size) - (hintaddr + hintsize); pram_segments[j].size = hintaddr - pram_segments[j].start; } if (bootverbose) nvdimm_e820_dump_prams(dev, __func__, (int)i); spa_mapping = malloc(sizeof(*spa_mapping), M_NVDIMM_E820, M_WAITOK | M_ZERO); /* Mock up a super primitive table for nvdimm_spa_init(). */ nfit_sa.RangeIndex = i; nfit_sa.Flags = 0; nfit_sa.Address = hintaddr; nfit_sa.Length = hintsize; nfit_sa.MemoryMapping = EFI_MD_ATTR_WB | EFI_MD_ATTR_WT | EFI_MD_ATTR_UC; error = nvdimm_spa_init(spa_mapping, &nfit_sa, spa_type); if (error != 0) { nvdimm_spa_fini(spa_mapping); free(spa_mapping, M_NVDIMM_E820); break; } SLIST_INSERT_HEAD(&sc->spas, spa_mapping, link); } return (error); } static int nvdimm_e820_remove_spas(device_t dev) { struct nvdimm_e820_bus *sc; struct SPA_mapping *spa, *next; sc = device_get_softc(dev); SLIST_FOREACH_SAFE(spa, &sc->spas, link, next) { nvdimm_spa_fini(spa); SLIST_REMOVE_HEAD(&sc->spas, link); free(spa, M_NVDIMM_E820); } return (0); } static void nvdimm_e820_identify(driver_t *driver, device_t parent) { device_t child; caddr_t kmdp; if (resource_disabled(driver->name, 0)) return; /* Just create a single instance of the fake bus. */ if (device_find_child(parent, driver->name, -1) != NULL) return; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); smapbase = (const void *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); /* Only supports BIOS SMAP for now. 
*/ if (smapbase == NULL) return; - child = BUS_ADD_CHILD(parent, 0, driver->name, -1); + child = BUS_ADD_CHILD(parent, 0, driver->name, DEVICE_UNIT_ANY); if (child == NULL) device_printf(parent, "add %s child failed\n", driver->name); } static int nvdimm_e820_probe(device_t dev) { /* * nexus panics if a child doesn't have ivars. BUS_ADD_CHILD uses * nexus_add_child, which creates fuckin ivars. but sometimes if you * unload and reload nvdimm_e820, the device node stays but the ivars * are deleted??? avoid trivial panic but this is a kludge. */ if (device_get_ivars(dev) == NULL) return (ENXIO); device_quiet(dev); device_set_desc(dev, "Legacy e820 NVDIMM root device"); return (BUS_PROBE_NOWILDCARD); } static int nvdimm_e820_attach(device_t dev) { const struct bios_smap *smapend, *smap; uint32_t smapsize; unsigned nregions; int error; smapsize = *((const uint32_t *)smapbase - 1); smapend = (const void *)((const char *)smapbase + smapsize); for (nregions = 0, smap = smapbase; smap < smapend; smap++) { if (smap->type != SMAP_TYPE_PRAM || smap->length == 0) continue; pram_segments[nregions].start = smap->base; pram_segments[nregions].size = smap->length; device_printf(dev, "Found PRAM 0x%jx +0x%jx\n", (uintmax_t)smap->base, (uintmax_t)smap->length); nregions++; } if (nregions == 0) { device_printf(dev, "No e820 PRAM regions detected\n"); return (ENXIO); } pram_nreg = nregions; error = nvdimm_e820_create_spas(dev); return (error); } static int nvdimm_e820_detach(device_t dev) { int error; error = nvdimm_e820_remove_spas(dev); return (error); } static device_method_t nvdimm_e820_methods[] = { DEVMETHOD(device_identify, nvdimm_e820_identify), DEVMETHOD(device_probe, nvdimm_e820_probe), DEVMETHOD(device_attach, nvdimm_e820_attach), DEVMETHOD(device_detach, nvdimm_e820_detach), DEVMETHOD_END }; static driver_t nvdimm_e820_driver = { NVDIMM_E820, nvdimm_e820_methods, sizeof(struct nvdimm_e820_bus), }; static int nvdimm_e820_chainevh(struct module *m, int e, void *arg __unused) 
{ devclass_t dc; device_t dev, parent; int i, error, maxunit; switch (e) { case MOD_UNLOAD: dc = devclass_find(nvdimm_e820_driver.name); maxunit = devclass_get_maxunit(dc); for (i = 0; i < maxunit; i++) { dev = devclass_get_device(dc, i); if (dev == NULL) continue; parent = device_get_parent(dev); if (parent == NULL) { /* Not sure how this would happen. */ continue; } error = device_delete_child(parent, dev); if (error != 0) return (error); } break; default: /* Prevent compiler warning about unhandled cases. */ break; } return (0); } DRIVER_MODULE(nvdimm_e820, nexus, nvdimm_e820_driver, nvdimm_e820_chainevh, NULL); diff --git a/sys/dev/ppbus/if_plip.c b/sys/dev/ppbus/if_plip.c index c2adcb602f6b..598c0c49b82e 100644 --- a/sys/dev/ppbus/if_plip.c +++ b/sys/dev/ppbus/if_plip.c @@ -1,839 +1,839 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997 Poul-Henning Kamp * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From Id: lpt.c,v 1.55.2.1 1996/11/12 09:08:38 phk Exp */ #include /* * Parallel port TCP/IP interfaces added. I looked at the driver from * MACH but this is a complete rewrite, and btw. incompatible, and it * should perform better too. I have never run the MACH driver though. * * This driver sends two bytes (0x08, 0x00) in front of each packet, * to allow us to distinguish another format later. * * Now added a Linux/Crynwr compatibility mode which is enabled using * IF_LINK0 - Tim Wilkinson. * * TODO: * Make HDLC/PPP mode, use IF_LLC1 to enable. * * Connect the two computers using a Laplink parallel cable to use this * feature: * * +----------------------------------------+ * |A-name A-End B-End Descr. Port/Bit | * +----------------------------------------+ * |DATA0 2 15 Data 0/0x01 | * |-ERROR 15 2 1/0x08 | * +----------------------------------------+ * |DATA1 3 13 Data 0/0x02 | * |+SLCT 13 3 1/0x10 | * +----------------------------------------+ * |DATA2 4 12 Data 0/0x04 | * |+PE 12 4 1/0x20 | * +----------------------------------------+ * |DATA3 5 10 Strobe 0/0x08 | * |-ACK 10 5 1/0x40 | * +----------------------------------------+ * |DATA4 6 11 Data 0/0x10 | * |BUSY 11 6 1/~0x80 | * +----------------------------------------+ * |GND 18-25 18-25 GND - | * +----------------------------------------+ * * Expect transfer-rates up to 75 kbyte/sec. 
* * If GCC could correctly grok * register int port asm("edx") * the code would be cleaner * * Poul-Henning Kamp */ /* * Update for ppbus, PLIP support only - Nicolas Souchu */ #include "opt_plip.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #ifndef LPMTU /* MTU for the lp# interfaces */ #define LPMTU 1500 #endif #ifndef LPMAXSPIN1 /* DELAY factor for the lp# interfaces */ #define LPMAXSPIN1 8000 /* Spinning for remote intr to happen */ #endif #ifndef LPMAXSPIN2 /* DELAY factor for the lp# interfaces */ #define LPMAXSPIN2 500 /* Spinning for remote handshake to happen */ #endif #ifndef LPMAXERRS /* Max errors before !RUNNING */ #define LPMAXERRS 100 #endif #define CLPIPHDRLEN 14 /* We send dummy ethernet addresses (two) + packet type in front of packet */ #define CLPIP_SHAKE 0x80 /* This bit toggles between nibble reception */ #define MLPIPHDRLEN CLPIPHDRLEN #define LPIPHDRLEN 2 /* We send 0x08, 0x00 in front of packet */ #define LPIP_SHAKE 0x40 /* This bit toggles between nibble reception */ #if !defined(MLPIPHDRLEN) || LPIPHDRLEN > MLPIPHDRLEN #define MLPIPHDRLEN LPIPHDRLEN #endif #define LPIPTBLSIZE 256 /* Size of octet translation table */ #define lprintf if (lptflag) printf #ifdef PLIP_DEBUG static int volatile lptflag = 1; #else static int volatile lptflag = 0; #endif struct lp_data { struct ifnet *sc_ifp; device_t sc_dev; u_char *sc_ifbuf; int sc_iferrs; struct resource *res_irq; void *sc_intr_cookie; }; static struct mtx lp_tables_lock; MTX_SYSINIT(lp_tables, &lp_tables_lock, "plip tables", MTX_DEF); /* Tables for the lp# interface */ static u_char *txmith; #define txmitl (txmith + (1 * LPIPTBLSIZE)) #define trecvh (txmith + (2 * LPIPTBLSIZE)) #define trecvl (txmith + (3 * LPIPTBLSIZE)) static u_char *ctxmith; #define ctxmitl (ctxmith + (1 * LPIPTBLSIZE)) #define ctrecvh (ctxmith + 
(2 * LPIPTBLSIZE)) #define ctrecvl (ctxmith + (3 * LPIPTBLSIZE)) /* Functions for the lp# interface */ static int lpinittables(void); static int lpioctl(if_t, u_long, caddr_t); static int lpoutput(if_t, struct mbuf *, const struct sockaddr *, struct route *); static void lpstop(struct lp_data *); static void lp_intr(void *); static int lp_module_handler(module_t, int, void *); #define DEVTOSOFTC(dev) \ ((struct lp_data *)device_get_softc(dev)) static int lp_module_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_UNLOAD: mtx_lock(&lp_tables_lock); if (txmith != NULL) { free(txmith, M_DEVBUF); txmith = NULL; } if (ctxmith != NULL) { free(ctxmith, M_DEVBUF); ctxmith = NULL; } mtx_unlock(&lp_tables_lock); break; case MOD_LOAD: case MOD_QUIESCE: break; default: return (EOPNOTSUPP); } return (0); } static void lp_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, "plip", -1); if (!dev) - BUS_ADD_CHILD(parent, 0, "plip", -1); + BUS_ADD_CHILD(parent, 0, "plip", DEVICE_UNIT_ANY); } static int lp_probe(device_t dev) { device_set_desc(dev, "PLIP network interface"); return (0); } static int lp_attach(device_t dev) { struct lp_data *lp = DEVTOSOFTC(dev); if_t ifp; int error, rid = 0; lp->sc_dev = dev; /* * Reserve the interrupt resource. If we don't have one, the * attach fails. */ lp->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (lp->res_irq == NULL) { device_printf(dev, "cannot reserve interrupt, failed.\n"); return (ENXIO); } ifp = lp->sc_ifp = if_alloc(IFT_PARA); if_setsoftc(ifp, lp); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setmtu(ifp, LPMTU); if_setflags(ifp, IFF_SIMPLEX | IFF_POINTOPOINT | IFF_MULTICAST); if_setioctlfn(ifp, lpioctl); if_setoutputfn(ifp, lpoutput); if_setsendqlen(ifp, ifqmaxlen); if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(u_int32_t)); /* * Attach our interrupt handler. It is only called while we * own the ppbus. 
*/ error = bus_setup_intr(dev, lp->res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, lp_intr, lp, &lp->sc_intr_cookie); if (error) { bpfdetach(ifp); if_detach(ifp); bus_release_resource(dev, SYS_RES_IRQ, 0, lp->res_irq); device_printf(dev, "Unable to register interrupt handler\n"); return (error); } return (0); } static int lp_detach(device_t dev) { struct lp_data *sc = device_get_softc(dev); device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); lpstop(sc); ppb_unlock(ppbus); bpfdetach(sc->sc_ifp); if_detach(sc->sc_ifp); bus_teardown_intr(dev, sc->res_irq, sc->sc_intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->res_irq); return (0); } /* * Build the translation tables for the LPIP (BSD unix) protocol. * We don't want to calculate these nasties in our tight loop, so we * precalculate them when we initialize. */ static int lpinittables(void) { int i; mtx_lock(&lp_tables_lock); if (txmith == NULL) txmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT); if (txmith == NULL) { mtx_unlock(&lp_tables_lock); return (1); } if (ctxmith == NULL) ctxmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT); if (ctxmith == NULL) { mtx_unlock(&lp_tables_lock); return (1); } for (i = 0; i < LPIPTBLSIZE; i++) { ctxmith[i] = (i & 0xF0) >> 4; ctxmitl[i] = 0x10 | (i & 0x0F); ctrecvh[i] = (i & 0x78) << 1; ctrecvl[i] = (i & 0x78) >> 3; } for (i = 0; i < LPIPTBLSIZE; i++) { txmith[i] = ((i & 0x80) >> 3) | ((i & 0x70) >> 4) | 0x08; txmitl[i] = ((i & 0x08) << 1) | (i & 0x07); trecvh[i] = ((~i) & 0x80) | ((i & 0x38) << 1); trecvl[i] = (((~i) & 0x80) >> 4) | ((i & 0x38) >> 3); } mtx_unlock(&lp_tables_lock); return (0); } static void lpstop(struct lp_data *sc) { device_t ppbus = device_get_parent(sc->sc_dev); ppb_assert_locked(ppbus); ppb_wctr(ppbus, 0x00); if_setdrvflagbits(sc->sc_ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); free(sc->sc_ifbuf, M_DEVBUF); sc->sc_ifbuf = NULL; /* IFF_UP is not set, try to release the bus anyway */ ppb_release_bus(ppbus, sc->sc_dev); } static int 
lpinit_locked(if_t ifp) { struct lp_data *sc = if_getsoftc(ifp); device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int error; ppb_assert_locked(ppbus); error = ppb_request_bus(ppbus, dev, PPB_DONTWAIT); if (error) return (error); /* Now IFF_UP means that we own the bus */ ppb_set_mode(ppbus, PPB_COMPATIBLE); if (lpinittables()) { ppb_release_bus(ppbus, dev); return (ENOBUFS); } sc->sc_ifbuf = malloc(if_getmtu(sc->sc_ifp) + MLPIPHDRLEN, M_DEVBUF, M_NOWAIT); if (sc->sc_ifbuf == NULL) { ppb_release_bus(ppbus, dev); return (ENOBUFS); } ppb_wctr(ppbus, IRQENABLE); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); return (0); } /* * Process an ioctl request. */ static int lpioctl(if_t ifp, u_long cmd, caddr_t data) { struct lp_data *sc = if_getsoftc(ifp); device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); struct ifaddr *ifa = (struct ifaddr *)data; struct ifreq *ifr = (struct ifreq *)data; u_char *ptr; int error; switch (cmd) { case SIOCAIFADDR: case SIOCSIFADDR: if (ifa->ifa_addr->sa_family != AF_INET) return (EAFNOSUPPORT); if_setflagbits(ifp, IFF_UP, 0); /* FALLTHROUGH */ case SIOCSIFFLAGS: error = 0; ppb_lock(ppbus); if ((!(if_getflags(ifp) & IFF_UP)) && (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) lpstop(sc); else if (((if_getflags(ifp) & IFF_UP)) && (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) error = lpinit_locked(ifp); ppb_unlock(ppbus); return (error); case SIOCSIFMTU: ppb_lock(ppbus); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { ptr = malloc(ifr->ifr_mtu + MLPIPHDRLEN, M_DEVBUF, M_NOWAIT); if (ptr == NULL) { ppb_unlock(ppbus); return (ENOBUFS); } if (sc->sc_ifbuf) free(sc->sc_ifbuf, M_DEVBUF); sc->sc_ifbuf = ptr; } if_setmtu(ifp, ifr->ifr_mtu); ppb_unlock(ppbus); break; case SIOCGIFMTU: ifr->ifr_mtu = if_getmtu(sc->sc_ifp); break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifr == NULL) { return (EAFNOSUPPORT); /* XXX */ } switch (ifr->ifr_addr.sa_family) { case AF_INET: break; default: return 
(EAFNOSUPPORT); } break; case SIOCGIFMEDIA: /* * No ifmedia support at this stage; maybe use it * in future for eg. protocol selection. */ return (EINVAL); default: lprintf("LP:ioctl(0x%lx)\n", cmd); return (EINVAL); } return (0); } static __inline int clpoutbyte(u_char byte, int spin, device_t ppbus) { ppb_wdtr(ppbus, ctxmitl[byte]); while (ppb_rstr(ppbus) & CLPIP_SHAKE) if (--spin == 0) { return (1); } ppb_wdtr(ppbus, ctxmith[byte]); while (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) if (--spin == 0) { return (1); } return (0); } static __inline int clpinbyte(int spin, device_t ppbus) { u_char c, cl; while ((ppb_rstr(ppbus) & CLPIP_SHAKE)) if (!--spin) { return (-1); } cl = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0x10); while (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) if (!--spin) { return (-1); } c = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0x00); return (ctrecvl[cl] | ctrecvh[c]); } static void lptap(if_t ifp, struct mbuf *m) { u_int32_t af = AF_INET; bpf_mtap2_if(ifp, &af, sizeof(af), m); } static void lp_intr(void *arg) { struct lp_data *sc = arg; device_t ppbus = device_get_parent(sc->sc_dev); int len, j; u_char *bp; u_char c, cl; struct mbuf *top; ppb_assert_locked(ppbus); if (if_getflags(sc->sc_ifp) & IFF_LINK0) { /* Ack. 
the request */ ppb_wdtr(ppbus, 0x01); /* Get the packet length */ j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) goto err; len = j; j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) goto err; len = len + (j << 8); if (len > if_getmtu(sc->sc_ifp) + MLPIPHDRLEN) goto err; bp = sc->sc_ifbuf; while (len--) { j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) { goto err; } *bp++ = j; } /* Get and ignore checksum */ j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) { goto err; } len = bp - sc->sc_ifbuf; if (len <= CLPIPHDRLEN) goto err; sc->sc_iferrs = 0; len -= CLPIPHDRLEN; if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len); top = m_devget(sc->sc_ifbuf + CLPIPHDRLEN, len, 0, sc->sc_ifp, 0); if (top) { ppb_unlock(ppbus); lptap(sc->sc_ifp, top); M_SETFIB(top, if_getfib(sc->sc_ifp)); /* mbuf is free'd on failure. */ netisr_queue(NETISR_IP, top); ppb_lock(ppbus); } return; } while ((ppb_rstr(ppbus) & LPIP_SHAKE)) { len = if_getmtu(sc->sc_ifp) + LPIPHDRLEN; bp = sc->sc_ifbuf; while (len--) { cl = ppb_rstr(ppbus); ppb_wdtr(ppbus, 8); j = LPMAXSPIN2; while ((ppb_rstr(ppbus) & LPIP_SHAKE)) if (!--j) goto err; c = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0); *bp++= trecvh[cl] | trecvl[c]; j = LPMAXSPIN2; while (!((cl = ppb_rstr(ppbus)) & LPIP_SHAKE)) { if (cl != c && (((cl = ppb_rstr(ppbus)) ^ 0xb8) & 0xf8) == (c & 0xf8)) goto end; if (!--j) goto err; } } end: len = bp - sc->sc_ifbuf; if (len <= LPIPHDRLEN) goto err; sc->sc_iferrs = 0; len -= LPIPHDRLEN; if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len); top = m_devget(sc->sc_ifbuf + LPIPHDRLEN, len, 0, sc->sc_ifp, 0); if (top) { ppb_unlock(ppbus); lptap(sc->sc_ifp, top); M_SETFIB(top, if_getfib(sc->sc_ifp)); /* mbuf is free'd on failure. 
*/ netisr_queue(NETISR_IP, top); ppb_lock(ppbus); } } return; err: ppb_wdtr(ppbus, 0); lprintf("R"); if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); sc->sc_iferrs++; /* * We are not able to send receive anything for now, * so stop wasting our time */ if (sc->sc_iferrs > LPMAXERRS) { if_printf(sc->sc_ifp, "Too many errors, Going off-line.\n"); ppb_wctr(ppbus, 0x00); if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); sc->sc_iferrs = 0; } } static __inline int lpoutbyte(u_char byte, int spin, device_t ppbus) { ppb_wdtr(ppbus, txmith[byte]); while (!(ppb_rstr(ppbus) & LPIP_SHAKE)) if (--spin == 0) return (1); ppb_wdtr(ppbus, txmitl[byte]); while (ppb_rstr(ppbus) & LPIP_SHAKE) if (--spin == 0) return (1); return (0); } static int lpoutput(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct lp_data *sc = if_getsoftc(ifp); device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int err; struct mbuf *mm; u_char *cp = "\0\0"; u_char chksum = 0; int count = 0; int i, len, spin; /* We need a sensible value if we abort */ cp++; ppb_lock(ppbus); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); err = 1; /* assume we're aborting because of an error */ /* Suspend (on laptops) or receive-errors might have taken us offline */ ppb_wctr(ppbus, IRQENABLE); if (if_getflags(ifp) & IFF_LINK0) { if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) { lprintf("&"); lp_intr(sc); } /* Alert other end to pending packet */ spin = LPMAXSPIN1; ppb_wdtr(ppbus, 0x08); while ((ppb_rstr(ppbus) & 0x08) == 0) if (--spin == 0) { goto nend; } /* Calculate length of packet, then send that */ count += 14; /* Ethernet header len */ mm = m; for (mm = m; mm; mm = mm->m_next) { count += mm->m_len; } if (clpoutbyte(count & 0xFF, LPMAXSPIN1, ppbus)) goto nend; if (clpoutbyte((count >> 8) & 0xFF, LPMAXSPIN1, ppbus)) goto nend; /* Send dummy ethernet header */ for (i = 0; i < 12; i++) { if (clpoutbyte(i, LPMAXSPIN1, ppbus)) goto nend; chksum += i; } if (clpoutbyte(0x08, LPMAXSPIN1, ppbus)) 
goto nend; if (clpoutbyte(0x00, LPMAXSPIN1, ppbus)) goto nend; chksum += 0x08 + 0x00; /* Add into checksum */ mm = m; do { cp = mtod(mm, u_char *); len = mm->m_len; while (len--) { chksum += *cp; if (clpoutbyte(*cp++, LPMAXSPIN2, ppbus)) goto nend; } } while ((mm = mm->m_next)); /* Send checksum */ if (clpoutbyte(chksum, LPMAXSPIN2, ppbus)) goto nend; /* Go quiescent */ ppb_wdtr(ppbus, 0); err = 0; /* No errors */ nend: if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (err) { /* if we didn't timeout... */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lprintf("X"); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); lptap(ifp, m); } m_freem(m); if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) { lprintf("^"); lp_intr(sc); } ppb_unlock(ppbus); return (0); } if (ppb_rstr(ppbus) & LPIP_SHAKE) { lprintf("&"); lp_intr(sc); } if (lpoutbyte(0x08, LPMAXSPIN1, ppbus)) goto end; if (lpoutbyte(0x00, LPMAXSPIN2, ppbus)) goto end; mm = m; do { cp = mtod(mm, u_char *); len = mm->m_len; while (len--) if (lpoutbyte(*cp++, LPMAXSPIN2, ppbus)) goto end; } while ((mm = mm->m_next)); err = 0; /* no errors were encountered */ end: --cp; ppb_wdtr(ppbus, txmitl[*cp] ^ 0x17); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (err) { /* if we didn't timeout... 
*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lprintf("X"); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); lptap(ifp, m); } m_freem(m); if (ppb_rstr(ppbus) & LPIP_SHAKE) { lprintf("^"); lp_intr(sc); } ppb_unlock(ppbus); return (0); } static device_method_t lp_methods[] = { /* device interface */ DEVMETHOD(device_identify, lp_identify), DEVMETHOD(device_probe, lp_probe), DEVMETHOD(device_attach, lp_attach), DEVMETHOD(device_detach, lp_detach), { 0, 0 } }; static driver_t lp_driver = { "plip", lp_methods, sizeof(struct lp_data), }; DRIVER_MODULE(plip, ppbus, lp_driver, lp_module_handler, NULL); MODULE_DEPEND(plip, ppbus, 1, 1, 1); diff --git a/sys/dev/ppbus/lpbb.c b/sys/dev/ppbus/lpbb.c index ce16471e7629..3380cdfdaed4 100644 --- a/sys/dev/ppbus/lpbb.c +++ b/sys/dev/ppbus/lpbb.c @@ -1,266 +1,266 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu, Marc Bouget * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #include /* * I2C Bit-Banging over parallel port * * See the Official Philips interface description in lpbb(4) */ #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #include #include #include "iicbb_if.h" static int lpbb_detect(device_t dev); static void lpbb_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, "lpbb", -1); if (!dev) - BUS_ADD_CHILD(parent, 0, "lpbb", -1); + BUS_ADD_CHILD(parent, 0, "lpbb", DEVICE_UNIT_ANY); } static int lpbb_probe(device_t dev) { /* Perhaps call this during identify instead? 
*/ if (!lpbb_detect(dev)) return (ENXIO); device_set_desc(dev, "Parallel I2C bit-banging interface"); return (0); } static int lpbb_attach(device_t dev) { device_t bitbang; /* add generic bit-banging code */ bitbang = device_add_child(dev, "iicbb", DEVICE_UNIT_ANY); device_probe_and_attach(bitbang); return (0); } static int lpbb_callback(device_t dev, int index, caddr_t data) { device_t ppbus = device_get_parent(dev); int error = 0; int how; switch (index) { case IIC_REQUEST_BUS: /* request the ppbus */ how = *(int *)data; ppb_lock(ppbus); error = ppb_request_bus(ppbus, dev, how); ppb_unlock(ppbus); break; case IIC_RELEASE_BUS: /* release the ppbus */ ppb_lock(ppbus); error = ppb_release_bus(ppbus, dev); ppb_unlock(ppbus); break; default: error = EINVAL; } return (error); } #define SDA_out 0x80 #define SCL_out 0x08 #define SDA_in 0x80 #define SCL_in 0x08 #define ALIM 0x20 #define I2CKEY 0x50 /* Reset bus by setting SDA first and then SCL. */ static void lpbb_reset_bus(device_t dev) { device_t ppbus = device_get_parent(dev); ppb_assert_locked(ppbus); ppb_wdtr(ppbus, (u_char)~SDA_out); ppb_wctr(ppbus, (u_char)(ppb_rctr(ppbus) | SCL_out)); } static int lpbb_getscl(device_t dev) { device_t ppbus = device_get_parent(dev); int rval; ppb_lock(ppbus); rval = ((ppb_rstr(ppbus) & SCL_in) == SCL_in); ppb_unlock(ppbus); return (rval); } static int lpbb_getsda(device_t dev) { device_t ppbus = device_get_parent(dev); int rval; ppb_lock(ppbus); rval = ((ppb_rstr(ppbus) & SDA_in) == SDA_in); ppb_unlock(ppbus); return (rval); } static void lpbb_setsda(device_t dev, int val) { device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); if (val == 0) ppb_wdtr(ppbus, (u_char)SDA_out); else ppb_wdtr(ppbus, (u_char)~SDA_out); ppb_unlock(ppbus); } static void lpbb_setscl(device_t dev, int val) { device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); if (val == 0) ppb_wctr(ppbus, (u_char)(ppb_rctr(ppbus) & ~SCL_out)); else ppb_wctr(ppbus, (u_char)(ppb_rctr(ppbus) | SCL_out)); 
ppb_unlock(ppbus); } static int lpbb_detect(device_t dev) { device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); if (ppb_request_bus(ppbus, dev, PPB_DONTWAIT)) { ppb_unlock(ppbus); device_printf(dev, "can't allocate ppbus\n"); return (0); } lpbb_reset_bus(dev); if ((ppb_rstr(ppbus) & I2CKEY) || ((ppb_rstr(ppbus) & ALIM) != ALIM)) { ppb_release_bus(ppbus, dev); ppb_unlock(ppbus); return (0); } ppb_release_bus(ppbus, dev); ppb_unlock(ppbus); return (1); } static int lpbb_reset(device_t dev, u_char speed, u_char addr, u_char * oldaddr) { device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); if (ppb_request_bus(ppbus, dev, PPB_DONTWAIT)) { ppb_unlock(ppbus); device_printf(dev, "can't allocate ppbus\n"); return (0); } lpbb_reset_bus(dev); ppb_release_bus(ppbus, dev); ppb_unlock(ppbus); return (IIC_ENOADDR); } static device_method_t lpbb_methods[] = { /* device interface */ DEVMETHOD(device_identify, lpbb_identify), DEVMETHOD(device_probe, lpbb_probe), DEVMETHOD(device_attach, lpbb_attach), /* iicbb interface */ DEVMETHOD(iicbb_callback, lpbb_callback), DEVMETHOD(iicbb_setsda, lpbb_setsda), DEVMETHOD(iicbb_setscl, lpbb_setscl), DEVMETHOD(iicbb_getsda, lpbb_getsda), DEVMETHOD(iicbb_getscl, lpbb_getscl), DEVMETHOD(iicbb_reset, lpbb_reset), DEVMETHOD_END }; static driver_t lpbb_driver = { "lpbb", lpbb_methods, 1, }; DRIVER_MODULE(lpbb, ppbus, lpbb_driver, 0, 0); DRIVER_MODULE(iicbb, lpbb, iicbb_driver, 0, 0); MODULE_DEPEND(lpbb, ppbus, 1, 1, 1); MODULE_DEPEND(lpbb, iicbb, IICBB_MINVER, IICBB_PREFVER, IICBB_MAXVER); MODULE_VERSION(lpbb, 1); diff --git a/sys/dev/ppbus/lpt.c b/sys/dev/ppbus/lpt.c index 644e0f08008b..994adcc6a608 100644 --- a/sys/dev/ppbus/lpt.c +++ b/sys/dev/ppbus/lpt.c @@ -1,996 +1,996 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1990 William F. Jolitz, TeleMuse * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This software is a component of "386BSD" developed by * William F. Jolitz, TeleMuse. * 4. Neither the name of the developer nor the name "386BSD" * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT. * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT * NOT MAKE USE OF THIS WORK. * * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992. * * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE DEVELOPER BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: unknown origin, 386BSD 0.1 * From Id: lpt.c,v 1.55.2.1 1996/11/12 09:08:38 phk Exp * From Id: nlpt.c,v 1.14 1999/02/08 13:55:43 des Exp */ #include /* * Device Driver for AT parallel printer port * Written by William Jolitz 12/18/90 */ /* * Updated for ppbus by Nicolas Souchu * [Mon Jul 28 1997] */ #include "opt_lpt.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #ifndef LPT_DEBUG #define lprintf(args) #else #define lprintf(args) \ do { \ if (lptflag) \ printf args; \ } while (0) static int volatile lptflag = 1; #endif #define LPINITRDY 4 /* wait up to 4 seconds for a ready */ #define LPTOUTINITIAL 10 /* initial timeout to wait for ready 1/10 s */ #define LPTOUTMAX 1 /* maximal timeout 1 s */ #define LPPRI (PZERO+8) #define BUFSIZE 1024 #define BUFSTATSIZE 32 struct lpt_data { device_t sc_dev; struct cdev *sc_cdev; struct cdev *sc_cdev_bypass; short sc_state; /* default case: negative prime, negative ack, handshake strobe, prime once */ u_char sc_control; char sc_flags; #define LP_POS_INIT 0x04 /* if we are a positive init signal */ #define LP_POS_ACK 0x08 /* if we are a positive going ack */ #define LP_NO_PRIME 0x10 /* don't prime the printer at all */ #define LP_PRIMEOPEN 0x20 /* prime on every open */ #define LP_AUTOLF 0x40 /* tell printer to do an automatic lf */ #define LP_BYPASS 0x80 /* bypass printer ready checks */ void 
*sc_inbuf; void *sc_statbuf; short sc_xfercnt ; char sc_primed; char *sc_cp ; u_short sc_irq ; /* IRQ status of port */ #define LP_HAS_IRQ 0x01 /* we have an irq available */ #define LP_USE_IRQ 0x02 /* we are using our irq */ #define LP_ENABLE_IRQ 0x04 /* enable IRQ on open */ #define LP_ENABLE_EXT 0x10 /* we shall use advanced mode when possible */ u_char sc_backoff ; /* time to call lptout() again */ struct callout sc_timer; struct resource *sc_intr_resource; /* interrupt resource */ void *sc_intr_cookie; /* interrupt cookie */ }; #define LPT_NAME "lpt" /* our official name */ static callout_func_t lptout; static int lpt_port_test(device_t dev, u_char data, u_char mask); static int lpt_detect(device_t dev); #define DEVTOSOFTC(dev) \ ((struct lpt_data *)device_get_softc(dev)) static void lptintr(void *arg); /* bits for state */ #define OPEN (1<<0) /* device is open */ #define ASLP (1<<1) /* awaiting draining of printer */ #define EERROR (1<<2) /* error was received from printer */ #define OBUSY (1<<3) /* printer is busy doing output */ #define LPTOUT (1<<4) /* timeout while not selected */ #define TOUT (1<<5) /* timeout while not selected */ #define LPTINIT (1<<6) /* waiting to initialize for open */ #define INTERRUPTED (1<<7) /* write call was interrupted */ #define HAVEBUS (1<<8) /* the driver owns the bus */ /* status masks to interrogate printer status */ #define RDY_MASK (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR) /* ready ? 
*/ #define LP_READY (LPS_SEL|LPS_NBSY|LPS_NERR) /* Printer Ready condition - from lpa.c */ /* Only used in polling code */ #define LPS_INVERT (LPS_NBSY | LPS_NACK | LPS_SEL | LPS_NERR) #define LPS_MASK (LPS_NBSY | LPS_NACK | LPS_OUT | LPS_SEL | LPS_NERR) #define NOT_READY(ppbus) ((ppb_rstr(ppbus)^LPS_INVERT)&LPS_MASK) #define MAX_SLEEP (hz*5) /* Timeout while waiting for device ready */ #define MAX_SPIN 20 /* Max delay for device ready in usecs */ static d_open_t lptopen; static d_close_t lptclose; static d_write_t lptwrite; static d_read_t lptread; static d_ioctl_t lptioctl; static struct cdevsw lpt_cdevsw = { .d_version = D_VERSION, .d_open = lptopen, .d_close = lptclose, .d_read = lptread, .d_write = lptwrite, .d_ioctl = lptioctl, .d_name = LPT_NAME, }; static int lpt_request_ppbus(device_t dev, int how) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int error; /* * We might already have the bus for a write(2) after an interrupted * write(2) call. */ ppb_assert_locked(ppbus); if (sc->sc_state & HAVEBUS) return (0); error = ppb_request_bus(ppbus, dev, how); if (error == 0) sc->sc_state |= HAVEBUS; return (error); } static int lpt_release_ppbus(device_t dev) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int error = 0; ppb_assert_locked(ppbus); if (sc->sc_state & HAVEBUS) { error = ppb_release_bus(ppbus, dev); if (error == 0) sc->sc_state &= ~HAVEBUS; } return (error); } /* * Internal routine to lptprobe to do port tests of one byte value */ static int lpt_port_test(device_t ppbus, u_char data, u_char mask) { int temp, timeout; data = data & mask; ppb_wdtr(ppbus, data); timeout = 10000; do { DELAY(10); temp = ppb_rdtr(ppbus) & mask; } while (temp != data && --timeout); lprintf(("out=%x\tin=%x\ttout=%d\n", data, temp, timeout)); return (temp == data); } /* * Probe simplified by replacing multiple loops with a hardcoded * test pattern - 1999/02/08 des@freebsd.org * * New lpt port probe Geoff 
Rehmet - Rhodes University - 14/2/94 * Based partially on Rod Grimes' printer probe * * Logic: * 1) If no port address was given, use the bios detected ports * and autodetect what ports the printers are on. * 2) Otherwise, probe the data port at the address given, * using the method in Rod Grimes' port probe. * (Much code ripped off directly from Rod's probe.) * * Comments from Rod's probe: * Logic: * 1) You should be able to write to and read back the same value * to the data port. Do an alternating zeros, alternating ones, * walking zero, and walking one test to check for stuck bits. * * 2) You should be able to write to and read back the same value * to the control port lower 5 bits, the upper 3 bits are reserved * per the IBM PC technical reference manuals and different boards * do different things with them. Do an alternating zeros, alternating * ones, walking zero, and walking one test to check for stuck bits. * * Some printers drag the strobe line down when the are powered off * so this bit has been masked out of the control port test. * * XXX Some printers may not like a fast pulse on init or strobe, I * don't know at this point, if that becomes a problem these bits * should be turned off in the mask byte for the control port test. * * We are finally left with a mask of 0x14, due to some printers * being adamant about holding other bits high ........ * * Before probing the control port, we write a 0 to the data port - * If not, some printers chuck out garbage when the strobe line * gets toggled. * * 3) Set the data and control ports to a value of 0 * * This probe routine has been tested on Epson Lx-800, HP LJ3P, * Epson FX-1170 and C.Itoh 8510RM * printers. * Quick exit on fail added. 
*/ static int lpt_detect(device_t dev) { device_t ppbus = device_get_parent(dev); static u_char testbyte[18] = { 0x55, /* alternating zeros */ 0xaa, /* alternating ones */ 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f, /* walking zero */ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 /* walking one */ }; int i, error, status; status = 1; /* assume success */ ppb_lock(ppbus); if ((error = lpt_request_ppbus(dev, PPB_DONTWAIT))) { ppb_unlock(ppbus); device_printf(dev, "cannot alloc ppbus (%d)!\n", error); return (0); } for (i = 0; i < 18 && status; i++) if (!lpt_port_test(ppbus, testbyte[i], 0xff)) { status = 0; break; } /* write 0's to control and data ports */ ppb_wdtr(ppbus, 0); ppb_wctr(ppbus, 0); lpt_release_ppbus(dev); ppb_unlock(ppbus); return (status); } static void lpt_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, LPT_NAME, -1); if (!dev) - BUS_ADD_CHILD(parent, 0, LPT_NAME, -1); + BUS_ADD_CHILD(parent, 0, LPT_NAME, DEVICE_UNIT_ANY); } /* * lpt_probe() */ static int lpt_probe(device_t dev) { if (!lpt_detect(dev)) return (ENXIO); device_set_desc(dev, "Printer"); return (0); } static int lpt_attach(device_t dev) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int rid = 0, unit = device_get_unit(dev); int error; sc->sc_primed = 0; /* not primed yet */ ppb_init_callout(ppbus, &sc->sc_timer, 0); ppb_lock(ppbus); if ((error = lpt_request_ppbus(dev, PPB_DONTWAIT))) { ppb_unlock(ppbus); device_printf(dev, "cannot alloc ppbus (%d)!\n", error); return (0); } ppb_wctr(ppbus, LPC_NINIT); lpt_release_ppbus(dev); ppb_unlock(ppbus); /* declare our interrupt handler */ sc->sc_intr_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (sc->sc_intr_resource) { error = bus_setup_intr(dev, sc->sc_intr_resource, INTR_TYPE_TTY | INTR_MPSAFE, NULL, lptintr, sc, &sc->sc_intr_cookie); if (error) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_intr_resource); device_printf(dev, 
"Unable to register interrupt handler\n"); return (error); } sc->sc_irq = LP_HAS_IRQ | LP_USE_IRQ | LP_ENABLE_IRQ; device_printf(dev, "Interrupt-driven port\n"); } else { sc->sc_irq = 0; device_printf(dev, "Polled port\n"); } lprintf(("irq %x\n", sc->sc_irq)); sc->sc_inbuf = malloc(BUFSIZE, M_DEVBUF, M_WAITOK); sc->sc_statbuf = malloc(BUFSTATSIZE, M_DEVBUF, M_WAITOK); sc->sc_dev = dev; sc->sc_cdev = make_dev(&lpt_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, LPT_NAME "%d", unit); sc->sc_cdev->si_drv1 = sc; sc->sc_cdev->si_drv2 = 0; sc->sc_cdev_bypass = make_dev(&lpt_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, LPT_NAME "%d.ctl", unit); sc->sc_cdev_bypass->si_drv1 = sc; sc->sc_cdev_bypass->si_drv2 = (void *)LP_BYPASS; return (0); } static int lpt_detach(device_t dev) { struct lpt_data *sc = DEVTOSOFTC(dev); device_t ppbus = device_get_parent(dev); destroy_dev(sc->sc_cdev); destroy_dev(sc->sc_cdev_bypass); ppb_lock(ppbus); lpt_release_ppbus(dev); ppb_unlock(ppbus); callout_drain(&sc->sc_timer); if (sc->sc_intr_resource != NULL) { bus_teardown_intr(dev, sc->sc_intr_resource, sc->sc_intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr_resource); } free(sc->sc_inbuf, M_DEVBUF); free(sc->sc_statbuf, M_DEVBUF); return (0); } static void lptout(void *arg) { struct lpt_data *sc = arg; device_t dev = sc->sc_dev; device_t ppbus __unused; ppbus = device_get_parent(dev); ppb_assert_locked(ppbus); lprintf(("T %x ", ppb_rstr(ppbus))); if (sc->sc_state & OPEN) { sc->sc_backoff++; if (sc->sc_backoff > hz/LPTOUTMAX) sc->sc_backoff = hz/LPTOUTMAX; callout_reset(&sc->sc_timer, sc->sc_backoff, lptout, sc); } else sc->sc_state &= ~TOUT; if (sc->sc_state & EERROR) sc->sc_state &= ~EERROR; /* * Avoid possible hangs due to missed interrupts */ if (sc->sc_xfercnt) { lptintr(sc); } else { sc->sc_state &= ~OBUSY; wakeup(dev); } } /* * lptopen -- reset the printer, then wait until it's selected and not busy. 
* If LP_BYPASS flag is selected, then we do not try to select the * printer -- this is just used for passing ioctls. */ static int lptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { int trys, err; struct lpt_data *sc = dev->si_drv1; device_t lptdev; device_t ppbus; if (!sc) return (ENXIO); lptdev = sc->sc_dev; ppbus = device_get_parent(lptdev); ppb_lock(ppbus); if (sc->sc_state) { lprintf(("%s: still open %x\n", device_get_nameunit(lptdev), sc->sc_state)); ppb_unlock(ppbus); return(EBUSY); } else sc->sc_state |= LPTINIT; sc->sc_flags = (uintptr_t)dev->si_drv2; /* Check for open with BYPASS flag set. */ if (sc->sc_flags & LP_BYPASS) { sc->sc_state = OPEN; ppb_unlock(ppbus); return(0); } /* request the ppbus only if we don't have it already */ if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) { /* give it a chance to try later */ sc->sc_state = 0; ppb_unlock(ppbus); return (err); } lprintf(("%s flags 0x%x\n", device_get_nameunit(lptdev), sc->sc_flags)); /* set IRQ status according to ENABLE_IRQ flag */ if (sc->sc_irq & LP_ENABLE_IRQ) sc->sc_irq |= LP_USE_IRQ; else sc->sc_irq &= ~LP_USE_IRQ; /* init printer */ if ((sc->sc_flags & LP_NO_PRIME) == 0) { if ((sc->sc_flags & LP_PRIMEOPEN) || sc->sc_primed == 0) { ppb_wctr(ppbus, 0); sc->sc_primed++; DELAY(500); } } ppb_wctr(ppbus, LPC_SEL|LPC_NINIT); /* wait till ready (printer running diagnostics) */ trys = 0; do { /* ran out of waiting for the printer */ if (trys++ >= LPINITRDY*4) { lprintf(("status %x\n", ppb_rstr(ppbus))); lpt_release_ppbus(lptdev); sc->sc_state = 0; ppb_unlock(ppbus); return (EBUSY); } /* wait 1/4 second, give up if we get a signal */ if (ppb_sleep(ppbus, lptdev, LPPRI | PCATCH, "lptinit", hz / 4) != EWOULDBLOCK) { lpt_release_ppbus(lptdev); sc->sc_state = 0; ppb_unlock(ppbus); return (EBUSY); } /* is printer online and ready for output */ } while ((ppb_rstr(ppbus) & (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) != (LPS_SEL|LPS_NBSY|LPS_NERR)); sc->sc_control = LPC_SEL|LPC_NINIT; 
if (sc->sc_flags & LP_AUTOLF) sc->sc_control |= LPC_AUTOL; /* enable interrupt if interrupt-driven */ if (sc->sc_irq & LP_USE_IRQ) sc->sc_control |= LPC_ENA; ppb_wctr(ppbus, sc->sc_control); sc->sc_state &= ~LPTINIT; sc->sc_state |= OPEN; sc->sc_xfercnt = 0; /* only use timeout if using interrupt */ lprintf(("irq %x\n", sc->sc_irq)); if (sc->sc_irq & LP_USE_IRQ) { sc->sc_state |= TOUT; sc->sc_backoff = hz / LPTOUTINITIAL; callout_reset(&sc->sc_timer, sc->sc_backoff, lptout, sc); } /* release the ppbus */ lpt_release_ppbus(lptdev); ppb_unlock(ppbus); lprintf(("opened.\n")); return(0); } /* * lptclose -- close the device, free the local line buffer. * * Check for interrupted write call added. */ static int lptclose(struct cdev *dev, int flags, int fmt, struct thread *td) { struct lpt_data *sc = dev->si_drv1; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); int err; ppb_lock(ppbus); if (sc->sc_flags & LP_BYPASS) goto end_close; if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) { ppb_unlock(ppbus); return (err); } /* if the last write was interrupted, don't complete it */ if ((!(sc->sc_state & INTERRUPTED)) && (sc->sc_irq & LP_USE_IRQ)) while ((ppb_rstr(ppbus) & (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) != (LPS_SEL|LPS_NBSY|LPS_NERR) || sc->sc_xfercnt) /* wait 1 second, give up if we get a signal */ if (ppb_sleep(ppbus, lptdev, LPPRI | PCATCH, "lpclose", hz) != EWOULDBLOCK) break; sc->sc_state &= ~OPEN; callout_stop(&sc->sc_timer); ppb_wctr(ppbus, LPC_NINIT); /* * unregistration of interrupt forced by release */ lpt_release_ppbus(lptdev); end_close: sc->sc_state = 0; sc->sc_xfercnt = 0; ppb_unlock(ppbus); lprintf(("closed.\n")); return(0); } /* * lpt_pushbytes() * Workhorse for actually spinning and writing bytes to printer * Derived from lpa.c * Originally by ? 
* * This code is only used when we are polling the port */ static int lpt_pushbytes(struct lpt_data *sc) { device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int spin, err, tic; char ch; ppb_assert_locked(ppbus); lprintf(("p")); /* loop for every character .. */ while (sc->sc_xfercnt > 0) { /* printer data */ ch = *(sc->sc_cp); sc->sc_cp++; sc->sc_xfercnt--; /* * Wait for printer ready. * Loop 20 usecs testing BUSY bit, then sleep * for exponentially increasing timeout. (vak) */ for (spin = 0; NOT_READY(ppbus) && spin < MAX_SPIN; ++spin) DELAY(1); /* XXX delay is NOT this accurate! */ if (spin >= MAX_SPIN) { tic = 0; while (NOT_READY(ppbus)) { /* * Now sleep, every cycle a * little longer .. */ tic = tic + tic + 1; /* * But no more than 10 seconds. (vak) */ if (tic > MAX_SLEEP) tic = MAX_SLEEP; err = ppb_sleep(ppbus, dev, LPPRI, LPT_NAME "poll", tic); if (err != EWOULDBLOCK) { return (err); } } } /* output data */ ppb_wdtr(ppbus, ch); /* strobe */ ppb_wctr(ppbus, sc->sc_control|LPC_STB); ppb_wctr(ppbus, sc->sc_control); } return(0); } /* * lptread --retrieve printer status in IEEE1284 NIBBLE mode */ static int lptread(struct cdev *dev, struct uio *uio, int ioflag) { struct lpt_data *sc = dev->si_drv1; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); int error = 0, len; if (sc->sc_flags & LP_BYPASS) { /* we can't do reads in bypass mode */ return (EPERM); } ppb_lock(ppbus); if ((error = ppb_1284_negociate(ppbus, PPB_NIBBLE, 0))) { ppb_unlock(ppbus); return (error); } /* read data in an other buffer, read/write may be simultaneous */ len = 0; while (uio->uio_resid) { if ((error = ppb_1284_read(ppbus, PPB_NIBBLE, sc->sc_statbuf, min(BUFSTATSIZE, uio->uio_resid), &len))) { goto error; } if (!len) goto error; /* no more data */ ppb_unlock(ppbus); error = uiomove(sc->sc_statbuf, len, uio); ppb_lock(ppbus); if (error) goto error; } error: ppb_1284_terminate(ppbus); ppb_unlock(ppbus); return (error); } /* * lptwrite --copy a line 
from user space to a local buffer, then call
 * putc to get the chars moved to the output queue.
 *
 * Flagging of interrupted write added.
 */
static int
lptwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	register unsigned n;
	int err;
	struct lpt_data *sc = dev->si_drv1;
	device_t lptdev = sc->sc_dev;
	device_t ppbus = device_get_parent(lptdev);

	if (sc->sc_flags & LP_BYPASS) {
		/* we can't do writes in bypass mode */
		return (EPERM);
	}

	/* request the ppbus only if we don't have it already */
	ppb_lock(ppbus);
	if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) {
		ppb_unlock(ppbus);
		return (err);
	}

	sc->sc_state &= ~INTERRUPTED;
	/* copy user data into sc_inbuf one BUFSIZE chunk at a time */
	while ((n = min(BUFSIZE, uio->uio_resid)) != 0) {
		sc->sc_cp = sc->sc_inbuf;
		/* drop the bus lock across uiomove(); it may fault/sleep */
		ppb_unlock(ppbus);
		err = uiomove(sc->sc_cp, n, uio);
		ppb_lock(ppbus);
		if (err)
			break;
		sc->sc_xfercnt = n;

		if (sc->sc_irq & LP_ENABLE_EXT) {
			/* try any extended mode */
			err = ppb_write(ppbus, sc->sc_cp,
					sc->sc_xfercnt, 0);
			switch (err) {
			case 0:
				/* if not all data was sent, we could rely
				 * on polling for the last bytes */
				sc->sc_xfercnt = 0;
				break;
			case EINTR:
				/* remember the interruption for lptclose() */
				sc->sc_state |= INTERRUPTED;
				ppb_unlock(ppbus);
				return (err);
			case EINVAL:
				/* advanced mode not avail */
				log(LOG_NOTICE,
				    "%s: advanced mode not avail, polling\n",
				    device_get_nameunit(sc->sc_dev));
				break;
			default:
				ppb_unlock(ppbus);
				return (err);
			}
		} else while ((sc->sc_xfercnt > 0)&&(sc->sc_irq & LP_USE_IRQ)) {
			lprintf(("i"));
			/* if the printer is ready for a char, */
			/* give it one */
			if ((sc->sc_state & OBUSY) == 0){
				lprintf(("\nC %d. ", sc->sc_xfercnt));
				/* prime the transfer by hand */
				lptintr(sc);
			}
			lprintf(("W "));
			if (sc->sc_state & OBUSY)
				if ((err = ppb_sleep(ppbus, lptdev,
					 LPPRI|PCATCH, LPT_NAME "write", 0))) {
					sc->sc_state |= INTERRUPTED;
					ppb_unlock(ppbus);
					return(err);
				}
		}

		/* check to see if we must do a polled write */
		if (!(sc->sc_irq & LP_USE_IRQ) && (sc->sc_xfercnt)) {
			lprintf(("p"));

			err = lpt_pushbytes(sc);

			if (err) {
				ppb_unlock(ppbus);
				return (err);
			}
		}
	}

	/* we have not been interrupted, release the ppbus */
	lpt_release_ppbus(lptdev);
	ppb_unlock(ppbus);

	return (err);
}

/*
 * lptintr -- handle printer interrupts which occur when the printer is
 * ready to accept another char.
 *
 * do checking for interrupted write call.
 */
static void
lptintr(void *arg)
{
	struct lpt_data *sc = arg;
	device_t lptdev = sc->sc_dev;
	device_t ppbus = device_get_parent(lptdev);
	int sts = 0;
	int i;

	/*
	 * Is printer online and ready for output?
	 *
	 * Avoid falling back to lptout() too quickly.  First spin-loop
	 * to see if the printer will become ready ``really soon now''.
	 */
	for (i = 0; i < 100 &&
	     ((sts=ppb_rstr(ppbus)) & RDY_MASK) != LP_READY; i++) ;

	if ((sts & RDY_MASK) == LP_READY) {
		sc->sc_state = (sc->sc_state | OBUSY) & ~EERROR;
		/* reset the lptout() backoff now that the printer responded */
		sc->sc_backoff = hz / LPTOUTINITIAL;

		if (sc->sc_xfercnt) {
			/* send char */
			/*lprintf(("%x ", *sc->sc_cp)); */
			ppb_wdtr(ppbus, *sc->sc_cp++) ;
			ppb_wctr(ppbus, sc->sc_control|LPC_STB);
			/* DELAY(X) */
			ppb_wctr(ppbus, sc->sc_control);

			/* any more data for printer */
			if (--(sc->sc_xfercnt) > 0)
				return;
		}

		/*
		 * No more data waiting for printer.
		 * Wakeup is not done if write call was not interrupted.
		 */
		sc->sc_state &= ~OBUSY;

		if (!(sc->sc_state & INTERRUPTED))
			wakeup(lptdev);
		lprintf(("w "));
		return;
	} else {
		/* check for error */
		if (((sts & (LPS_NERR | LPS_OUT) ) != LPS_NERR) &&
		    (sc->sc_state & OPEN))
			sc->sc_state |= EERROR;
		/* lptout() will jump in and try to restart.
*/ } lprintf(("sts %x ", sts)); } static int lptioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { int error = 0; struct lpt_data *sc = dev->si_drv1; device_t ppbus; u_char old_sc_irq; /* old printer IRQ status */ switch (cmd) { case LPT_IRQ : ppbus = device_get_parent(sc->sc_dev); ppb_lock(ppbus); if (sc->sc_irq & LP_HAS_IRQ) { /* * NOTE: * If the IRQ status is changed, * this will only be visible on the * next open. * * If interrupt status changes, * this gets syslog'd. */ old_sc_irq = sc->sc_irq; switch (*(int*)data) { case 0: sc->sc_irq &= (~LP_ENABLE_IRQ); break; case 1: sc->sc_irq &= (~LP_ENABLE_EXT); sc->sc_irq |= LP_ENABLE_IRQ; break; case 2: /* classic irq based transfer and advanced * modes are in conflict */ sc->sc_irq &= (~LP_ENABLE_IRQ); sc->sc_irq |= LP_ENABLE_EXT; break; case 3: sc->sc_irq &= (~LP_ENABLE_EXT); break; default: break; } if (old_sc_irq != sc->sc_irq ) log(LOG_NOTICE, "%s: switched to %s %s mode\n", device_get_nameunit(sc->sc_dev), (sc->sc_irq & LP_ENABLE_IRQ)? "interrupt-driven":"polled", (sc->sc_irq & LP_ENABLE_EXT)? "extended":"standard"); } else /* polled port */ error = EOPNOTSUPP; ppb_unlock(ppbus); break; default: error = ENODEV; } return(error); } static device_method_t lpt_methods[] = { /* device interface */ DEVMETHOD(device_identify, lpt_identify), DEVMETHOD(device_probe, lpt_probe), DEVMETHOD(device_attach, lpt_attach), DEVMETHOD(device_detach, lpt_detach), { 0, 0 } }; static driver_t lpt_driver = { LPT_NAME, lpt_methods, sizeof(struct lpt_data), }; DRIVER_MODULE(lpt, ppbus, lpt_driver, 0, 0); MODULE_DEPEND(lpt, ppbus, 1, 1, 1); diff --git a/sys/dev/ppbus/pcfclock.c b/sys/dev/ppbus/pcfclock.c index e0d2e71b49e5..7cee6692b367 100644 --- a/sys/dev/ppbus/pcfclock.c +++ b/sys/dev/ppbus/pcfclock.c @@ -1,333 +1,333 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Sascha Schumann. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY SASCHA SCHUMANN ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * */ #include #include "opt_pcfclock.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #define PCFCLOCK_NAME "pcfclock" struct pcfclock_data { device_t dev; struct cdev *cdev; }; static d_open_t pcfclock_open; static d_close_t pcfclock_close; static d_read_t pcfclock_read; static struct cdevsw pcfclock_cdevsw = { .d_version = D_VERSION, .d_open = pcfclock_open, .d_close = pcfclock_close, .d_read = pcfclock_read, .d_name = PCFCLOCK_NAME, }; #ifndef PCFCLOCK_MAX_RETRIES #define PCFCLOCK_MAX_RETRIES 10 #endif #define AFC_HI 0 #define AFC_LO AUTOFEED /* AUTO FEED is used as clock */ #define AUTOFEED_CLOCK(val) \ ctr = (ctr & ~(AUTOFEED)) ^ (val); ppb_wctr(ppbus, ctr) /* SLCT is used as clock */ #define CLOCK_OK \ ((ppb_rstr(ppbus) & SELECT) == (i & 1 ? SELECT : 0)) /* PE is used as data */ #define BIT_SET (ppb_rstr(ppbus)&PERROR) /* the first byte sent as reply must be 00001001b */ #define PCFCLOCK_CORRECT_SYNC(buf) (buf[0] == 9) #define NR(buf, off) (buf[off+1]*10+buf[off]) /* check for correct input values */ #define PCFCLOCK_CORRECT_FORMAT(buf) (\ NR(buf, 14) <= 99 && \ NR(buf, 12) <= 12 && \ NR(buf, 10) <= 31 && \ NR(buf, 6) <= 23 && \ NR(buf, 4) <= 59 && \ NR(buf, 2) <= 59) #define PCFCLOCK_BATTERY_STATUS_LOW(buf) (buf[8] & 4) #define PCFCLOCK_CMD_TIME 0 /* send current time */ #define PCFCLOCK_CMD_COPY 7 /* copy received signal to PC */ static void pcfclock_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, PCFCLOCK_NAME, -1); if (!dev) - BUS_ADD_CHILD(parent, 0, PCFCLOCK_NAME, -1); + BUS_ADD_CHILD(parent, 0, PCFCLOCK_NAME, DEVICE_UNIT_ANY); } static int pcfclock_probe(device_t dev) { device_set_desc(dev, "PCF-1.0"); return (0); } static int pcfclock_attach(device_t dev) { struct pcfclock_data *sc = device_get_softc(dev); int unit; unit = device_get_unit(dev); sc->dev = dev; sc->cdev = make_dev(&pcfclock_cdevsw, 
unit, UID_ROOT, GID_WHEEL, 0400, PCFCLOCK_NAME "%d", unit); if (sc->cdev == NULL) { device_printf(dev, "Failed to create character device\n"); return (ENXIO); } sc->cdev->si_drv1 = sc; return (0); } static int pcfclock_open(struct cdev *dev, int flag, int fms, struct thread *td) { struct pcfclock_data *sc = dev->si_drv1; device_t pcfclockdev; device_t ppbus; int res; if (!sc) return (ENXIO); pcfclockdev = sc->dev; ppbus = device_get_parent(pcfclockdev); ppb_lock(ppbus); res = ppb_request_bus(ppbus, pcfclockdev, (flag & O_NONBLOCK) ? PPB_DONTWAIT : PPB_WAIT); ppb_unlock(ppbus); return (res); } static int pcfclock_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct pcfclock_data *sc = dev->si_drv1; device_t pcfclockdev = sc->dev; device_t ppbus = device_get_parent(pcfclockdev); ppb_lock(ppbus); ppb_release_bus(ppbus, pcfclockdev); ppb_unlock(ppbus); return (0); } static void pcfclock_write_cmd(struct cdev *dev, unsigned char command) { struct pcfclock_data *sc = dev->si_drv1; device_t pcfclockdev = sc->dev; device_t ppbus = device_get_parent(pcfclockdev); unsigned char ctr = 14; char i; for (i = 0; i <= 7; i++) { ppb_wdtr(ppbus, i); AUTOFEED_CLOCK(i & 1 ? AFC_HI : AFC_LO); DELAY(3000); } ppb_wdtr(ppbus, command); AUTOFEED_CLOCK(AFC_LO); DELAY(3000); AUTOFEED_CLOCK(AFC_HI); } static void pcfclock_display_data(struct cdev *dev, char buf[18]) { struct pcfclock_data *sc = dev->si_drv1; #ifdef PCFCLOCK_VERBOSE int year; year = NR(buf, 14); if (year < 70) year += 100; device_printf(sc->dev, "%02d.%02d.%4d %02d:%02d:%02d, " "battery status: %s\n", NR(buf, 10), NR(buf, 12), 1900 + year, NR(buf, 6), NR(buf, 4), NR(buf, 2), PCFCLOCK_BATTERY_STATUS_LOW(buf) ? 
"LOW" : "ok");
#else
	if (PCFCLOCK_BATTERY_STATUS_LOW(buf))
		device_printf(sc->dev,
			"BATTERY STATUS LOW ON\n");
#endif
}

/*
 * Clock in `bits' bits from the receiver, one bit per SLCT clock edge,
 * packing four bits per output byte (the device sends BCD nibbles).
 * Returns 0 on success, EIO if the clock line stops toggling.
 */
static int
pcfclock_read_data(struct cdev *dev, char *buf, ssize_t bits)
{
	struct pcfclock_data *sc = dev->si_drv1;
	device_t pcfclockdev = sc->dev;
	/* ppbus is referenced implicitly by the CLOCK_OK/BIT_SET macros */
	device_t ppbus = device_get_parent(pcfclockdev);
	int i;
	char waitfor;
	int offset;

	/* one byte per four bits */
	bzero(buf, ((bits + 3) >> 2) + 1);

	waitfor = 100;
	for (i = 0; i <= bits; i++) {
		/* wait for clock, maximum (waitfor*100) usec */
		while (!CLOCK_OK && --waitfor > 0)
			DELAY(100);

		/* timed out? */
		if (!waitfor)
			return (EIO);

		waitfor = 100; /* reload */

		/* give it some time */
		DELAY(500);

		/* calculate offset into buffer */
		offset = i >> 2;
		buf[offset] <<= 1;

		if (BIT_SET)
			buf[offset] |= 1;
	}

	return (0);
}

/*
 * Ask the device for the current time and read the reply, retrying up
 * to `maxretries' times until sync byte and field ranges check out.
 * Returns 0 on success, EIO when all retries are exhausted.
 */
static int
pcfclock_read_dev(struct cdev *dev, char *buf, int maxretries)
{
	struct pcfclock_data *sc = dev->si_drv1;
	device_t pcfclockdev = sc->dev;
	device_t ppbus = device_get_parent(pcfclockdev);
	int error = 0;

	ppb_set_mode(ppbus, PPB_COMPATIBLE);

	while (--maxretries > 0) {
		pcfclock_write_cmd(dev, PCFCLOCK_CMD_TIME);
		if (pcfclock_read_data(dev, buf, 68))
			continue;
		if (!PCFCLOCK_CORRECT_SYNC(buf))
			continue;
		if (!PCFCLOCK_CORRECT_FORMAT(buf))
			continue;
		break;
	}

	if (!maxretries)
		error = EIO;

	return (error);
}

/*
 * Character-device read: returns the raw 18-byte time record.
 * Partial reads are refused (ERANGE) so callers always see a full record.
 */
static int
pcfclock_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct pcfclock_data *sc = dev->si_drv1;
	device_t ppbus;
	char buf[18];
	int error = 0;

	if (uio->uio_resid < 18)
		return (ERANGE);

	ppbus = device_get_parent(sc->dev);
	ppb_lock(ppbus);
	error = pcfclock_read_dev(dev, buf, PCFCLOCK_MAX_RETRIES);
	ppb_unlock(ppbus);

	if (error) {
		device_printf(sc->dev, "no PCF found\n");
	} else {
		pcfclock_display_data(dev, buf);

		/* NOTE(review): uiomove() result is ignored here — a copyout
		 * fault would go unreported to the caller; confirm intent */
		uiomove(buf, 18, uio);
	}

	return (error);
}

static device_method_t pcfclock_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	pcfclock_identify),
	DEVMETHOD(device_probe,		pcfclock_probe),
	DEVMETHOD(device_attach,	pcfclock_attach),
	{ 0, 0 }
};

static driver_t
pcfclock_driver = { PCFCLOCK_NAME, pcfclock_methods, sizeof(struct pcfclock_data), }; DRIVER_MODULE(pcfclock, ppbus, pcfclock_driver, 0, 0); diff --git a/sys/dev/ppbus/ppi.c b/sys/dev/ppbus/ppi.c index 3789da6fbed5..65921b53e0c6 100644 --- a/sys/dev/ppbus/ppi.c +++ b/sys/dev/ppbus/ppi.c @@ -1,618 +1,618 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, 1998, 1999 Nicolas Souchu, Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * */ #include #include "opt_ppb_1284.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PERIPH_1284 #include #include #endif #include #include "ppbus_if.h" #include #define BUFSIZE 512 struct ppi_data { device_t ppi_device; struct cdev *ppi_cdev; struct sx ppi_lock; int ppi_flags; #define HAVE_PPBUS (1<<0) int ppi_mode; /* IEEE1284 mode */ char ppi_buffer[BUFSIZE]; #ifdef PERIPH_1284 struct resource *intr_resource; /* interrupt resource */ void *intr_cookie; /* interrupt registration cookie */ #endif /* PERIPH_1284 */ }; #define DEVTOSOFTC(dev) \ ((struct ppi_data *)device_get_softc(dev)) #ifdef PERIPH_1284 static void ppiintr(void *arg); #endif static d_open_t ppiopen; static d_close_t ppiclose; static d_ioctl_t ppiioctl; static d_write_t ppiwrite; static d_read_t ppiread; static struct cdevsw ppi_cdevsw = { .d_version = D_VERSION, .d_open = ppiopen, .d_close = ppiclose, .d_read = ppiread, .d_write = ppiwrite, .d_ioctl = ppiioctl, .d_name = "ppi", }; #ifdef PERIPH_1284 static void ppi_enable_intr(device_t ppidev) { char r; device_t ppbus = device_get_parent(ppidev); r = ppb_rctr(ppbus); ppb_wctr(ppbus, r | IRQENABLE); return; } static void ppi_disable_intr(device_t ppidev) { char r; device_t ppbus = device_get_parent(ppidev); r = ppb_rctr(ppbus); ppb_wctr(ppbus, r & ~IRQENABLE); return; } #endif /* PERIPH_1284 */ static void ppi_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, "ppi", -1); if (!dev) - BUS_ADD_CHILD(parent, 0, "ppi", -1); + BUS_ADD_CHILD(parent, 0, "ppi", DEVICE_UNIT_ANY); } /* * ppi_probe() */ static int ppi_probe(device_t dev) { /* probe is always ok */ device_set_desc(dev, "Parallel I/O"); return (0); } /* * ppi_attach() */ static int ppi_attach(device_t dev) { struct ppi_data *ppi = DEVTOSOFTC(dev); #ifdef PERIPH_1284 int error, rid = 0; /* declare our interrupt handler */ ppi->intr_resource = 
bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (ppi->intr_resource) { /* register our interrupt handler */ error = bus_setup_intr(dev, ppi->intr_resource, INTR_TYPE_TTY | INTR_MPSAFE, NULL, ppiintr, dev, &ppi->intr_cookie); if (error) { bus_release_resource(dev, SYS_RES_IRQ, rid, ppi->intr_resource); device_printf(dev, "Unable to register interrupt handler\n"); return (error); } } #endif /* PERIPH_1284 */ sx_init(&ppi->ppi_lock, "ppi"); ppi->ppi_cdev = make_dev(&ppi_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "ppi%d", device_get_unit(dev)); if (ppi->ppi_cdev == NULL) { device_printf(dev, "Failed to create character device\n"); return (ENXIO); } ppi->ppi_cdev->si_drv1 = ppi; ppi->ppi_device = dev; return (0); } static int ppi_detach(device_t dev) { struct ppi_data *ppi = DEVTOSOFTC(dev); destroy_dev(ppi->ppi_cdev); #ifdef PERIPH_1284 if (ppi->intr_resource != NULL) { bus_teardown_intr(dev, ppi->intr_resource, ppi->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, 0, ppi->intr_resource); } #endif sx_destroy(&ppi->ppi_lock); return (0); } #ifdef PERIPH_1284 /* * Cable * ----- * * Use an IEEE1284 compliant (DB25/DB25) cable with the following tricks: * * nStrobe <-> nAck 1 <-> 10 * nAutofd <-> Busy 11 <-> 14 * nSelectin <-> Select 17 <-> 13 * nInit <-> nFault 15 <-> 16 * */ static void ppiintr(void *arg) { device_t ppidev = (device_t)arg; device_t ppbus = device_get_parent(ppidev); struct ppi_data *ppi = DEVTOSOFTC(ppidev); ppb_assert_locked(ppbus); ppi_disable_intr(ppidev); switch (ppb_1284_get_state(ppbus)) { /* accept IEEE1284 negotiation then wakeup a waiting process to * continue negotiation at process level */ case PPB_FORWARD_IDLE: /* Event 1 */ if ((ppb_rstr(ppbus) & (SELECT | nBUSY)) == (SELECT | nBUSY)) { /* IEEE1284 negotiation */ #ifdef DEBUG_1284 printf("N"); #endif /* Event 2 - prepare for reading the ext. 
value */
			ppb_wctr(ppbus, (PCD | STROBE | nINIT) & ~SELECTIN);
			ppb_1284_set_state(ppbus, PPB_NEGOCIATION);
		} else {
#ifdef DEBUG_1284
			printf("0x%x", ppb_rstr(ppbus));
#endif
			ppb_peripheral_terminate(ppbus, PPB_DONTWAIT);
			break;
		}

		/* wake up any process waiting for negotiation from
		 * remote master host */

		/* XXX should set a variable to warn the process about
		 * the interrupt */
		wakeup(ppi);
		break;
	default:
#ifdef DEBUG_1284
		printf("?%d", ppb_1284_get_state(ppbus));
#endif
		/* unexpected state: fall back to forward idle/compat mode */
		ppb_1284_set_state(ppbus, PPB_FORWARD_IDLE);
		ppb_set_mode(ppbus, PPB_COMPATIBLE);
		break;
	}

	ppi_enable_intr(ppidev);

	return;
}
#endif /* PERIPH_1284 */

/*
 * ppiopen()
 *
 * Acquire the ppbus on first open; ppi_lock serializes concurrent
 * open() calls so only one of them requests the bus.
 */
static int
ppiopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct ppi_data *ppi = dev->si_drv1;
	device_t ppidev = ppi->ppi_device;
	device_t ppbus = device_get_parent(ppidev);
	int res;

	sx_xlock(&ppi->ppi_lock);
	if (!(ppi->ppi_flags & HAVE_PPBUS)) {
		ppb_lock(ppbus);
		/* don't block on the bus for non-blocking opens */
		res = ppb_request_bus(ppbus, ppidev,
		    (flags & O_NONBLOCK) ? PPB_DONTWAIT : PPB_WAIT | PPB_INTR);
		ppb_unlock(ppbus);
		if (res) {
			sx_xunlock(&ppi->ppi_lock);
			return (res);
		}

		ppi->ppi_flags |= HAVE_PPBUS;
	}
	sx_xunlock(&ppi->ppi_lock);

	return (0);
}

/*
 * ppiclose()
 *
 * Terminate any IEEE1284 session in progress and give the ppbus back.
 */
static int
ppiclose(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct ppi_data *ppi = dev->si_drv1;
	device_t ppidev = ppi->ppi_device;
	device_t ppbus = device_get_parent(ppidev);

	sx_xlock(&ppi->ppi_lock);
	ppb_lock(ppbus);
#ifdef PERIPH_1284
	switch (ppb_1284_get_state(ppbus)) {
	case PPB_PERIPHERAL_IDLE:
		ppb_peripheral_terminate(ppbus, 0);
		break;
	case PPB_REVERSE_IDLE:
	case PPB_EPP_IDLE:
	case PPB_ECP_FORWARD_IDLE:
	default:
		ppb_1284_terminate(ppbus);
		break;
	}
#endif /* PERIPH_1284 */

	/* unregistration of interrupt forced by release */
	ppb_release_bus(ppbus, ppidev);
	ppb_unlock(ppbus);

	ppi->ppi_flags &= ~HAVE_PPBUS;
	sx_xunlock(&ppi->ppi_lock);

	return (0);
}

/*
 * ppiread()
 *
 * IEEE1284 compliant read.
* * First, try negotiation to BYTE then NIBBLE mode * If no data is available, wait for it otherwise transfer as much as possible */ static int ppiread(struct cdev *dev, struct uio *uio, int ioflag) { #ifdef PERIPH_1284 struct ppi_data *ppi = dev->si_drv1; device_t ppidev = ppi->ppi_device; device_t ppbus = device_get_parent(ppidev); int len, error = 0; char *buffer; buffer = malloc(BUFSIZE, M_DEVBUF, M_WAITOK); ppb_lock(ppbus); switch (ppb_1284_get_state(ppbus)) { case PPB_PERIPHERAL_IDLE: ppb_peripheral_terminate(ppbus, 0); /* FALLTHROUGH */ case PPB_FORWARD_IDLE: /* if can't negotiate NIBBLE mode then try BYTE mode, * the peripheral may be a computer */ if ((ppb_1284_negociate(ppbus, ppi->ppi_mode = PPB_NIBBLE, 0))) { /* XXX Wait 2 seconds to let the remote host some * time to terminate its interrupt */ ppb_sleep(ppbus, ppi, PPBPRI, "ppiread", 2 * hz); if ((error = ppb_1284_negociate(ppbus, ppi->ppi_mode = PPB_BYTE, 0))) { ppb_unlock(ppbus); free(buffer, M_DEVBUF); return (error); } } break; case PPB_REVERSE_IDLE: case PPB_EPP_IDLE: case PPB_ECP_FORWARD_IDLE: default: break; } #ifdef DEBUG_1284 printf("N"); #endif /* read data */ len = 0; while (uio->uio_resid) { error = ppb_1284_read(ppbus, ppi->ppi_mode, buffer, min(BUFSIZE, uio->uio_resid), &len); ppb_unlock(ppbus); if (error) goto error; if (!len) goto error; /* no more data */ #ifdef DEBUG_1284 printf("d"); #endif if ((error = uiomove(buffer, len, uio))) goto error; ppb_lock(ppbus); } ppb_unlock(ppbus); error: free(buffer, M_DEVBUF); #else /* PERIPH_1284 */ int error = ENODEV; #endif return (error); } /* * ppiwrite() * * IEEE1284 compliant write * * Actually, this is the peripheral side of a remote IEEE1284 read * * The first part of the negotiation (IEEE1284 device detection) is * done at interrupt level, then the remaining is done by the writing * process * * Once negotiation done, transfer data */ static int ppiwrite(struct cdev *dev, struct uio *uio, int ioflag) { #ifdef PERIPH_1284 struct ppi_data *ppi 
= dev->si_drv1; device_t ppidev = ppi->ppi_device; device_t ppbus = device_get_parent(ppidev); int len, error = 0, sent; char *buffer; #if 0 int ret; #define ADDRESS MS_PARAM(0, 0, MS_TYP_PTR) #define LENGTH MS_PARAM(0, 1, MS_TYP_INT) struct ppb_microseq msq[] = { { MS_OP_PUT, { MS_UNKNOWN, MS_UNKNOWN, MS_UNKNOWN } }, MS_RET(0) }; buffer = malloc(BUFSIZE, M_DEVBUF, M_WAITOK); ppb_lock(ppbus); /* negotiate ECP mode */ if (ppb_1284_negociate(ppbus, PPB_ECP, 0)) { printf("ppiwrite: ECP negotiation failed\n"); } while (!error && (len = min(uio->uio_resid, BUFSIZE))) { ppb_unlock(ppbus); uiomove(buffer, len, uio); ppb_MS_init_msq(msq, 2, ADDRESS, buffer, LENGTH, len); ppb_lock(ppbus); error = ppb_MS_microseq(ppbus, msq, &ret); } #else buffer = malloc(BUFSIZE, M_DEVBUF, M_WAITOK); ppb_lock(ppbus); #endif /* we have to be peripheral to be able to send data, so * wait for the appropriate state */ if (ppb_1284_get_state(ppbus) < PPB_PERIPHERAL_NEGOCIATION) ppb_1284_terminate(ppbus); while (ppb_1284_get_state(ppbus) != PPB_PERIPHERAL_IDLE) { /* XXX should check a variable before sleeping */ #ifdef DEBUG_1284 printf("s"); #endif ppi_enable_intr(ppidev); /* sleep until IEEE1284 negotiation starts */ error = ppb_sleep(ppbus, ppi, PCATCH | PPBPRI, "ppiwrite", 0); switch (error) { case 0: /* negotiate peripheral side with BYTE mode */ ppb_peripheral_negociate(ppbus, PPB_BYTE, 0); break; case EWOULDBLOCK: break; default: goto error; } } #ifdef DEBUG_1284 printf("N"); #endif /* negotiation done, write bytes to master host */ while ((len = min(uio->uio_resid, BUFSIZE)) != 0) { ppb_unlock(ppbus); uiomove(buffer, len, uio); ppb_lock(ppbus); if ((error = byte_peripheral_write(ppbus, buffer, len, &sent))) goto error; #ifdef DEBUG_1284 printf("d"); #endif } error: ppb_unlock(ppbus); free(buffer, M_DEVBUF); #else /* PERIPH_1284 */ int error = ENODEV; #endif return (error); } static int ppiioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct ppi_data 
*ppi = dev->si_drv1; device_t ppidev = ppi->ppi_device; device_t ppbus = device_get_parent(ppidev); int error = 0; u_int8_t *val = (u_int8_t *)data; ppb_lock(ppbus); switch (cmd) { case PPIGDATA: /* get data register */ *val = ppb_rdtr(ppbus); break; case PPIGSTATUS: /* get status bits */ *val = ppb_rstr(ppbus); break; case PPIGCTRL: /* get control bits */ *val = ppb_rctr(ppbus); break; case PPIGEPPD: /* get EPP data bits */ *val = ppb_repp_D(ppbus); break; case PPIGECR: /* get ECP bits */ *val = ppb_recr(ppbus); break; case PPIGFIFO: /* read FIFO */ *val = ppb_rfifo(ppbus); break; case PPISDATA: /* set data register */ ppb_wdtr(ppbus, *val); break; case PPISSTATUS: /* set status bits */ ppb_wstr(ppbus, *val); break; case PPISCTRL: /* set control bits */ ppb_wctr(ppbus, *val); break; case PPISEPPD: /* set EPP data bits */ ppb_wepp_D(ppbus, *val); break; case PPISECR: /* set ECP bits */ ppb_wecr(ppbus, *val); break; case PPISFIFO: /* write FIFO */ ppb_wfifo(ppbus, *val); break; case PPIGEPPA: /* get EPP address bits */ *val = ppb_repp_A(ppbus); break; case PPISEPPA: /* set EPP address bits */ ppb_wepp_A(ppbus, *val); break; default: error = ENOTTY; break; } ppb_unlock(ppbus); return (error); } static device_method_t ppi_methods[] = { /* device interface */ DEVMETHOD(device_identify, ppi_identify), DEVMETHOD(device_probe, ppi_probe), DEVMETHOD(device_attach, ppi_attach), DEVMETHOD(device_detach, ppi_detach), { 0, 0 } }; static driver_t ppi_driver = { "ppi", ppi_methods, sizeof(struct ppi_data), }; DRIVER_MODULE(ppi, ppbus, ppi_driver, 0, 0); MODULE_DEPEND(ppi, ppbus, 1, 1, 1); diff --git a/sys/dev/ppbus/pps.c b/sys/dev/ppbus/pps.c index a5c0a56d4f94..5a2791aa0335 100644 --- a/sys/dev/ppbus/pps.c +++ b/sys/dev/ppbus/pps.c @@ -1,345 +1,345 @@ /*- * SPDX-License-Identifier: Beerware * * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. 
As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * * * This driver implements a draft-mogul-pps-api-02.txt PPS source. * * The input pin is pin#10 * The echo output pin is pin#14 * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #define PPS_NAME "pps" /* our official name */ #define PRVERBOSE(fmt, arg...) if (bootverbose) printf(fmt, ##arg); struct pps_data { struct ppb_device pps_dev; struct pps_state pps[9]; struct cdev *devs[9]; device_t ppsdev; device_t ppbus; int busy; struct callout timeout; int lastdata; struct sx lock; struct resource *intr_resource; /* interrupt resource */ void *intr_cookie; /* interrupt registration cookie */ }; static void ppsintr(void *arg); static void ppshcpoll(void *arg); #define DEVTOSOFTC(dev) \ ((struct pps_data *)device_get_softc(dev)) static d_open_t ppsopen; static d_close_t ppsclose; static d_ioctl_t ppsioctl; static struct cdevsw pps_cdevsw = { .d_version = D_VERSION, .d_open = ppsopen, .d_close = ppsclose, .d_ioctl = ppsioctl, .d_name = PPS_NAME, }; static void ppsidentify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, PPS_NAME, -1); if (!dev) - BUS_ADD_CHILD(parent, 0, PPS_NAME, -1); + BUS_ADD_CHILD(parent, 0, PPS_NAME, DEVICE_UNIT_ANY); } static int ppstry(device_t ppbus, int send, int expect) { int i; ppb_wdtr(ppbus, send); i = ppb_rdtr(ppbus); PRVERBOSE("S: %02x E: %02x G: %02x\n", send, expect, i); return (i != expect); } static int ppsprobe(device_t ppsdev) { device_set_desc(ppsdev, "Pulse per second Timing Interface"); return (0); } static int ppsattach(device_t dev) { struct pps_data *sc = DEVTOSOFTC(dev); device_t ppbus = device_get_parent(dev); struct cdev 
*d; int error, i, unit, rid = 0; /* declare our interrupt handler */ sc->intr_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); /* interrupts seem mandatory */ if (sc->intr_resource == NULL) { device_printf(dev, "Unable to allocate interrupt resource\n"); return (ENXIO); } error = bus_setup_intr(dev, sc->intr_resource, INTR_TYPE_TTY | INTR_MPSAFE, NULL, ppsintr, sc, &sc->intr_cookie); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->intr_resource); device_printf(dev, "Unable to register interrupt handler\n"); return (error); } sx_init(&sc->lock, "pps"); ppb_init_callout(ppbus, &sc->timeout, 0); sc->ppsdev = dev; sc->ppbus = ppbus; unit = device_get_unit(ppbus); d = make_dev(&pps_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, PPS_NAME "%d", unit); sc->devs[0] = d; sc->pps[0].ppscap = PPS_CAPTUREASSERT | PPS_ECHOASSERT; sc->pps[0].driver_abi = PPS_ABI_VERSION; sc->pps[0].driver_mtx = ppb_get_lock(ppbus); d->si_drv1 = sc; d->si_drv2 = (void*)0; pps_init_abi(&sc->pps[0]); ppb_lock(ppbus); if (ppb_request_bus(ppbus, dev, PPB_DONTWAIT)) { ppb_unlock(ppbus); return (0); } do { i = ppb_set_mode(sc->ppbus, PPB_EPP); PRVERBOSE("EPP: %d %d\n", i, PPB_IN_EPP_MODE(sc->ppbus)); if (i == -1) break; i = 0; ppb_wctr(ppbus, i); if (ppstry(ppbus, 0x00, 0x00)) break; if (ppstry(ppbus, 0x55, 0x55)) break; if (ppstry(ppbus, 0xaa, 0xaa)) break; if (ppstry(ppbus, 0xff, 0xff)) break; i = IRQENABLE | PCD | STROBE | nINIT | SELECTIN; ppb_wctr(ppbus, i); PRVERBOSE("CTR = %02x (%02x)\n", ppb_rctr(ppbus), i); if (ppstry(ppbus, 0x00, 0x00)) break; if (ppstry(ppbus, 0x55, 0x00)) break; if (ppstry(ppbus, 0xaa, 0x00)) break; if (ppstry(ppbus, 0xff, 0x00)) break; i = IRQENABLE | PCD | nINIT | SELECTIN; ppb_wctr(ppbus, i); PRVERBOSE("CTR = %02x (%02x)\n", ppb_rctr(ppbus), i); ppstry(ppbus, 0x00, 0xff); ppstry(ppbus, 0x55, 0xff); ppstry(ppbus, 0xaa, 0xff); ppstry(ppbus, 0xff, 0xff); ppb_unlock(ppbus); for (i = 1; i < 9; i++) { d = make_dev(&pps_cdevsw, unit + 0x10000 * i, 
UID_ROOT, GID_WHEEL, 0600, PPS_NAME "%db%d", unit, i - 1); sc->devs[i] = d; sc->pps[i].ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR; sc->pps[i].driver_abi = PPS_ABI_VERSION; sc->pps[i].driver_mtx = ppb_get_lock(ppbus); d->si_drv1 = sc; d->si_drv2 = (void *)(intptr_t)i; pps_init_abi(&sc->pps[i]); } ppb_lock(ppbus); } while (0); i = ppb_set_mode(sc->ppbus, PPB_COMPATIBLE); ppb_release_bus(ppbus, dev); ppb_unlock(ppbus); return (0); } static int ppsopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct pps_data *sc = dev->si_drv1; device_t ppbus = sc->ppbus; int subdev = (intptr_t)dev->si_drv2; int i; /* * The sx lock is here solely to serialize open()'s to close * the race of concurrent open()'s when pps(4) doesn't own the * ppbus. */ sx_xlock(&sc->lock); ppb_lock(ppbus); if (!sc->busy) { device_t ppsdev = sc->ppsdev; if (ppb_request_bus(ppbus, ppsdev, PPB_WAIT|PPB_INTR)) { ppb_unlock(ppbus); sx_xunlock(&sc->lock); return (EINTR); } i = ppb_set_mode(sc->ppbus, PPB_PS2); PRVERBOSE("EPP: %d %d\n", i, PPB_IN_EPP_MODE(sc->ppbus)); i = IRQENABLE | PCD | nINIT | SELECTIN; ppb_wctr(ppbus, i); } if (subdev > 0 && !(sc->busy & ~1)) { /* XXX: Timeout of 1? hz/100 instead perhaps? */ callout_reset(&sc->timeout, 1, ppshcpoll, sc); sc->lastdata = ppb_rdtr(sc->ppbus); } sc->busy |= (1 << subdev); ppb_unlock(ppbus); sx_xunlock(&sc->lock); return(0); } static int ppsclose(struct cdev *dev, int flags, int fmt, struct thread *td) { struct pps_data *sc = dev->si_drv1; int subdev = (intptr_t)dev->si_drv2; sx_xlock(&sc->lock); sc->pps[subdev].ppsparam.mode = 0; /* PHK ??? 
*/ ppb_lock(sc->ppbus); sc->busy &= ~(1 << subdev); if (subdev > 0 && !(sc->busy & ~1)) callout_stop(&sc->timeout); if (!sc->busy) { device_t ppsdev = sc->ppsdev; device_t ppbus = sc->ppbus; ppb_wdtr(ppbus, 0); ppb_wctr(ppbus, 0); ppb_set_mode(ppbus, PPB_COMPATIBLE); ppb_release_bus(ppbus, ppsdev); } ppb_unlock(sc->ppbus); sx_xunlock(&sc->lock); return(0); } static void ppshcpoll(void *arg) { struct pps_data *sc = arg; int i, j, k, l; KASSERT(sc->busy & ~1, ("pps polling w/o opened devices")); i = ppb_rdtr(sc->ppbus); if (i == sc->lastdata) return; l = sc->lastdata ^ i; k = 1; for (j = 1; j < 9; j ++) { if (l & k) { pps_capture(&sc->pps[j]); pps_event(&sc->pps[j], i & k ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR); } k += k; } sc->lastdata = i; callout_reset(&sc->timeout, 1, ppshcpoll, sc); } static void ppsintr(void *arg) { struct pps_data *sc = (struct pps_data *)arg; ppb_assert_locked(sc->ppbus); pps_capture(&sc->pps[0]); if (!(ppb_rstr(sc->ppbus) & nACK)) return; if (sc->pps[0].ppsparam.mode & PPS_ECHOASSERT) ppb_wctr(sc->ppbus, IRQENABLE | AUTOFEED); pps_event(&sc->pps[0], PPS_CAPTUREASSERT); if (sc->pps[0].ppsparam.mode & PPS_ECHOASSERT) ppb_wctr(sc->ppbus, IRQENABLE); } static int ppsioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct pps_data *sc = dev->si_drv1; int subdev = (intptr_t)dev->si_drv2; int err; ppb_lock(sc->ppbus); err = pps_ioctl(cmd, data, &sc->pps[subdev]); ppb_unlock(sc->ppbus); return (err); } static device_method_t pps_methods[] = { /* device interface */ DEVMETHOD(device_identify, ppsidentify), DEVMETHOD(device_probe, ppsprobe), DEVMETHOD(device_attach, ppsattach), { 0, 0 } }; static driver_t pps_driver = { PPS_NAME, pps_methods, sizeof(struct pps_data), }; DRIVER_MODULE(pps, ppbus, pps_driver, 0, 0); MODULE_DEPEND(pps, ppbus, 1, 1, 1); diff --git a/sys/dev/smbios/smbios.c b/sys/dev/smbios/smbios.c index 883e8f501b59..dbf766b86422 100644 --- a/sys/dev/smbios/smbios.c +++ b/sys/dev/smbios/smbios.c @@ -1,323 
+1,323 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2003 Matthew N. Dodd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #endif #include /* * System Management BIOS Reference Specification, v2.4 Final * http://www.dmtf.org/standards/published_documents/DSP0134.pdf */ struct smbios_softc { device_t dev; union { struct smbios_eps * eps; struct smbios3_eps * eps3; }; bool is_eps3; }; static void smbios_identify (driver_t *, device_t); static int smbios_probe (device_t); static int smbios_attach (device_t); static int smbios_detach (device_t); static int smbios_modevent (module_t, int, void *); static int smbios_cksum (void *); static bool smbios_eps3 (void *); static void smbios_identify (driver_t *driver, device_t parent) { #ifdef ARCH_MAY_USE_EFI struct uuid efi_smbios = EFI_TABLE_SMBIOS; struct uuid efi_smbios3 = EFI_TABLE_SMBIOS3; void *addr_efi; #endif struct smbios_eps *eps; struct smbios3_eps *eps3; void *ptr; device_t child; vm_paddr_t addr = 0; size_t map_size = sizeof (*eps); int length; if (!device_is_alive(parent)) return; #ifdef ARCH_MAY_USE_EFI if (!efi_get_table(&efi_smbios3, &addr_efi)) { addr = (vm_paddr_t)addr_efi; map_size = sizeof (*eps3); } else if (!efi_get_table(&efi_smbios, &addr_efi)) { addr = (vm_paddr_t)addr_efi; } #endif #if defined(__amd64__) || defined(__i386__) if (addr == 0) addr = bios_sigsearch(SMBIOS_START, SMBIOS_SIG, SMBIOS_LEN, SMBIOS_STEP, SMBIOS_OFF); #endif if (addr != 0) { ptr = pmap_mapbios(addr, map_size); if (ptr == NULL) return; if (map_size == sizeof (*eps3)) { eps3 = ptr; length = eps3->length; if (memcmp(eps3->anchor_string, SMBIOS3_SIG, SMBIOS3_LEN) != 0) { printf("smbios3: corrupt sig %s found\n", eps3->anchor_string); return; } } else { eps = ptr; length = eps->length; if (memcmp(eps->anchor_string, SMBIOS_SIG, SMBIOS_LEN) != 0) { printf("smbios: corrupt sig %s found\n", eps->anchor_string); return; } } if (length != map_size) { u_int8_t major, 
minor; major = eps->major_version; minor = eps->minor_version; /* SMBIOS v2.1 implementation might use 0x1e. */ if (length == 0x1e && major == 2 && minor == 1) { length = 0x1f; } else { pmap_unmapbios(eps, map_size); return; } } - child = BUS_ADD_CHILD(parent, 5, "smbios", -1); + child = BUS_ADD_CHILD(parent, 5, "smbios", DEVICE_UNIT_ANY); device_set_driver(child, driver); /* smuggle the phys addr into probe and attach */ bus_set_resource(child, SYS_RES_MEMORY, 0, addr, length); device_set_desc(child, "System Management BIOS"); pmap_unmapbios(ptr, map_size); } return; } static int smbios_probe (device_t dev) { vm_paddr_t pa; vm_size_t size; void *va; int error; error = 0; pa = bus_get_resource_start(dev, SYS_RES_MEMORY, 0); size = bus_get_resource_count(dev, SYS_RES_MEMORY, 0); va = pmap_mapbios(pa, size); if (va == NULL) { device_printf(dev, "Unable to map memory.\n"); return (ENOMEM); } if (smbios_cksum(va)) { device_printf(dev, "SMBIOS checksum failed.\n"); error = ENXIO; } pmap_unmapbios(va, size); return (error); } static int smbios_attach (device_t dev) { struct smbios_softc *sc; void *va; vm_paddr_t pa; vm_size_t size; sc = device_get_softc(dev); sc->dev = dev; pa = bus_get_resource_start(dev, SYS_RES_MEMORY, 0); size = bus_get_resource_count(dev, SYS_RES_MEMORY, 0); va = pmap_mapbios(pa, size); if (va == NULL) { device_printf(dev, "Unable to map memory.\n"); return (ENOMEM); } sc->is_eps3 = smbios_eps3(va); if (sc->is_eps3) { sc->eps3 = va; device_printf(dev, "Version: %u.%u", sc->eps3->major_version, sc->eps3->minor_version); } else { sc->eps = va; device_printf(dev, "Version: %u.%u", sc->eps->major_version, sc->eps->minor_version); if (bcd2bin(sc->eps->BCD_revision)) printf(", BCD Revision: %u.%u", bcd2bin(sc->eps->BCD_revision >> 4), bcd2bin(sc->eps->BCD_revision & 0x0f)); } printf("\n"); return (0); } static int smbios_detach (device_t dev) { struct smbios_softc *sc; vm_size_t size; void *va; sc = device_get_softc(dev); va = (sc->is_eps3 ? 
(void *)sc->eps3 : (void *)sc->eps); if (sc->is_eps3) va = sc->eps3; else va = sc->eps; size = bus_get_resource_count(dev, SYS_RES_MEMORY, 0); if (va != NULL) pmap_unmapbios(va, size); return (0); } static int smbios_modevent (module_t mod, int what, void *arg) { device_t * devs; int count; int i; switch (what) { case MOD_LOAD: break; case MOD_UNLOAD: devclass_get_devices(devclass_find("smbios"), &devs, &count); for (i = 0; i < count; i++) { device_delete_child(device_get_parent(devs[i]), devs[i]); } free(devs, M_TEMP); break; default: break; } return (0); } static device_method_t smbios_methods[] = { /* Device interface */ DEVMETHOD(device_identify, smbios_identify), DEVMETHOD(device_probe, smbios_probe), DEVMETHOD(device_attach, smbios_attach), DEVMETHOD(device_detach, smbios_detach), { 0, 0 } }; static driver_t smbios_driver = { "smbios", smbios_methods, sizeof(struct smbios_softc), }; DRIVER_MODULE(smbios, nexus, smbios_driver, smbios_modevent, NULL); #ifdef ARCH_MAY_USE_EFI MODULE_DEPEND(smbios, efirt, 1, 1, 1); #endif MODULE_VERSION(smbios, 1); static bool smbios_eps3 (void *v) { struct smbios3_eps *e; e = (struct smbios3_eps *)v; return (memcmp(e->anchor_string, SMBIOS3_SIG, SMBIOS3_LEN) == 0); } static int smbios_cksum (void *v) { struct smbios3_eps *eps3; struct smbios_eps *eps; u_int8_t *ptr; u_int8_t cksum; u_int8_t length; int i; if (smbios_eps3(v)) { eps3 = (struct smbios3_eps *)v; length = eps3->length; } else { eps = (struct smbios_eps *)v; length = eps->length; } ptr = (u_int8_t *)v; cksum = 0; for (i = 0; i < length; i++) { cksum += ptr[i]; } return (cksum); } diff --git a/sys/dev/smbus/smb.c b/sys/dev/smbus/smb.c index ee323c835f10..0efa93ae0e89 100644 --- a/sys/dev/smbus/smb.c +++ b/sys/dev/smbus/smb.c @@ -1,420 +1,420 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * Copyright (c) 2023 Juniper Networks, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #ifdef COMPAT_FREEBSD32 struct smbcmd32 { u_char cmd; u_char reserved; u_short op; union { char byte; char buf[2]; short word; } wdata; union { char byte; char buf[2]; short word; } rdata; int slave; uint32_t wbuf; int wcount; uint32_t rbuf; int rcount; }; #define SMB_QUICK_WRITE32 _IOW('i', 1, struct smbcmd32) #define SMB_QUICK_READ32 _IOW('i', 2, struct smbcmd32) #define SMB_SENDB32 _IOW('i', 3, struct smbcmd32) #define SMB_RECVB32 _IOWR('i', 4, struct smbcmd32) #define SMB_WRITEB32 _IOW('i', 5, struct smbcmd32) #define SMB_WRITEW32 _IOW('i', 6, struct smbcmd32) #define SMB_READB32 _IOWR('i', 7, struct smbcmd32) #define SMB_READW32 _IOWR('i', 8, struct smbcmd32) #define SMB_PCALL32 _IOWR('i', 9, struct smbcmd32) #define SMB_BWRITE32 _IOW('i', 10, struct smbcmd32) #define SMB_BREAD32 _IOWR('i', 11, struct smbcmd32) #define SMB_OLD_READB32 _IOW('i', 7, struct smbcmd32) #define SMB_OLD_READW32 _IOW('i', 8, struct smbcmd32) #define SMB_OLD_PCALL32 _IOW('i', 9, struct smbcmd32) #endif #define SMB_OLD_READB _IOW('i', 7, struct smbcmd) #define SMB_OLD_READW _IOW('i', 8, struct smbcmd) #define SMB_OLD_PCALL _IOW('i', 9, struct smbcmd) struct smb_softc { device_t sc_dev; struct cdev *sc_devnode; }; static void smb_identify(driver_t *driver, device_t parent); static int smb_probe(device_t); static int smb_attach(device_t); static int smb_detach(device_t); static device_method_t smb_methods[] = { /* device interface */ DEVMETHOD(device_identify, smb_identify), DEVMETHOD(device_probe, smb_probe), DEVMETHOD(device_attach, smb_attach), DEVMETHOD(device_detach, smb_detach), /* smbus interface */ DEVMETHOD(smbus_intr, smbus_generic_intr), { 0, 0 } }; static driver_t smb_driver = { "smb", smb_methods, sizeof(struct smb_softc), }; static d_ioctl_t smbioctl; static struct cdevsw smb_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_ioctl 
= smbioctl, .d_name = "smb", }; static void smb_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "smb", -1) == NULL) - BUS_ADD_CHILD(parent, 0, "smb", -1); + BUS_ADD_CHILD(parent, 0, "smb", DEVICE_UNIT_ANY); } static int smb_probe(device_t dev) { if (smbus_get_addr(dev) != -1) return (ENXIO); device_set_desc(dev, "SMBus generic I/O"); return (BUS_PROBE_NOWILDCARD); } static int smb_attach(device_t dev) { struct smb_softc *sc; struct make_dev_args mda; int error; sc = device_get_softc(dev); sc->sc_dev = dev; make_dev_args_init(&mda); mda.mda_devsw = &smb_cdevsw; mda.mda_unit = device_get_unit(dev); mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; error = make_dev_s(&mda, &sc->sc_devnode, "smb%d", mda.mda_unit); return (error); } static int smb_detach(device_t dev) { struct smb_softc *sc; sc = device_get_softc(dev); destroy_dev(sc->sc_devnode); return (0); } #ifdef COMPAT_FREEBSD32 static void smbcopyincmd32(struct smbcmd32 *uaddr, struct smbcmd *kaddr) { CP(*uaddr, *kaddr, cmd); CP(*uaddr, *kaddr, op); CP(*uaddr, *kaddr, wdata.word); CP(*uaddr, *kaddr, slave); PTRIN_CP(*uaddr, *kaddr, wbuf); CP(*uaddr, *kaddr, wcount); PTRIN_CP(*uaddr, *kaddr, rbuf); CP(*uaddr, *kaddr, rcount); } #endif static int smbioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { char buf[SMB_MAXBLOCKSIZE]; device_t parent; #ifdef COMPAT_FREEBSD32 struct smbcmd sswab; struct smbcmd32 *s32 = (struct smbcmd32 *)data; #endif struct smbcmd *s = (struct smbcmd *)data; struct smb_softc *sc = dev->si_drv1; device_t smbdev = sc->sc_dev; int error; int unit; u_char bcount; /* * If a specific slave device is being used, override any passed-in * slave. */ unit = dev2unit(dev); if (unit & 0x0400) s->slave = unit & 0x03ff; parent = device_get_parent(smbdev); /* Make sure that LSB bit is cleared. */ if (s->slave & 0x1) return (EINVAL); /* Allocate the bus. 
*/ if ((error = smbus_request_bus(parent, smbdev, (flags & O_NONBLOCK) ? SMB_DONTWAIT : (SMB_WAIT | SMB_INTR)))) return (error); #ifdef COMPAT_FREEBSD32 switch (cmd) { case SMB_QUICK_WRITE32: case SMB_QUICK_READ32: case SMB_SENDB32: case SMB_RECVB32: case SMB_WRITEB32: case SMB_WRITEW32: case SMB_OLD_READB32: case SMB_READB32: case SMB_OLD_READW32: case SMB_READW32: case SMB_OLD_PCALL32: case SMB_PCALL32: case SMB_BWRITE32: case SMB_BREAD32: smbcopyincmd32(s32, &sswab); s = &sswab; break; default: break; } #endif switch (cmd) { case SMB_QUICK_WRITE: #ifdef COMPAT_FREEBSD32 case SMB_QUICK_WRITE32: #endif error = smbus_error(smbus_quick(parent, s->slave, SMB_QWRITE)); break; case SMB_QUICK_READ: #ifdef COMPAT_FREEBSD32 case SMB_QUICK_READ32: #endif error = smbus_error(smbus_quick(parent, s->slave, SMB_QREAD)); break; case SMB_SENDB: #ifdef COMPAT_FREEBSD32 case SMB_SENDB32: #endif error = smbus_error(smbus_sendb(parent, s->slave, s->cmd)); break; case SMB_RECVB: #ifdef COMPAT_FREEBSD32 case SMB_RECVB32: #endif error = smbus_error(smbus_recvb(parent, s->slave, &s->cmd)); break; case SMB_WRITEB: #ifdef COMPAT_FREEBSD32 case SMB_WRITEB32: #endif error = smbus_error(smbus_writeb(parent, s->slave, s->cmd, s->wdata.byte)); break; case SMB_WRITEW: #ifdef COMPAT_FREEBSD32 case SMB_WRITEW32: #endif error = smbus_error(smbus_writew(parent, s->slave, s->cmd, s->wdata.word)); break; case SMB_OLD_READB: case SMB_READB: #ifdef COMPAT_FREEBSD32 case SMB_OLD_READB32: case SMB_READB32: #endif /* NB: for SMB_OLD_READB the read data goes to rbuf only. */ error = smbus_error(smbus_readb(parent, s->slave, s->cmd, &s->rdata.byte)); if (error) break; if (s->rbuf && s->rcount >= 1) { error = copyout(&s->rdata.byte, s->rbuf, 1); s->rcount = 1; } break; case SMB_OLD_READW: case SMB_READW: #ifdef COMPAT_FREEBSD32 case SMB_OLD_READW32: case SMB_READW32: #endif /* NB: for SMB_OLD_READW the read data goes to rbuf only. 
*/ error = smbus_error(smbus_readw(parent, s->slave, s->cmd, &s->rdata.word)); if (error) break; if (s->rbuf && s->rcount >= 2) { buf[0] = (u_char)s->rdata.word; buf[1] = (u_char)(s->rdata.word >> 8); error = copyout(buf, s->rbuf, 2); s->rcount = 2; } break; case SMB_OLD_PCALL: case SMB_PCALL: #ifdef COMPAT_FREEBSD32 case SMB_OLD_PCALL32: case SMB_PCALL32: #endif /* NB: for SMB_OLD_PCALL the read data goes to rbuf only. */ error = smbus_error(smbus_pcall(parent, s->slave, s->cmd, s->wdata.word, &s->rdata.word)); if (error) break; if (s->rbuf && s->rcount >= 2) { buf[0] = (u_char)s->rdata.word; buf[1] = (u_char)(s->rdata.word >> 8); error = copyout(buf, s->rbuf, 2); s->rcount = 2; } break; case SMB_BWRITE: #ifdef COMPAT_FREEBSD32 case SMB_BWRITE32: #endif if (s->wcount < 0) { error = EINVAL; break; } if (s->wcount > SMB_MAXBLOCKSIZE) s->wcount = SMB_MAXBLOCKSIZE; if (s->wcount) error = copyin(s->wbuf, buf, s->wcount); if (error) break; error = smbus_error(smbus_bwrite(parent, s->slave, s->cmd, s->wcount, buf)); break; case SMB_BREAD: #ifdef COMPAT_FREEBSD32 case SMB_BREAD32: #endif if (s->rcount < 0) { error = EINVAL; break; } if (s->rcount > SMB_MAXBLOCKSIZE) s->rcount = SMB_MAXBLOCKSIZE; error = smbus_error(smbus_bread(parent, s->slave, s->cmd, &bcount, buf)); if (error) break; if (s->rcount > bcount) s->rcount = bcount; error = copyout(buf, s->rbuf, s->rcount); break; default: error = ENOTTY; } #ifdef COMPAT_FREEBSD32 switch (cmd) { case SMB_RECVB32: CP(*s, *s32, cmd); break; case SMB_OLD_READB32: case SMB_READB32: case SMB_OLD_READW32: case SMB_READW32: case SMB_OLD_PCALL32: case SMB_PCALL32: CP(*s, *s32, rdata.word); break; case SMB_BREAD32: if (s->rbuf == NULL) CP(*s, *s32, rdata.word); CP(*s, *s32, rcount); break; default: break; } #endif smbus_release_bus(parent, smbdev); return (error); } DRIVER_MODULE(smb, smbus, smb_driver, 0, 0); MODULE_DEPEND(smb, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(smb, 1); diff --git 
a/sys/dev/superio/superio.c b/sys/dev/superio/superio.c index ddb06af93bec..960b89c0cdc0 100644 --- a/sys/dev/superio/superio.c +++ b/sys/dev/superio/superio.c @@ -1,1108 +1,1108 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Andriy Gapon * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "isa_if.h" typedef void (*sio_conf_enter_f)(struct resource*, uint16_t); typedef void (*sio_conf_exit_f)(struct resource*, uint16_t); struct sio_conf_methods { sio_conf_enter_f enter; sio_conf_exit_f exit; superio_vendor_t vendor; }; struct sio_device { uint8_t ldn; superio_dev_type_t type; }; struct superio_devinfo { STAILQ_ENTRY(superio_devinfo) link; struct resource_list resources; device_t dev; uint8_t ldn; superio_dev_type_t type; uint16_t iobase; uint16_t iobase2; uint8_t irq; uint8_t dma; }; struct siosc { struct mtx conf_lock; STAILQ_HEAD(, superio_devinfo) devlist; struct resource* io_res; struct cdev *chardev; int io_rid; uint16_t io_port; const struct sio_conf_methods *methods; const struct sio_device *known_devices; superio_vendor_t vendor; uint16_t devid; uint8_t revid; int extid; uint8_t current_ldn; uint8_t ldn_reg; uint8_t enable_reg; }; static d_ioctl_t superio_ioctl; static struct cdevsw superio_cdevsw = { .d_version = D_VERSION, .d_ioctl = superio_ioctl, .d_name = "superio", }; #define NUMPORTS 2 static uint8_t sio_read(struct resource* res, uint8_t reg) { bus_write_1(res, 0, reg); return (bus_read_1(res, 1)); } /* Read a word from two one-byte registers, big endian. 
*/ static uint16_t sio_readw(struct resource* res, uint8_t reg) { uint16_t v; v = sio_read(res, reg); v <<= 8; v |= sio_read(res, reg + 1); return (v); } static void sio_write(struct resource* res, uint8_t reg, uint8_t val) { bus_write_1(res, 0, reg); bus_write_1(res, 1, val); } static void sio_ldn_select(struct siosc *sc, uint8_t ldn) { mtx_assert(&sc->conf_lock, MA_OWNED); if (ldn == sc->current_ldn) return; sio_write(sc->io_res, sc->ldn_reg, ldn); sc->current_ldn = ldn; } static uint8_t sio_ldn_read(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_read(sc->io_res, reg)); } static uint16_t sio_ldn_readw(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_readw(sc->io_res, reg)); } static void sio_ldn_write(struct siosc *sc, uint8_t ldn, uint8_t reg, uint8_t val) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg <= sc->ldn_reg) { printf("ignored attempt to write special register 0x%x\n", reg); return; } sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); sio_write(sc->io_res, reg, val); } static void sio_conf_enter(struct siosc *sc) { mtx_lock(&sc->conf_lock); sc->methods->enter(sc->io_res, sc->io_port); } static void sio_conf_exit(struct siosc *sc) { sc->methods->exit(sc->io_res, sc->io_port); sc->current_ldn = 0xff; mtx_unlock(&sc->conf_lock); } static void ite_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x01); bus_write_1(res, 0, 0x55); bus_write_1(res, 0, port == 0x2e ? 
0x55 : 0xaa); } static void ite_conf_exit(struct resource* res, uint16_t port) { sio_write(res, 0x02, 0x02); } static const struct sio_conf_methods ite_conf_methods = { .enter = ite_conf_enter, .exit = ite_conf_exit, .vendor = SUPERIO_VENDOR_ITE }; static void nvt_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void nvt_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods nvt_conf_methods = { .enter = nvt_conf_enter, .exit = nvt_conf_exit, .vendor = SUPERIO_VENDOR_NUVOTON }; static void fintek_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void fintek_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods fintek_conf_methods = { .enter = fintek_conf_enter, .exit = fintek_conf_exit, .vendor = SUPERIO_VENDOR_FINTEK }; static const struct sio_conf_methods * const methods_table[] = { &ite_conf_methods, &nvt_conf_methods, &fintek_conf_methods, NULL }; static const uint16_t ports_table[] = { 0x2e, 0x4e, 0 }; const struct sio_device ite_devices[] = { { .ldn = 4, .type = SUPERIO_DEV_HWM }, { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device w83627_devices[] = { { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .ldn = 9, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nvt_devices[] = { { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct5104_devices[] = { { .ldn = 7, .type = SUPERIO_DEV_GPIO }, { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .ldn = 15, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct5585_devices[] = { { .ldn = 9, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct611x_devices[] = { { .ldn = 0x7, .type = SUPERIO_DEV_GPIO }, 
{ .ldn = 0x8, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct67xx_devices[] = { { .ldn = 0x8, .type = SUPERIO_DEV_WDT }, { .ldn = 0x9, .type = SUPERIO_DEV_GPIO }, { .ldn = 0xb, .type = SUPERIO_DEV_HWM }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device fintek_devices[] = { { .ldn = 6, .type = SUPERIO_DEV_GPIO }, { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; static const struct { superio_vendor_t vendor; uint16_t devid; uint16_t mask; int extid; /* Extra ID: used to handle conflicting devid. */ const char *descr; const struct sio_device *devices; } superio_table[] = { { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8613, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8712, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8716, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8718, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8720, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8721, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8726, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8728, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8771, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x1061, .mask = 0x00, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. 
A)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5200, .mask = 0xff, .descr = "Winbond 83627HF/F/HG/G", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5900, .mask = 0xff, .descr = "Winbond 83627S", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6000, .mask = 0xff, .descr = "Winbond 83697HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6800, .mask = 0xff, .descr = "Winbond 83697UG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x7000, .mask = 0xff, .descr = "Winbond 83637HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8200, .mask = 0xff, .descr = "Winbond 83627THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8500, .mask = 0xff, .descr = "Winbond 83687THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8800, .mask = 0xff, .descr = "Winbond 83627EHF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa000, .mask = 0xff, .descr = "Winbond 83627DHG", .devices = w83627_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa200, .mask = 0xff, .descr = "Winbond 83627UHG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa500, .mask = 0xff, .descr = "Winbond 83667HG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb000, .mask = 0xff, .descr = "Winbond 83627DHG-P", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb300, .mask = 0xff, .descr = "Winbond 83667HG-B", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb400, .mask = 0xff, .descr = "Nuvoton NCT6775", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc300, .mask = 0xff, .descr = "Nuvoton NCT6776", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc400, .mask = 0xff, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D 
(rev. B+)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc500, .mask = 0xff, .descr = "Nuvoton NCT6779D", .devices = nct67xx_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd42a, .extid = 1, .descr = "Nuvoton NCT6796D-E", .devices = nct67xx_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd42a, .extid = 2, .descr = "Nuvoton NCT5585D", .devices = nct5585_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc800, .mask = 0xff, .descr = "Nuvoton NCT6791", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc900, .mask = 0xff, .descr = "Nuvoton NCT6792", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd100, .mask = 0xff, .descr = "Nuvoton NCT6793", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd200, .mask = 0xff, .descr = "Nuvoton NCT6112D/NCT6114D/NCT6116D", .devices = nct611x_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd300, .mask = 0xff, .descr = "Nuvoton NCT6795", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_FINTEK, .devid = 0x1210, .mask = 0xff, .descr = "Fintek F81803", .devices = fintek_devices, }, { .vendor = SUPERIO_VENDOR_FINTEK, .devid = 0x0704, .descr = "Fintek F81865", .devices = fintek_devices, }, { 0, 0 } }; static const char * devtype_to_str(superio_dev_type_t type) { switch (type) { case SUPERIO_DEV_NONE: return ("none"); case SUPERIO_DEV_HWM: return ("HWM"); case SUPERIO_DEV_WDT: return ("WDT"); case SUPERIO_DEV_GPIO: return ("GPIO"); case SUPERIO_DEV_MAX: return ("invalid"); } return ("invalid"); } static int superio_detect(device_t dev, bool claim, struct siosc *sc) { struct resource *res; rman_res_t port; rman_res_t count; uint16_t devid; uint8_t revid; int error; int rid; int i, m; int prefer; error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &port, &count); if (error != 0) return (error); if (port > UINT16_MAX || count < NUMPORTS) { device_printf(dev, "unexpected I/O range size\n"); return 
(ENXIO); } /* * Make a temporary resource reservation for hardware probing. * If we can't get the resources we need then * we need to abort. Possibly this indicates * the resources were used by another device * in which case the probe would have failed anyhow. */ rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { if (claim) device_printf(dev, "failed to allocate I/O resource\n"); return (ENXIO); } prefer = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer", &prefer); if (bootverbose && prefer > 0) device_printf(dev, "prefer extid %d\n", prefer); for (m = 0; methods_table[m] != NULL; m++) { methods_table[m]->enter(res, port); if (methods_table[m]->vendor == SUPERIO_VENDOR_ITE) { devid = sio_readw(res, 0x20); revid = sio_read(res, 0x22); } else if (methods_table[m]->vendor == SUPERIO_VENDOR_NUVOTON) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else if (methods_table[m]->vendor == SUPERIO_VENDOR_FINTEK) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else { continue; } methods_table[m]->exit(res, port); for (i = 0; superio_table[i].vendor != 0; i++) { uint16_t mask; mask = superio_table[i].mask; if (superio_table[i].vendor != methods_table[m]->vendor) continue; if ((superio_table[i].devid & ~mask) != (devid & ~mask)) continue; if (prefer > 0 && prefer != superio_table[i].extid) continue; break; } /* Found a matching SuperIO entry. 
*/ if (superio_table[i].vendor != 0) break; } if (methods_table[m] == NULL) error = ENXIO; else error = 0; if (!claim || error != 0) { bus_release_resource(dev, SYS_RES_IOPORT, rid, res); return (error); } sc->methods = methods_table[m]; sc->vendor = sc->methods->vendor; sc->known_devices = superio_table[i].devices; sc->io_res = res; sc->io_rid = rid; sc->io_port = port; sc->devid = devid; sc->revid = revid; sc->extid = superio_table[i].extid; KASSERT(sc->vendor == SUPERIO_VENDOR_ITE || sc->vendor == SUPERIO_VENDOR_NUVOTON || sc->vendor == SUPERIO_VENDOR_FINTEK, ("Only ITE, Nuvoton and Fintek SuperIO-s are supported")); sc->ldn_reg = 0x07; sc->enable_reg = 0x30; /* FIXME enable_reg not used by nctgpio(4). */ sc->current_ldn = 0xff; /* no device should have this */ if (superio_table[i].descr != NULL) { device_set_desc(dev, superio_table[i].descr); } else if (sc->vendor == SUPERIO_VENDOR_ITE) { device_set_descf(dev, "ITE IT%4x SuperIO (revision 0x%02x)", sc->devid, sc->revid); } return (0); } static void superio_identify(driver_t *driver, device_t parent) { device_t child; int i; /* * Don't create child devices if any already exist. * Those could be created via isa hints or if this * driver is loaded, unloaded and then loaded again. */ if (device_find_child(parent, "superio", -1)) { if (bootverbose) printf("superio: device(s) already created\n"); return; } /* * Create a child for each candidate port. * It would be nice if we could somehow clean up those * that this driver fails to probe. */ for (i = 0; ports_table[i] != 0; i++) { child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "superio", -1); if (child == NULL) { device_printf(parent, "failed to add superio child\n"); continue; } bus_set_resource(child, SYS_RES_IOPORT, 0, ports_table[i], 2); if (superio_detect(child, false, NULL) != 0) device_delete_child(parent, child); } } static int superio_probe(device_t dev) { struct siosc *sc; int error; /* Make sure we do not claim some ISA PNP device. 
*/ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* * XXX We can populate the softc now only because we return * BUS_PROBE_SPECIFIC */ sc = device_get_softc(dev); error = superio_detect(dev, true, sc); if (error != 0) return (error); return (BUS_PROBE_SPECIFIC); } static void superio_add_known_child(device_t dev, superio_dev_type_t type, uint8_t ldn) { struct siosc *sc = device_get_softc(dev); struct superio_devinfo *dinfo; device_t child; - child = BUS_ADD_CHILD(dev, 0, NULL, -1); + child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "failed to add child for ldn %d, type %s\n", ldn, devtype_to_str(type)); return; } dinfo = device_get_ivars(child); dinfo->ldn = ldn; dinfo->type = type; sio_conf_enter(sc); dinfo->iobase = sio_ldn_readw(sc, ldn, 0x60); dinfo->iobase2 = sio_ldn_readw(sc, ldn, 0x62); dinfo->irq = sio_ldn_readw(sc, ldn, 0x70); dinfo->dma = sio_ldn_readw(sc, ldn, 0x74); sio_conf_exit(sc); STAILQ_INSERT_TAIL(&sc->devlist, dinfo, link); } static int superio_attach(device_t dev) { struct siosc *sc = device_get_softc(dev); int i; mtx_init(&sc->conf_lock, device_get_nameunit(dev), "superio", MTX_DEF); STAILQ_INIT(&sc->devlist); for (i = 0; sc->known_devices[i].type != SUPERIO_DEV_NONE; i++) { superio_add_known_child(dev, sc->known_devices[i].type, sc->known_devices[i].ldn); } bus_generic_probe(dev); bus_generic_attach(dev); sc->chardev = make_dev(&superio_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "superio%d", device_get_unit(dev)); if (sc->chardev == NULL) device_printf(dev, "failed to create character device\n"); else sc->chardev->si_drv1 = sc; return (0); } static int superio_detach(device_t dev) { struct siosc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error != 0) return (error); if (sc->chardev != NULL) destroy_dev(sc->chardev); device_delete_children(dev); bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); mtx_destroy(&sc->conf_lock); return (0); 
} static device_t superio_add_child(device_t dev, u_int order, const char *name, int unit) { struct superio_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->ldn = 0xff; dinfo->type = SUPERIO_DEV_NONE; dinfo->dev = child; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } static int superio_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct superio_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case SUPERIO_IVAR_LDN: *result = dinfo->ldn; break; case SUPERIO_IVAR_TYPE: *result = dinfo->type; break; case SUPERIO_IVAR_IOBASE: *result = dinfo->iobase; break; case SUPERIO_IVAR_IOBASE2: *result = dinfo->iobase2; break; case SUPERIO_IVAR_IRQ: *result = dinfo->irq; break; case SUPERIO_IVAR_DMA: *result = dinfo->dma; break; default: return (ENOENT); } return (0); } static int superio_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case SUPERIO_IVAR_LDN: case SUPERIO_IVAR_TYPE: case SUPERIO_IVAR_IOBASE: case SUPERIO_IVAR_IOBASE2: case SUPERIO_IVAR_IRQ: case SUPERIO_IVAR_DMA: return (EINVAL); default: return (ENOENT); } } static struct resource_list * superio_get_resource_list(device_t dev, device_t child) { struct superio_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static int superio_printf(struct superio_devinfo *dinfo, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("superio:%s@ldn%0x2x: ", devtype_to_str(dinfo->type), dinfo->ldn); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static void superio_child_detached(device_t dev, device_t child) { struct superio_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) superio_printf(dinfo, "Device leaked IRQ resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) superio_printf(dinfo, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) superio_printf(dinfo, "Device leaked I/O resources\n"); } static int superio_child_location(device_t parent, device_t child, struct sbuf *sb) { uint8_t ldn; ldn = superio_get_ldn(child); sbuf_printf(sb, "ldn=0x%02x", ldn); return (0); } static int superio_child_pnp(device_t parent, device_t child, struct sbuf *sb) { superio_dev_type_t type; type = superio_get_type(child); sbuf_printf(sb, "type=%s", devtype_to_str(type)); return (0); } static int superio_print_child(device_t parent, device_t child) { superio_dev_type_t type; uint8_t ldn; int retval; ldn = superio_get_ldn(child); type = superio_get_type(child); retval = bus_print_child_header(parent, child); retval += printf(" at %s ldn 0x%02x", devtype_to_str(type), ldn); retval += bus_print_child_footer(parent, child); return (retval); } superio_vendor_t superio_vendor(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->vendor); } uint16_t superio_devid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->devid); } uint8_t superio_revid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->revid); } int superio_extid(device_t dev) { device_t 
sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->extid); } uint8_t superio_ldn_read(device_t dev, uint8_t ldn, uint8_t reg) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); uint8_t v; sio_conf_enter(sc); v = sio_ldn_read(sc, ldn, reg); sio_conf_exit(sc); return (v); } uint8_t superio_read(device_t dev, uint8_t reg) { struct superio_devinfo *dinfo = device_get_ivars(dev); return (superio_ldn_read(dev, dinfo->ldn, reg)); } void superio_ldn_write(device_t dev, uint8_t ldn, uint8_t reg, uint8_t val) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); sio_conf_enter(sc); sio_ldn_write(sc, ldn, reg, val); sio_conf_exit(sc); } void superio_write(device_t dev, uint8_t reg, uint8_t val) { struct superio_devinfo *dinfo = device_get_ivars(dev); return (superio_ldn_write(dev, dinfo->ldn, reg, val)); } bool superio_dev_enabled(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return (true); v = superio_read(dev, sc->enable_reg); /* FIXME enable_reg not used by nctgpio(4). */ return ((v & mask) != 0); } void superio_dev_enable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. 
*/ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v |= mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } void superio_dev_disable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v &= ~mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } device_t superio_find_dev(device_t superio, superio_dev_type_t type, int ldn) { struct siosc *sc = device_get_softc(superio); struct superio_devinfo *dinfo; if (ldn < -1 || ldn > UINT8_MAX) return (NULL); /* ERANGE */ if (type == SUPERIO_DEV_NONE && ldn == -1) return (NULL); /* EINVAL */ STAILQ_FOREACH(dinfo, &sc->devlist, link) { if (ldn != -1 && dinfo->ldn != ldn) continue; if (type != SUPERIO_DEV_NONE && dinfo->type != type) continue; return (dinfo->dev); } return (NULL); } static int superio_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct siosc *sc; struct superiocmd *s; sc = dev->si_drv1; s = (struct superiocmd *)data; switch (cmd) { case SUPERIO_CR_READ: sio_conf_enter(sc); s->val = sio_ldn_read(sc, s->ldn, s->cr); sio_conf_exit(sc); return (0); case SUPERIO_CR_WRITE: sio_conf_enter(sc); sio_ldn_write(sc, s->ldn, s->cr, s->val); sio_conf_exit(sc); return (0); default: return (ENOTTY); } } static device_method_t superio_methods[] = { DEVMETHOD(device_identify, superio_identify), DEVMETHOD(device_probe, superio_probe), DEVMETHOD(device_attach, superio_attach), DEVMETHOD(device_detach, superio_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, 
bus_generic_resume), DEVMETHOD(bus_add_child, superio_add_child), DEVMETHOD(bus_child_detached, superio_child_detached), DEVMETHOD(bus_child_location, superio_child_location), DEVMETHOD(bus_child_pnpinfo, superio_child_pnp), DEVMETHOD(bus_print_child, superio_print_child), DEVMETHOD(bus_read_ivar, superio_read_ivar), DEVMETHOD(bus_write_ivar, superio_write_ivar), DEVMETHOD(bus_get_resource_list, superio_get_resource_list), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t superio_driver = { "superio", superio_methods, sizeof(struct siosc) }; DRIVER_MODULE(superio, isa, superio_driver, 0, 0); MODULE_VERSION(superio, 1); diff --git a/sys/i386/bios/smapi.c b/sys/i386/bios/smapi.c index b73fb97e4365..b222e5e4cc61 100644 --- a/sys/i386/bios/smapi.c +++ b/sys/i386/bios/smapi.c @@ -1,317 +1,317 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2003 Matthew N. Dodd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include /* And all this for BIOS_PADDRTOVADDR() */ #include #include #include #include #include #include #define SMAPI_START 0xf0000 #define SMAPI_STEP 0x10 #define SMAPI_OFF 0 #define SMAPI_LEN 4 #define SMAPI_SIG "$SMB" #define RES2HDR(res) ((struct smapi_bios_header *)rman_get_virtual(res)) #define ADDR2HDR(addr) ((struct smapi_bios_header *)BIOS_PADDRTOVADDR(addr)) struct smapi_softc { struct cdev * cdev; device_t dev; struct resource * res; int rid; u_int32_t smapi32_entry; struct smapi_bios_header *header; }; extern u_long smapi32_offset; extern u_short smapi32_segment; static d_ioctl_t smapi_ioctl; static struct cdevsw smapi_cdevsw = { .d_version = D_VERSION, .d_ioctl = smapi_ioctl, .d_name = "smapi", .d_flags = D_NEEDGIANT, }; static void smapi_identify(driver_t *, device_t); static int smapi_probe(device_t); static int smapi_attach(device_t); static int smapi_detach(device_t); static int smapi_modevent(module_t, int, void *); static int smapi_header_cksum(struct smapi_bios_header *); extern int smapi32(struct smapi_bios_parameter *, struct smapi_bios_parameter *); extern int smapi32_new(u_long, u_short, struct 
smapi_bios_parameter *, struct smapi_bios_parameter *); static int smapi_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct smapi_softc *sc = dev->si_drv1; int error; switch (cmd) { case SMAPIOGHEADER: bcopy((caddr_t)sc->header, data, sizeof(struct smapi_bios_header)); error = 0; break; case SMAPIOCGFUNCTION: smapi32_offset = sc->smapi32_entry; error = smapi32((struct smapi_bios_parameter *)data, (struct smapi_bios_parameter *)data); break; default: error = ENOTTY; } return (error); } static int smapi_header_cksum (struct smapi_bios_header *header) { u_int8_t *ptr; u_int8_t cksum; int i; ptr = (u_int8_t *)header; cksum = 0; for (i = 0; i < header->length; i++) { cksum += ptr[i]; } return (cksum); } static void smapi_identify (driver_t *driver, device_t parent) { device_t child; u_int32_t addr; int length; int rid; if (!device_is_alive(parent)) return; addr = bios_sigsearch(SMAPI_START, SMAPI_SIG, SMAPI_LEN, SMAPI_STEP, SMAPI_OFF); if (addr != 0) { rid = 0; length = ADDR2HDR(addr)->length; - child = BUS_ADD_CHILD(parent, 5, "smapi", -1); + child = BUS_ADD_CHILD(parent, 5, "smapi", DEVICE_UNIT_ANY); device_set_driver(child, driver); bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); device_set_desc(child, "SMAPI BIOS"); } return; } static int smapi_probe (device_t dev) { struct resource *res; int rid; int error; error = 0; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(dev, "Unable to allocate memory resource.\n"); error = ENOMEM; goto bad; } if (smapi_header_cksum(RES2HDR(res))) { device_printf(dev, "SMAPI header checksum failed.\n"); error = ENXIO; goto bad; } bad: if (res) bus_release_resource(dev, SYS_RES_MEMORY, rid, res); return (error); } static int smapi_attach (device_t dev) { struct make_dev_args args; struct smapi_softc *sc; int error; sc = device_get_softc(dev); error = 0; sc->dev = dev; sc->rid = 0; sc->res = bus_alloc_resource_any(dev, 
SYS_RES_MEMORY, &sc->rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "Unable to allocate memory resource.\n"); error = ENOMEM; goto bad; } sc->header = (struct smapi_bios_header *)rman_get_virtual(sc->res); sc->smapi32_entry = (u_int32_t)BIOS_PADDRTOVADDR( sc->header->prot32_segment + sc->header->prot32_offset); make_dev_args_init(&args); args.mda_devsw = &smapi_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_WHEEL; args.mda_mode = 0600; args.mda_si_drv1 = sc; error = make_dev_s(&args, &sc->cdev, "%s%d", smapi_cdevsw.d_name, device_get_unit(sc->dev)); if (error != 0) goto bad; device_printf(dev, "Version: %d.%02d, Length: %d, Checksum: 0x%02x\n", bcd2bin(sc->header->version_major), bcd2bin(sc->header->version_minor), sc->header->length, sc->header->checksum); device_printf(dev, "Information=0x%b\n", sc->header->information, "\020" "\001REAL_VM86" "\002PROTECTED_16" "\003PROTECTED_32"); if (bootverbose) { if (sc->header->information & SMAPI_REAL_VM86) device_printf(dev, "Real/VM86 mode: Segment 0x%04x, Offset 0x%04x\n", sc->header->real16_segment, sc->header->real16_offset); if (sc->header->information & SMAPI_PROT_16BIT) device_printf(dev, "16-bit Protected mode: Segment 0x%08x, Offset 0x%04x\n", sc->header->prot16_segment, sc->header->prot16_offset); if (sc->header->information & SMAPI_PROT_32BIT) device_printf(dev, "32-bit Protected mode: Segment 0x%08x, Offset 0x%08x\n", sc->header->prot32_segment, sc->header->prot32_offset); } return (0); bad: if (sc->res) bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); return (error); } static int smapi_detach (device_t dev) { struct smapi_softc *sc; sc = device_get_softc(dev); destroy_dev(sc->cdev); if (sc->res) bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); return (0); } static device_method_t smapi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, smapi_identify), DEVMETHOD(device_probe, smapi_probe), DEVMETHOD(device_attach, smapi_attach), DEVMETHOD(device_detach, 
smapi_detach), { 0, 0 } }; static driver_t smapi_driver = { "smapi", smapi_methods, sizeof(struct smapi_softc), }; static int smapi_modevent (module_t mod, int what, void *arg) { device_t * devs; int count; int i; switch (what) { case MOD_LOAD: break; case MOD_UNLOAD: devclass_get_devices(devclass_find(smapi_driver.name), &devs, &count); for (i = 0; i < count; i++) { device_delete_child(device_get_parent(devs[i]), devs[i]); } free(devs, M_TEMP); break; default: break; } return (0); } DRIVER_MODULE(smapi, nexus, smapi_driver, smapi_modevent, NULL); MODULE_VERSION(smapi, 1); diff --git a/sys/i386/i386/bios.c b/sys/i386/i386/bios.c index 0951fbe6a679..7f3e1546f236 100644 --- a/sys/i386/i386/bios.c +++ b/sys/i386/i386/bios.c @@ -1,723 +1,723 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997 Michael Smith * Copyright (c) 1998 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Code for dealing with the BIOS in x86 PC systems. */ #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ISA #include #include #include #endif #define BIOS_START 0xe0000 #define BIOS_SIZE 0x20000 /* exported lookup results */ struct bios32_SDentry PCIbios; static struct PnPBIOS_table *PnPBIOStable; static u_int bios32_SDCI; /* start fairly early */ static void bios32_init(void *junk); SYSINIT(bios32, SI_SUB_CPU, SI_ORDER_ANY, bios32_init, NULL); /* * bios32_init * * Locate various bios32 entities. 
*/ static void bios32_init(void *junk) { u_long sigaddr; struct bios32_SDheader *sdh; struct PnPBIOS_table *pt; u_int8_t ck, *cv; int i; char *p; /* * BIOS32 Service Directory, PCI BIOS */ /* look for the signature */ if ((sigaddr = bios_sigsearch(0, "_32_", 4, 16, 0)) != 0) { /* get a virtual pointer to the structure */ sdh = (struct bios32_SDheader *)(uintptr_t)BIOS_PADDRTOVADDR(sigaddr); for (cv = (u_int8_t *)sdh, ck = 0, i = 0; i < (sdh->len * 16); i++) { ck += cv[i]; } /* If checksum is OK, enable use of the entrypoint */ if ((ck == 0) && (BIOS_START <= sdh->entry ) && (sdh->entry < (BIOS_START + BIOS_SIZE))) { bios32_SDCI = BIOS_PADDRTOVADDR(sdh->entry); if (bootverbose) { printf("bios32: Found BIOS32 Service Directory header at %p\n", sdh); printf("bios32: Entry = 0x%x (%x) Rev = %d Len = %d\n", sdh->entry, bios32_SDCI, sdh->revision, sdh->len); } /* Allow user override of PCI BIOS search */ if (((p = kern_getenv("machdep.bios.pci")) == NULL) || strcmp(p, "disable")) { /* See if there's a PCI BIOS entrypoint here */ PCIbios.ident.id = 0x49435024; /* PCI systems should have this */ if (!bios32_SDlookup(&PCIbios) && bootverbose) printf("pcibios: PCI BIOS entry at 0x%x+0x%x\n", PCIbios.base, PCIbios.entry); } if (p != NULL) freeenv(p); } else { printf("bios32: Bad BIOS32 Service Directory\n"); } } /* * PnP BIOS * * Allow user override of PnP BIOS search */ if ((((p = kern_getenv("machdep.bios.pnp")) == NULL) || strcmp(p, "disable")) && ((sigaddr = bios_sigsearch(0, "$PnP", 4, 16, 0)) != 0)) { /* get a virtual pointer to the structure */ pt = (struct PnPBIOS_table *)(uintptr_t)BIOS_PADDRTOVADDR(sigaddr); for (cv = (u_int8_t *)pt, ck = 0, i = 0; i < pt->len; i++) { ck += cv[i]; } /* If checksum is OK, enable use of the entrypoint */ if (ck == 0) { PnPBIOStable = pt; if (bootverbose) { printf("pnpbios: Found PnP BIOS data at %p\n", pt); printf("pnpbios: Entry = %x:%x Rev = %d.%d\n", pt->pmentrybase, pt->pmentryoffset, pt->version >> 4, pt->version & 0xf); if 
((pt->control & 0x3) == 0x01) printf("pnpbios: Event flag at %x\n", pt->evflagaddr); if (pt->oemdevid != 0) printf("pnpbios: OEM ID %x\n", pt->oemdevid); } } else { printf("pnpbios: Bad PnP BIOS data checksum\n"); } } if (p != NULL) freeenv(p); if (bootverbose) { /* look for other know signatures */ printf("Other BIOS signatures found:\n"); } } /* * bios32_SDlookup * * Query the BIOS32 Service Directory for the service named in (ent), * returns nonzero if the lookup fails. The caller must fill in * (ent->ident), the remainder are populated on a successful lookup. */ int bios32_SDlookup(struct bios32_SDentry *ent) { struct bios_regs args; if (bios32_SDCI == 0) return (1); args.eax = ent->ident.id; /* set up arguments */ args.ebx = args.ecx = args.edx = 0; bios32(&args, bios32_SDCI, GSEL(GCODE_SEL, SEL_KPL)); if ((args.eax & 0xff) == 0) { /* success? */ ent->base = args.ebx; ent->len = args.ecx; ent->entry = args.edx; ent->ventry = BIOS_PADDRTOVADDR(ent->base + ent->entry); return (0); /* all OK */ } return (1); /* failed */ } /* * bios_sigsearch * * Search some or all of the BIOS region for a signature string. * * (start) Optional offset returned from this function * (for searching for multiple matches), or NULL * to start the search from the base of the BIOS. * Note that this will be a _physical_ address in * the range 0xe0000 - 0xfffff. * (sig) is a pointer to the byte(s) of the signature. * (siglen) number of bytes in the signature. * (paralen) signature paragraph (alignment) size. * (sigofs) offset of the signature within the paragraph. * * Returns the _physical_ address of the found signature, 0 if the * signature was not found. 
*/ u_int32_t bios_sigsearch(u_int32_t start, u_char *sig, int siglen, int paralen, int sigofs) { u_char *sp, *end; /* compute the starting address */ if ((start >= BIOS_START) && (start <= (BIOS_START + BIOS_SIZE))) { sp = (char *)BIOS_PADDRTOVADDR(start); } else if (start == 0) { sp = (char *)BIOS_PADDRTOVADDR(BIOS_START); } else { return 0; /* bogus start address */ } /* compute the end address */ end = (u_char *)BIOS_PADDRTOVADDR(BIOS_START + BIOS_SIZE); /* loop searching */ while ((sp + sigofs + siglen) < end) { /* compare here */ if (!bcmp(sp + sigofs, sig, siglen)) { /* convert back to physical address */ return((u_int32_t)BIOS_VADDRTOPADDR(sp)); } sp += paralen; } return(0); } /* * do not staticize, used by bioscall.s */ union { struct { u_short offset; u_short segment; } vec16; struct { u_int offset; u_short segment; } vec32; } bioscall_vector; /* bios jump vector */ void set_bios_selectors(struct bios_segments *seg, int flags) { struct soft_segment_descriptor ssd = { 0, /* segment base address (overwritten) */ 0, /* length (overwritten) */ SDT_MEMERA, /* segment type (overwritten) */ 0, /* priority level */ 1, /* descriptor present */ 0, 0, 1, /* descriptor size (overwritten) */ 0 /* granularity == byte units */ }; union descriptor *p_gdt; #ifdef SMP p_gdt = &gdt[PCPU_GET(cpuid) * NGDT]; #else p_gdt = gdt; #endif ssd.ssd_base = seg->code32.base; ssd.ssd_limit = seg->code32.limit; ssdtosd(&ssd, &p_gdt[GBIOSCODE32_SEL].sd); ssd.ssd_def32 = 0; if (flags & BIOSCODE_FLAG) { ssd.ssd_base = seg->code16.base; ssd.ssd_limit = seg->code16.limit; ssdtosd(&ssd, &p_gdt[GBIOSCODE16_SEL].sd); } ssd.ssd_type = SDT_MEMRWA; if (flags & BIOSDATA_FLAG) { ssd.ssd_base = seg->data.base; ssd.ssd_limit = seg->data.limit; ssdtosd(&ssd, &p_gdt[GBIOSDATA_SEL].sd); } if (flags & BIOSUTIL_FLAG) { ssd.ssd_base = seg->util.base; ssd.ssd_limit = seg->util.limit; ssdtosd(&ssd, &p_gdt[GBIOSUTIL_SEL].sd); } if (flags & BIOSARGS_FLAG) { ssd.ssd_base = seg->args.base; ssd.ssd_limit = 
seg->args.limit; ssdtosd(&ssd, &p_gdt[GBIOSARGS_SEL].sd); } } extern int vm86pa; extern u_long vm86phystk; extern void bios16_jmp(void); /* * this routine is really greedy with selectors, and uses 5: * * 32-bit code selector: to return to kernel * 16-bit code selector: for running code * data selector: for 16-bit data * util selector: extra utility selector * args selector: to handle pointers * * the util selector is set from the util16 entry in bios16_args, if a * "U" specifier is seen. * * See for description of format specifiers */ int bios16(struct bios_args *args, char *fmt, ...) { char *p, *stack, *stack_top; va_list ap; int flags = BIOSCODE_FLAG | BIOSDATA_FLAG; u_int i, arg_start, arg_end; void *bios16_pmap_handle; arg_start = 0xffffffff; arg_end = 0; /* * Some BIOS entrypoints attempt to copy the largest-case * argument frame (in order to generalise handling for * different entry types). If our argument frame is * smaller than this, the BIOS will reach off the top of * our constructed stack segment. Pad the top of the stack * with some garbage to avoid this. 
*/ stack = (caddr_t)PAGE_SIZE - 32; va_start(ap, fmt); for (p = fmt; p && *p; p++) { switch (*p) { case 'p': /* 32-bit pointer */ i = va_arg(ap, u_int); arg_start = min(arg_start, i); arg_end = max(arg_end, i); flags |= BIOSARGS_FLAG; stack -= 4; break; case 'i': /* 32-bit integer */ i = va_arg(ap, u_int); stack -= 4; break; case 'U': /* 16-bit selector */ flags |= BIOSUTIL_FLAG; /* FALLTHROUGH */ case 'D': /* 16-bit selector */ case 'C': /* 16-bit selector */ stack -= 2; break; case 's': /* 16-bit integer passed as an int */ i = va_arg(ap, int); stack -= 2; break; default: va_end(ap); return (EINVAL); } } va_end(ap); if (flags & BIOSARGS_FLAG) { if (arg_end - arg_start > ctob(16)) return (EACCES); args->seg.args.base = arg_start; args->seg.args.limit = 0xffff; } args->seg.code32.base = pmap_pg_frame((u_int)&bios16_jmp); args->seg.code32.limit = 0xffff; bios16_pmap_handle = pmap_bios16_enter(); stack_top = stack; va_start(ap, fmt); for (p = fmt; p && *p; p++) { switch (*p) { case 'p': /* 32-bit pointer */ i = va_arg(ap, u_int); *(u_int *)stack = (i - arg_start) | (GSEL(GBIOSARGS_SEL, SEL_KPL) << 16); stack += 4; break; case 'i': /* 32-bit integer */ i = va_arg(ap, u_int); *(u_int *)stack = i; stack += 4; break; case 'U': /* 16-bit selector */ *(u_short *)stack = GSEL(GBIOSUTIL_SEL, SEL_KPL); stack += 2; break; case 'D': /* 16-bit selector */ *(u_short *)stack = GSEL(GBIOSDATA_SEL, SEL_KPL); stack += 2; break; case 'C': /* 16-bit selector */ *(u_short *)stack = GSEL(GBIOSCODE16_SEL, SEL_KPL); stack += 2; break; case 's': /* 16-bit integer passed as an int */ i = va_arg(ap, int); *(u_short *)stack = i; stack += 2; break; default: va_end(ap); return (EINVAL); } } va_end(ap); set_bios_selectors(&args->seg, flags); bioscall_vector.vec16.offset = (u_short)args->entry; bioscall_vector.vec16.segment = GSEL(GBIOSCODE16_SEL, SEL_KPL); i = bios16_call(&args->r, stack_top); pmap_bios16_leave(bios16_pmap_handle); return (i); } int bios_oem_strings(struct bios_oem *oem, u_char 
*buffer, size_t maxlen) { size_t idx = 0; struct bios_oem_signature *sig; u_int from, to; u_char c, *s, *se, *str, *bios_str; size_t i, off, len, tot; if ( !oem || !buffer || maxlen<2 ) return(-1); sig = oem->signature; if (!sig) return(-2); from = oem->range.from; to = oem->range.to; if ( (to<=from) || (from(BIOS_START+BIOS_SIZE)) ) return(-3); while (sig->anchor != NULL) { str = sig->anchor; len = strlen(str); off = sig->offset; tot = sig->totlen; /* make sure offset doesn't go beyond bios area */ if ( (to+off)>(BIOS_START+BIOS_SIZE) || ((from+off) maxlen - 1) { printf("sys/i386/i386/bios.c: sig '%s' " "idx %d + tot %d = %d > maxlen-1 %d\n", str, idx, tot, idx+tot, maxlen-1); return(-5); } bios_str = NULL; s = (u_char *)BIOS_PADDRTOVADDR(from); se = (u_char *)BIOS_PADDRTOVADDR(to-len); for (; s 0x7E) ) c = ' '; if (idx == 0) { if (c != ' ') buffer[idx++] = c; } else if ( (c != ' ') || ((c == ' ') && (buffer[idx-1] != ' ')) ) buffer[idx++] = c; } } sig++; } /* remove a final trailing space */ if ( (idx > 1) && (buffer[idx-1] == ' ') ) idx--; buffer[idx] = '\0'; return (idx); } #ifdef DEV_ISA /* * PnP BIOS interface; enumerate devices only known to the system * BIOS and save information about them for later use. 
*/ struct pnp_sysdev { u_int16_t size; u_int8_t handle; u_int32_t devid; u_int8_t type[3]; u_int16_t attrib; #define PNPATTR_NODISABLE (1<<0) /* can't be disabled */ #define PNPATTR_NOCONFIG (1<<1) /* can't be configured */ #define PNPATTR_OUTPUT (1<<2) /* can be primary output */ #define PNPATTR_INPUT (1<<3) /* can be primary input */ #define PNPATTR_BOOTABLE (1<<4) /* can be booted from */ #define PNPATTR_DOCK (1<<5) /* is a docking station */ #define PNPATTR_REMOVEABLE (1<<6) /* device is removeable */ #define PNPATTR_CONFIG_STATIC (0) #define PNPATTR_CONFIG_DYNAMIC (1) #define PNPATTR_CONFIG_DYNONLY (3) #define PNPATTR_CONFIG(a) (((a) >> 7) & 0x3) /* device-specific data comes here */ u_int8_t devdata[0]; } __packed; /* We have to cluster arguments within a 64k range for the bios16 call */ struct pnp_sysdevargs { u_int16_t next; struct pnp_sysdev node; }; /* * This function is called after the bus has assigned resource * locations for a logical device. */ static void pnpbios_set_config(void *arg, struct isa_config *config, int enable) { } /* * Quiz the PnP BIOS, build a list of PNP IDs and resource data. */ static void pnpbios_identify(driver_t *driver, device_t parent) { struct PnPBIOS_table *pt = PnPBIOStable; struct bios_args args; struct pnp_sysdev *pd; struct pnp_sysdevargs *pda; u_int16_t ndevs, bigdev; int error, currdev; u_int8_t *devnodebuf, tag; u_int32_t *devid, *compid; int idx, left; device_t dev; /* no PnP BIOS information */ if (pt == NULL) return; /* Check to see if ACPI is already active. */ dev = devclass_get_device(devclass_find("acpi"), 0); if (dev != NULL && device_is_attached(dev)) return; /* get count of PnP devices */ bzero(&args, sizeof(args)); args.seg.code16.base = BIOS_PADDRTOVADDR(pt->pmentrybase); args.seg.code16.limit = 0xffff; /* XXX ? 
*/ args.seg.data.base = BIOS_PADDRTOVADDR(pt->pmdataseg); args.seg.data.limit = 0xffff; args.entry = pt->pmentryoffset; if ((error = bios16(&args, PNP_COUNT_DEVNODES, &ndevs, &bigdev)) || (args.r.eax & 0xff)) { printf("pnpbios: error %d/%x getting device count/size limit\n", error, args.r.eax); return; } ndevs &= 0xff; /* clear high byte garbage */ if (bootverbose) printf("pnpbios: %d devices, largest %d bytes\n", ndevs, bigdev); devnodebuf = malloc(bigdev + (sizeof(struct pnp_sysdevargs) - sizeof(struct pnp_sysdev)), M_DEVBUF, M_NOWAIT); if (devnodebuf == NULL) { printf("pnpbios: cannot allocate memory, bailing\n"); return; } pda = (struct pnp_sysdevargs *)devnodebuf; pd = &pda->node; for (currdev = 0, left = ndevs; (currdev != 0xff) && (left > 0); left--) { bzero(pd, bigdev); pda->next = currdev; /* get current configuration */ if ((error = bios16(&args, PNP_GET_DEVNODE, &pda->next, &pda->node, 1))) { printf("pnpbios: error %d making BIOS16 call\n", error); break; } if ((error = (args.r.eax & 0xff))) { if (bootverbose) printf("pnpbios: %s 0x%x fetching node %d\n", error & 0x80 ? "error" : "warning", error, currdev); if (error & 0x80) break; } currdev = pda->next; if (pd->size < sizeof(struct pnp_sysdev)) { printf("pnpbios: bogus system node data, aborting scan\n"); break; } /* * Ignore PICs so that we don't have to worry about the PICs * claiming IRQs to prevent their use. The PIC drivers * already ensure that invalid IRQs are not used. */ if (!strcmp(pnp_eisaformat(pd->devid), "PNP0000")) /* ISA PIC */ continue; if (!strcmp(pnp_eisaformat(pd->devid), "PNP0003")) /* APIC */ continue; /* Add the device and parse its resources */ - dev = BUS_ADD_CHILD(parent, ISA_ORDER_PNPBIOS, NULL, -1); + dev = BUS_ADD_CHILD(parent, ISA_ORDER_PNPBIOS, NULL, DEVICE_UNIT_ANY); isa_set_vendorid(dev, pd->devid); isa_set_logicalid(dev, pd->devid); /* * It appears that some PnP BIOS doesn't allow us to re-enable * the embedded system device once it is disabled. 
We shall * mark all system device nodes as "cannot be disabled", regardless * of actual settings in the device attribute byte. * XXX isa_set_configattr(dev, ((pd->attrib & PNPATTR_NODISABLE) ? 0 : ISACFGATTR_CANDISABLE) | ((!(pd->attrib & PNPATTR_NOCONFIG) && PNPATTR_CONFIG(pd->attrib) != PNPATTR_CONFIG_STATIC) ? ISACFGATTR_DYNAMIC : 0)); */ isa_set_configattr(dev, (!(pd->attrib & PNPATTR_NOCONFIG) && PNPATTR_CONFIG(pd->attrib) != PNPATTR_CONFIG_STATIC) ? ISACFGATTR_DYNAMIC : 0); isa_set_pnpbios_handle(dev, pd->handle); ISA_SET_CONFIG_CALLBACK(parent, dev, pnpbios_set_config, 0); pnp_parse_resources(dev, &pd->devdata[0], pd->size - sizeof(struct pnp_sysdev), 0); if (!device_get_desc(dev)) device_set_desc_copy(dev, pnp_eisaformat(pd->devid)); /* Find device IDs */ devid = &pd->devid; compid = NULL; /* look for a compatible device ID too */ left = pd->size - sizeof(struct pnp_sysdev); idx = 0; while (idx < left) { tag = pd->devdata[idx++]; if (PNP_RES_TYPE(tag) == 0) { /* Small resource */ switch (PNP_SRES_NUM(tag)) { case PNP_TAG_COMPAT_DEVICE: compid = (u_int32_t *)(pd->devdata + idx); if (bootverbose) printf("pnpbios: node %d compat ID 0x%08x\n", pd->handle, *compid); /* FALLTHROUGH */ case PNP_TAG_END: idx = left; break; default: idx += PNP_SRES_LEN(tag); break; } } else /* Large resource, skip it */ idx += *(u_int16_t *)(pd->devdata + idx) + 2; } if (bootverbose) { printf("pnpbios: handle %d device ID %s (%08x)", pd->handle, pnp_eisaformat(*devid), *devid); if (compid != NULL) printf(" compat ID %s (%08x)", pnp_eisaformat(*compid), *compid); printf("\n"); } } } static device_method_t pnpbios_methods[] = { /* Device interface */ DEVMETHOD(device_identify, pnpbios_identify), { 0, 0 } }; static driver_t pnpbios_driver = { "pnpbios", pnpbios_methods, 1, /* no softc */ }; DRIVER_MODULE(pnpbios, isa, pnpbios_driver, 0, 0); #endif /* DEV_ISA */ diff --git a/sys/isa/pnp.c b/sys/isa/pnp.c index 877e2ed91d6f..0e711a93d1ce 100644 --- a/sys/isa/pnp.c +++ b/sys/isa/pnp.c @@ 
-1,767 +1,767 @@ /* * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1996, Sujal M. Patel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: pnp.c,v 1.11 1999/05/06 22:11:19 peter Exp */ #include #include #include #include #include #include #include #include #include #include #include typedef struct _pnp_id { uint32_t vendor_id; uint32_t serial; u_char checksum; } pnp_id; struct pnp_set_config_arg { int csn; /* Card number to configure */ int ldn; /* Logical device on card */ }; struct pnp_quirk { uint32_t vendor_id; /* Vendor of the card */ uint32_t logical_id; /* ID of the device with quirk */ int type; #define PNP_QUIRK_WRITE_REG 1 /* Need to write a pnp register */ #define PNP_QUIRK_EXTRA_IO 2 /* Has extra io ports */ int arg1; int arg2; }; struct pnp_quirk pnp_quirks[] = { /* * The Gravis UltraSound needs register 0xf2 to be set to 0xff * to enable power. * XXX need to know the logical device id. */ { 0x0100561e /* GRV0001 */, 0, PNP_QUIRK_WRITE_REG, 0xf2, 0xff }, /* * An emu8000 does not give us other than the first * port. */ { 0x26008c0e /* SB16 */, 0x21008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0x42008c0e /* SB32(CTL0042) */, 0x21008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0x44008c0e /* SB32(CTL0044) */, 0x21008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0x49008c0e /* SB32(CTL0049) */, 0x21008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0xf1008c0e /* SB32(CTL00f1) */, 0x21008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0xc1008c0e /* SB64(CTL00c1) */, 0x22008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0xc5008c0e /* SB64(CTL00c5) */, 0x22008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0xe4008c0e /* SB64(CTL00e4) */, 0x22008c0e, PNP_QUIRK_EXTRA_IO, 0x400, 0x800 }, { 0 } }; /* The READ_DATA port that we are using currently */ static int pnp_rd_port; static void pnp_send_initiation_key(void); static int pnp_get_serial(pnp_id *p); static int pnp_isolation_protocol(device_t parent); static void pnp_write(int d, u_char r) { outb (_PNP_ADDRESS, d); outb (_PNP_WRITE_DATA, r); } /* * Send Initiation LFSR as described in "Plug and Play ISA Specification", * Intel May 94. 
*/ static void pnp_send_initiation_key(void) { int cur, i; /* Reset the LSFR */ outb(_PNP_ADDRESS, 0); outb(_PNP_ADDRESS, 0); /* yes, we do need it twice! */ cur = 0x6a; outb(_PNP_ADDRESS, cur); for (i = 1; i < 32; i++) { cur = (cur >> 1) | (((cur ^ (cur >> 1)) << 7) & 0xff); outb(_PNP_ADDRESS, cur); } } /* * Get the device's serial number. Returns 1 if the serial is valid. */ static int pnp_get_serial(pnp_id *p) { int i, bit, valid = 0, sum = 0x6a; u_char *data = (u_char *)p; bzero(data, sizeof(char) * 9); outb(_PNP_ADDRESS, PNP_SERIAL_ISOLATION); for (i = 0; i < 72; i++) { bit = inb((pnp_rd_port << 2) | 0x3) == 0x55; DELAY(250); /* Delay 250 usec */ /* Can't Short Circuit the next evaluation, so 'and' is last */ bit = (inb((pnp_rd_port << 2) | 0x3) == 0xaa) && bit; DELAY(250); /* Delay 250 usec */ valid = valid || bit; if (i < 64) sum = (sum >> 1) | (((sum ^ (sum >> 1) ^ bit) << 7) & 0xff); data[i / 8] = (data[i / 8] >> 1) | (bit ? 0x80 : 0); } valid = valid && (data[8] == sum); return (valid); } /* * Fill's the buffer with resource info from the device. * Returns the number of characters read. */ static int pnp_get_resource_info(u_char *buffer, int len) { int i, j, count; u_char temp; count = 0; for (i = 0; i < len; i++) { outb(_PNP_ADDRESS, PNP_STATUS); for (j = 0; j < 100; j++) { if ((inb((pnp_rd_port << 2) | 0x3)) & 0x1) break; DELAY(10); } if (j == 100) { printf("PnP device failed to report resource data\n"); return (count); } outb(_PNP_ADDRESS, PNP_RESOURCE_DATA); temp = inb((pnp_rd_port << 2) | 0x3); if (buffer != NULL) buffer[i] = temp; count++; } return (count); } /* * This function is called after the bus has assigned resource * locations for a logical device. */ static void pnp_set_config(void *arg, struct isa_config *config, int enable) { int csn = ((struct pnp_set_config_arg *) arg)->csn; int ldn = ((struct pnp_set_config_arg *) arg)->ldn; int i; /* * First put all cards into Sleep state with the initiation * key, then put our card into Config state. 
*/ pnp_send_initiation_key(); pnp_write(PNP_WAKE, csn); /* * Select our logical device so that we can program it. */ pnp_write(PNP_SET_LDN, ldn); /* * Constrain the number of resources we will try to program */ if (config->ic_nmem > ISA_PNP_NMEM) { printf("too many ISA memory ranges (%d > %d)\n", config->ic_nmem, ISA_PNP_NMEM); config->ic_nmem = ISA_PNP_NMEM; } if (config->ic_nport > ISA_PNP_NPORT) { printf("too many ISA I/O ranges (%d > %d)\n", config->ic_nport, ISA_PNP_NPORT); config->ic_nport = ISA_PNP_NPORT; } if (config->ic_nirq > ISA_PNP_NIRQ) { printf("too many ISA IRQs (%d > %d)\n", config->ic_nirq, ISA_PNP_NIRQ); config->ic_nirq = ISA_PNP_NIRQ; } if (config->ic_ndrq > ISA_PNP_NDRQ) { printf("too many ISA DRQs (%d > %d)\n", config->ic_ndrq, ISA_PNP_NDRQ); config->ic_ndrq = ISA_PNP_NDRQ; } /* * Now program the resources. */ for (i = 0; i < config->ic_nmem; i++) { uint32_t start; uint32_t size; /* XXX: should handle memory control register, 32 bit memory */ if (config->ic_mem[i].ir_size == 0) { pnp_write(PNP_MEM_BASE_HIGH(i), 0); pnp_write(PNP_MEM_BASE_LOW(i), 0); pnp_write(PNP_MEM_RANGE_HIGH(i), 0); pnp_write(PNP_MEM_RANGE_LOW(i), 0); } else { start = config->ic_mem[i].ir_start; size = config->ic_mem[i].ir_size; if (start & 0xff) panic("pnp_set_config: bogus memory assignment"); pnp_write(PNP_MEM_BASE_HIGH(i), (start >> 16) & 0xff); pnp_write(PNP_MEM_BASE_LOW(i), (start >> 8) & 0xff); pnp_write(PNP_MEM_RANGE_HIGH(i), (size >> 16) & 0xff); pnp_write(PNP_MEM_RANGE_LOW(i), (size >> 8) & 0xff); } } for (; i < ISA_PNP_NMEM; i++) { pnp_write(PNP_MEM_BASE_HIGH(i), 0); pnp_write(PNP_MEM_BASE_LOW(i), 0); pnp_write(PNP_MEM_RANGE_HIGH(i), 0); pnp_write(PNP_MEM_RANGE_LOW(i), 0); } for (i = 0; i < config->ic_nport; i++) { uint32_t start; if (config->ic_port[i].ir_size == 0) { pnp_write(PNP_IO_BASE_HIGH(i), 0); pnp_write(PNP_IO_BASE_LOW(i), 0); } else { start = config->ic_port[i].ir_start; pnp_write(PNP_IO_BASE_HIGH(i), (start >> 8) & 0xff); pnp_write(PNP_IO_BASE_LOW(i), 
(start >> 0) & 0xff); } } for (; i < ISA_PNP_NPORT; i++) { pnp_write(PNP_IO_BASE_HIGH(i), 0); pnp_write(PNP_IO_BASE_LOW(i), 0); } for (i = 0; i < config->ic_nirq; i++) { int irq; /* XXX: interrupt type */ if (config->ic_irqmask[i] == 0) { pnp_write(PNP_IRQ_LEVEL(i), 0); pnp_write(PNP_IRQ_TYPE(i), 2); } else { irq = ffs(config->ic_irqmask[i]) - 1; pnp_write(PNP_IRQ_LEVEL(i), irq); pnp_write(PNP_IRQ_TYPE(i), 2); /* XXX */ } } for (; i < ISA_PNP_NIRQ; i++) { /* * IRQ 0 is not a valid interrupt selection and * represents no interrupt selection. */ pnp_write(PNP_IRQ_LEVEL(i), 0); pnp_write(PNP_IRQ_TYPE(i), 2); } for (i = 0; i < config->ic_ndrq; i++) { int drq; if (config->ic_drqmask[i] == 0) { pnp_write(PNP_DMA_CHANNEL(i), 4); } else { drq = ffs(config->ic_drqmask[i]) - 1; pnp_write(PNP_DMA_CHANNEL(i), drq); } } for (; i < ISA_PNP_NDRQ; i++) { /* * DMA channel 4, the cascade channel is used to * indicate no DMA channel is active. */ pnp_write(PNP_DMA_CHANNEL(i), 4); } pnp_write(PNP_ACTIVATE, enable ? 1 : 0); /* * Wake everyone up again, we are finished. */ pnp_write(PNP_CONFIG_CONTROL, PNP_CONFIG_CONTROL_WAIT_FOR_KEY); } /* * Process quirks for a logical device.. The card must be in Config state. 
*/ void pnp_check_quirks(uint32_t vendor_id, uint32_t logical_id, int ldn, struct isa_config *config) { struct pnp_quirk *qp; for (qp = &pnp_quirks[0]; qp->vendor_id; qp++) { if (qp->vendor_id == vendor_id && (qp->logical_id == 0 || qp->logical_id == logical_id)) { switch (qp->type) { case PNP_QUIRK_WRITE_REG: pnp_write(PNP_SET_LDN, ldn); pnp_write(qp->arg1, qp->arg2); break; case PNP_QUIRK_EXTRA_IO: if (config == NULL) break; if (qp->arg1 != 0) { config->ic_nport++; config->ic_port[config->ic_nport - 1] = config->ic_port[0]; config->ic_port[config->ic_nport - 1].ir_start += qp->arg1; config->ic_port[config->ic_nport - 1].ir_end += qp->arg1; } if (qp->arg2 != 0) { config->ic_nport++; config->ic_port[config->ic_nport - 1] = config->ic_port[0]; config->ic_port[config->ic_nport - 1].ir_start += qp->arg2; config->ic_port[config->ic_nport - 1].ir_end += qp->arg2; } break; } } } } /* * Scan Resource Data for Logical Devices. * * This function exits as soon as it gets an error reading *ANY* * Resource Data or it reaches the end of Resource Data. In the first * case the return value will be TRUE, FALSE otherwise. */ static int pnp_create_devices(device_t parent, pnp_id *p, int csn, u_char *resources, int len) { u_char tag, *resp, *resinfo, *startres = NULL; int large_len, scanning = len, retval = FALSE; uint32_t logical_id; device_t dev = 0; int ldn = 0; struct pnp_set_config_arg *csnldn; char buf[100]; char *desc = NULL; resp = resources; while (scanning > 0) { tag = *resp++; scanning--; if (PNP_RES_TYPE(tag) != 0) { /* Large resource */ if (scanning < 2) { scanning = 0; continue; } large_len = resp[0] + (resp[1] << 8); resp += 2; if (scanning < large_len) { scanning = 0; continue; } resinfo = resp; resp += large_len; scanning -= large_len; if (PNP_LRES_NUM(tag) == PNP_TAG_ID_ANSI) { if (dev) { /* * This is an optional device * identifier string. Skip it * for now. 
*/ continue; } /* else mandately card identifier string */ if (large_len > sizeof(buf) - 1) large_len = sizeof(buf) - 1; bcopy(resinfo, buf, large_len); /* * Trim trailing spaces. */ while (buf[large_len-1] == ' ') large_len--; buf[large_len] = '\0'; desc = buf; continue; } continue; } /* Small resource */ if (scanning < PNP_SRES_LEN(tag)) { scanning = 0; continue; } resinfo = resp; resp += PNP_SRES_LEN(tag); scanning -= PNP_SRES_LEN(tag); switch (PNP_SRES_NUM(tag)) { case PNP_TAG_LOGICAL_DEVICE: /* * Parse the resources for the previous * logical device (if any). */ if (startres) { pnp_parse_resources(dev, startres, resinfo - startres - 1, ldn); dev = 0; startres = NULL; } /* * A new logical device. Scan for end of * resources. */ bcopy(resinfo, &logical_id, 4); pnp_check_quirks(p->vendor_id, logical_id, ldn, NULL); - dev = BUS_ADD_CHILD(parent, ISA_ORDER_PNP, NULL, -1); + dev = BUS_ADD_CHILD(parent, ISA_ORDER_PNP, NULL, DEVICE_UNIT_ANY); if (desc) device_set_desc_copy(dev, desc); else device_set_desc_copy(dev, pnp_eisaformat(logical_id)); isa_set_vendorid(dev, p->vendor_id); isa_set_serial(dev, p->serial); isa_set_logicalid(dev, logical_id); isa_set_configattr(dev, ISACFGATTR_CANDISABLE | ISACFGATTR_DYNAMIC); csnldn = malloc(sizeof *csnldn, M_DEVBUF, M_NOWAIT); if (!csnldn) { device_printf(parent, "out of memory\n"); scanning = 0; break; } csnldn->csn = csn; csnldn->ldn = ldn; ISA_SET_CONFIG_CALLBACK(parent, dev, pnp_set_config, csnldn); isa_set_pnp_csn(dev, csn); isa_set_pnp_ldn(dev, ldn); ldn++; startres = resp; break; case PNP_TAG_END: if (!startres) { device_printf(parent, "malformed resources\n"); scanning = 0; break; } pnp_parse_resources(dev, startres, resinfo - startres - 1, ldn); dev = 0; startres = NULL; scanning = 0; break; default: /* Skip this resource */ break; } } return (retval); } /* * Read 'amount' bytes of resources from the card, allocating memory * as needed. 
If a buffer is already available, it should be passed in * '*resourcesp' and its length in '*spacep'. The number of resource * bytes already in the buffer should be passed in '*lenp'. The memory * allocated will be returned in '*resourcesp' with its size and the * number of bytes of resources in '*spacep' and '*lenp' respectively. * * XXX: Multiple problems here, we forget to free() stuff in one * XXX: error return, and in another case we free (*resourcesp) but * XXX: don't tell the caller. */ static int pnp_read_bytes(int amount, u_char **resourcesp, int *spacep, int *lenp) { u_char *resources = *resourcesp; u_char *newres; int space = *spacep; int len = *lenp; if (space == 0) { space = 1024; resources = malloc(space, M_TEMP, M_NOWAIT); if (!resources) return (ENOMEM); } if (len + amount > space) { int extra = 1024; while (len + amount > space + extra) extra += 1024; newres = malloc(space + extra, M_TEMP, M_NOWAIT); if (!newres) { /* XXX: free resources */ return (ENOMEM); } bcopy(resources, newres, len); free(resources, M_TEMP); resources = newres; space += extra; } if (pnp_get_resource_info(resources + len, amount) != amount) return (EINVAL); len += amount; *resourcesp = resources; *spacep = space; *lenp = len; return (0); } /* * Read all resources from the card, allocating memory as needed. If a * buffer is already available, it should be passed in '*resourcesp' * and its length in '*spacep'. The memory allocated will be returned * in '*resourcesp' with its size and the number of bytes of resources * in '*spacep' and '*lenp' respectively. */ static int pnp_read_resources(u_char **resourcesp, int *spacep, int *lenp) { u_char *resources = *resourcesp; int space = *spacep; int len = 0; int error, done; u_char tag; error = 0; done = 0; while (!done) { error = pnp_read_bytes(1, &resources, &space, &len); if (error) goto out; tag = resources[len-1]; if (PNP_RES_TYPE(tag) == 0) { /* * Small resource, read contents. 
*/ error = pnp_read_bytes(PNP_SRES_LEN(tag), &resources, &space, &len); if (error) goto out; if (PNP_SRES_NUM(tag) == PNP_TAG_END) done = 1; } else { /* * Large resource, read length and contents. */ error = pnp_read_bytes(2, &resources, &space, &len); if (error) goto out; error = pnp_read_bytes(resources[len-2] + (resources[len-1] << 8), &resources, &space, &len); if (error) goto out; } } out: *resourcesp = resources; *spacep = space; *lenp = len; return (error); } /* * Run the isolation protocol. Use pnp_rd_port as the READ_DATA port * value (caller should try multiple READ_DATA locations before giving * up). Upon exiting, all cards are aware that they should use * pnp_rd_port as the READ_DATA port. * * In the first pass, a csn is assigned to each board and pnp_id's * are saved to an array, pnp_devices. In the second pass, each * card is woken up and the device configuration is called. */ static int pnp_isolation_protocol(device_t parent) { int csn; pnp_id id; int found = 0, len; u_char *resources = NULL; int space = 0; int error; /* * Put all cards into the Sleep state so that we can clear * their CSNs. */ pnp_send_initiation_key(); /* * Clear the CSN for all cards. */ pnp_write(PNP_CONFIG_CONTROL, PNP_CONFIG_CONTROL_RESET_CSN); /* * Move all cards to the Isolation state. */ pnp_write(PNP_WAKE, 0); /* * Tell them where the read point is going to be this time. */ pnp_write(PNP_SET_RD_DATA, pnp_rd_port); for (csn = 1; csn < PNP_MAX_CARDS; csn++) { /* * Start the serial isolation protocol. */ outb(_PNP_ADDRESS, PNP_SERIAL_ISOLATION); DELAY(1000); /* Delay 1 msec */ if (pnp_get_serial(&id)) { /* * We have read the id from a card * successfully. The card which won the * isolation protocol will be in Isolation * mode and all others will be in Sleep. * Program the CSN of the isolated card * (taking it to Config state) and read its * resources, creating devices as we find * logical devices on the card. 
*/ pnp_write(PNP_SET_CSN, csn); if (bootverbose) printf("Reading PnP configuration for %s.\n", pnp_eisaformat(id.vendor_id)); error = pnp_read_resources(&resources, &space, &len); if (error) break; pnp_create_devices(parent, &id, csn, resources, len); found++; } else break; /* * Put this card back to the Sleep state and * simultaneously move all cards which don't have a * CSN yet to Isolation state. */ pnp_write(PNP_WAKE, 0); } /* * Unless we have chosen the wrong read port, all cards will * be in Sleep state. Put them back into WaitForKey for * now. Their resources will be programmed later. */ pnp_write(PNP_CONFIG_CONTROL, PNP_CONFIG_CONTROL_WAIT_FOR_KEY); /* * Cleanup. */ if (resources) free(resources, M_TEMP); return (found); } /* * pnp_identify() * * autoconfiguration of pnp devices. This routine just runs the * isolation protocol over several ports, until one is successful. * * may be called more than once ? * */ static void pnp_identify(driver_t *driver, device_t parent) { int num_pnp_devs; /* Try various READ_DATA ports from 0x203-0x3ff */ for (pnp_rd_port = 0x80; (pnp_rd_port < 0xff); pnp_rd_port += 0x10) { if (bootverbose) printf("pnp_identify: Trying Read_Port at %x\n", (pnp_rd_port << 2) | 0x3); num_pnp_devs = pnp_isolation_protocol(parent); if (num_pnp_devs) break; } if (bootverbose) printf("PNP Identify complete\n"); } static device_method_t pnp_methods[] = { /* Device interface */ DEVMETHOD(device_identify, pnp_identify), { 0, 0 } }; static driver_t pnp_driver = { "pnp", pnp_methods, 1, /* no softc */ }; DRIVER_MODULE(pnp, isa, pnp_driver, 0, 0); diff --git a/sys/powerpc/mpc85xx/atpic.c b/sys/powerpc/mpc85xx/atpic.c index fdee2177c970..8c69af8b4b4d 100644 --- a/sys/powerpc/mpc85xx/atpic.c +++ b/sys/powerpc/mpc85xx/atpic.c @@ -1,360 +1,360 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Marcel Moolenaar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following 
conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #define ATPIC_MASTER 0 #define ATPIC_SLAVE 1 struct atpic_softc { device_t sc_dev; /* I/O port resources for master & slave. 
*/ struct resource *sc_res[2]; int sc_rid[2]; /* Our "routing" interrupt */ struct resource *sc_ires; void *sc_icookie; int sc_irid; int sc_vector[16]; uint8_t sc_mask[2]; }; static int atpic_isa_attach(device_t); static void atpic_isa_identify(driver_t *, device_t); static int atpic_isa_probe(device_t); static void atpic_config(device_t, u_int, enum intr_trigger, enum intr_polarity); static void atpic_dispatch(device_t, struct trapframe *); static void atpic_enable(device_t, u_int, u_int); static void atpic_eoi(device_t, u_int); static void atpic_ipi(device_t, u_int); static void atpic_mask(device_t, u_int); static void atpic_unmask(device_t, u_int); static void atpic_ofw_translate_code(device_t, u_int irq, int code, enum intr_trigger *trig, enum intr_polarity *pol); static device_method_t atpic_isa_methods[] = { /* Device interface */ DEVMETHOD(device_identify, atpic_isa_identify), DEVMETHOD(device_probe, atpic_isa_probe), DEVMETHOD(device_attach, atpic_isa_attach), /* PIC interface */ DEVMETHOD(pic_config, atpic_config), DEVMETHOD(pic_dispatch, atpic_dispatch), DEVMETHOD(pic_enable, atpic_enable), DEVMETHOD(pic_eoi, atpic_eoi), DEVMETHOD(pic_ipi, atpic_ipi), DEVMETHOD(pic_mask, atpic_mask), DEVMETHOD(pic_unmask, atpic_unmask), DEVMETHOD(pic_translate_code, atpic_ofw_translate_code), { 0, 0 }, }; static driver_t atpic_isa_driver = { "atpic", atpic_isa_methods, sizeof(struct atpic_softc) }; static struct isa_pnp_id atpic_ids[] = { { 0x0000d041 /* PNP0000 */, "AT interrupt controller" }, { 0 } }; DRIVER_MODULE(atpic, isa, atpic_isa_driver, 0, 0); ISA_PNP_INFO(atpic_ids); static __inline uint8_t atpic_read(struct atpic_softc *sc, int icu, int ofs) { uint8_t val; val = bus_read_1(sc->sc_res[icu], ofs); return (val); } static __inline void atpic_write(struct atpic_softc *sc, int icu, int ofs, uint8_t val) { bus_write_1(sc->sc_res[icu], ofs, val); bus_barrier(sc->sc_res[icu], ofs, 2 - ofs, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); } static void atpic_intr(void 
*arg) { atpic_dispatch(arg, NULL); } static void atpic_isa_identify(driver_t *drv, device_t parent) { device_t child; - child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, drv->name, -1); + child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, drv->name, DEVICE_UNIT_ANY); device_set_driver(child, drv); isa_set_logicalid(child, atpic_ids[0].ip_id); isa_set_vendorid(child, atpic_ids[0].ip_id); bus_set_resource(child, SYS_RES_IOPORT, ATPIC_MASTER, IO_ICU1, 2); bus_set_resource(child, SYS_RES_IOPORT, ATPIC_SLAVE, IO_ICU2, 2); /* ISA interrupts are routed through external interrupt 0. */ bus_set_resource(child, SYS_RES_IRQ, 0, 16, 1); } static int atpic_isa_probe(device_t dev) { int res; res = ISA_PNP_PROBE(device_get_parent(dev), dev, atpic_ids); if (res > 0) return (res); device_set_desc(dev, "PC/AT compatible PIC"); return (res); } static void atpic_init(struct atpic_softc *sc, int icu) { sc->sc_mask[icu] = 0xff - ((icu == ATPIC_MASTER) ? 4 : 0); atpic_write(sc, icu, 0, ICW1_RESET | ICW1_IC4); atpic_write(sc, icu, 1, (icu == ATPIC_SLAVE) ? 8 : 0); atpic_write(sc, icu, 1, (icu == ATPIC_SLAVE) ? 
2 : 4); atpic_write(sc, icu, 1, ICW4_8086); atpic_write(sc, icu, 1, sc->sc_mask[icu]); atpic_write(sc, icu, 0, OCW3_SEL | OCW3_RR); } static int atpic_isa_attach(device_t dev) { struct atpic_softc *sc; int error; sc = device_get_softc(dev); sc->sc_dev = dev; error = ENXIO; sc->sc_rid[ATPIC_MASTER] = 0; sc->sc_res[ATPIC_MASTER] = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->sc_rid[ATPIC_MASTER], RF_ACTIVE); if (sc->sc_res[ATPIC_MASTER] == NULL) goto fail; sc->sc_rid[ATPIC_SLAVE] = 1; sc->sc_res[ATPIC_SLAVE] = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->sc_rid[ATPIC_SLAVE], RF_ACTIVE); if (sc->sc_res[ATPIC_SLAVE] == NULL) goto fail; sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE); if (sc->sc_ires == NULL) goto fail; error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_MISC | INTR_MPSAFE, NULL, atpic_intr, dev, &sc->sc_icookie); if (error) goto fail; atpic_init(sc, ATPIC_SLAVE); atpic_init(sc, ATPIC_MASTER); powerpc_register_pic(dev, 0, 16, 0, TRUE); return (0); fail: if (sc->sc_ires != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); if (sc->sc_res[ATPIC_SLAVE] != NULL) bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_rid[ATPIC_SLAVE], sc->sc_res[ATPIC_SLAVE]); if (sc->sc_res[ATPIC_MASTER] != NULL) bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_rid[ATPIC_MASTER], sc->sc_res[ATPIC_MASTER]); return (error); } /* * PIC interface. 
*/ static void atpic_config(device_t dev, u_int irq, enum intr_trigger trig, enum intr_polarity pol) { } static void atpic_dispatch(device_t dev, struct trapframe *tf) { struct atpic_softc *sc; uint8_t irq; sc = device_get_softc(dev); atpic_write(sc, ATPIC_MASTER, 0, OCW3_SEL | OCW3_P); irq = atpic_read(sc, ATPIC_MASTER, 0); atpic_write(sc, ATPIC_MASTER, 0, OCW3_SEL | OCW3_RR); if ((irq & 0x80) == 0) return; if (irq == 0x82) { atpic_write(sc, ATPIC_SLAVE, 0, OCW3_SEL | OCW3_P); irq = atpic_read(sc, ATPIC_SLAVE, 0) + 8; atpic_write(sc, ATPIC_SLAVE, 0, OCW3_SEL | OCW3_RR); if ((irq & 0x80) == 0) return; } powerpc_dispatch_intr(sc->sc_vector[irq & 0x0f], tf); } static void atpic_enable(device_t dev, u_int irq, u_int vector) { struct atpic_softc *sc; sc = device_get_softc(dev); sc->sc_vector[irq] = vector; atpic_unmask(dev, irq); } static void atpic_eoi(device_t dev, u_int irq) { struct atpic_softc *sc; sc = device_get_softc(dev); if (irq > 7) atpic_write(sc, ATPIC_SLAVE, 0, OCW2_EOI); atpic_write(sc, ATPIC_MASTER, 0, OCW2_EOI); } static void atpic_ipi(device_t dev, u_int cpu) { /* No SMP support. 
*/ } static void atpic_mask(device_t dev, u_int irq) { struct atpic_softc *sc; sc = device_get_softc(dev); if (irq > 7) { sc->sc_mask[ATPIC_SLAVE] |= 1 << (irq - 8); atpic_write(sc, ATPIC_SLAVE, 1, sc->sc_mask[ATPIC_SLAVE]); } else { sc->sc_mask[ATPIC_MASTER] |= 1 << irq; atpic_write(sc, ATPIC_MASTER, 1, sc->sc_mask[ATPIC_MASTER]); } } static void atpic_unmask(device_t dev, u_int irq) { struct atpic_softc *sc; sc = device_get_softc(dev); if (irq > 7) { sc->sc_mask[ATPIC_SLAVE] &= ~(1 << (irq - 8)); atpic_write(sc, ATPIC_SLAVE, 1, sc->sc_mask[ATPIC_SLAVE]); } else { sc->sc_mask[ATPIC_MASTER] &= ~(1 << irq); atpic_write(sc, ATPIC_MASTER, 1, sc->sc_mask[ATPIC_MASTER]); } } static void atpic_ofw_translate_code(device_t dev, u_int irq, int code, enum intr_trigger *trig, enum intr_polarity *pol) { switch (code) { case 0: /* Active L level */ *trig = INTR_TRIGGER_LEVEL; *pol = INTR_POLARITY_LOW; break; case 1: /* Active H level */ *trig = INTR_TRIGGER_LEVEL; *pol = INTR_POLARITY_HIGH; break; case 2: /* H to L edge */ *trig = INTR_TRIGGER_EDGE; *pol = INTR_POLARITY_LOW; break; case 3: /* L to H edge */ *trig = INTR_TRIGGER_EDGE; *pol = INTR_POLARITY_HIGH; break; default: *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; } } diff --git a/sys/x86/bios/vpd.c b/sys/x86/bios/vpd.c index 5ad194d28bba..8b68dcd39a66 100644 --- a/sys/x86/bios/vpd.c +++ b/sys/x86/bios/vpd.c @@ -1,307 +1,307 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2003 Matthew N. Dodd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * VPD decoder for IBM systems (Thinkpads) * http://www-1.ibm.com/support/docview.wss?uid=psg1MIGR-45120 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Vital Product Data */ struct vpd { u_int16_t Header; /* 0x55AA */ u_int8_t Signature[3]; /* Always 'VPD' */ u_int8_t Length; /* Sructure Length */ u_int8_t Reserved[7]; /* Reserved */ u_int8_t BuildID[9]; /* BIOS Build ID */ u_int8_t BoxSerial[7]; /* Box Serial Number */ u_int8_t PlanarSerial[11]; /* Motherboard Serial Number */ u_int8_t MachType[7]; /* Machine Type/Model */ u_int8_t Checksum; /* Checksum */ } __packed; struct vpd_softc { device_t dev; struct resource * res; int rid; struct vpd * vpd; struct sysctl_ctx_list ctx; char BuildID[10]; char BoxSerial[8]; char PlanarSerial[12]; char MachineType[5]; char MachineModel[4]; }; #define VPD_START 0xf0000 #define VPD_STEP 0x10 #define VPD_OFF 2 #define VPD_LEN 3 #define VPD_SIG "VPD" #define RES2VPD(res) 
((struct vpd *)rman_get_virtual(res)) #define ADDR2VPD(addr) ((struct vpd *)BIOS_PADDRTOVADDR(addr)) static void vpd_identify (driver_t *, device_t); static int vpd_probe (device_t); static int vpd_attach (device_t); static int vpd_detach (device_t); static int vpd_modevent (module_t, int, void *); static int vpd_cksum (struct vpd *); static SYSCTL_NODE(_hw, OID_AUTO, vpd, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd, OID_AUTO, machine, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, type, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, model, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd, OID_AUTO, build_id, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd, OID_AUTO, serial, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, box, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, planar, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, NULL); static void vpd_identify (driver_t *driver, device_t parent) { device_t child; u_int32_t addr; int length; int rid; if (!device_is_alive(parent)) return; addr = bios_sigsearch(VPD_START, VPD_SIG, VPD_LEN, VPD_STEP, VPD_OFF); if (addr != 0) { rid = 0; length = ADDR2VPD(addr)->Length; - child = BUS_ADD_CHILD(parent, 5, "vpd", -1); + child = BUS_ADD_CHILD(parent, 5, "vpd", DEVICE_UNIT_ANY); device_set_driver(child, driver); bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); device_set_desc(child, "Vital Product Data Area"); } return; } static int vpd_probe (device_t dev) { struct resource *res; int rid; int error; error = 0; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(dev, "Unable to allocate memory resource.\n"); error = ENOMEM; goto bad; } if (vpd_cksum(RES2VPD(res))) device_printf(dev, "VPD checksum failed. 
BIOS update may be required.\n"); bad: if (res) bus_release_resource(dev, SYS_RES_MEMORY, rid, res); return (error); } static int vpd_attach (device_t dev) { struct vpd_softc *sc; char unit[4]; int error; sc = device_get_softc(dev); error = 0; sc->dev = dev; sc->rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "Unable to allocate memory resource.\n"); error = ENOMEM; goto bad; } sc->vpd = RES2VPD(sc->res); snprintf(unit, sizeof(unit), "%d", device_get_unit(sc->dev)); snprintf(sc->MachineType, 5, "%.4s", sc->vpd->MachType); snprintf(sc->MachineModel, 4, "%.3s", sc->vpd->MachType+4); snprintf(sc->BuildID, 10, "%.9s", sc->vpd->BuildID); snprintf(sc->BoxSerial, 8, "%.7s", sc->vpd->BoxSerial); snprintf(sc->PlanarSerial, 12, "%.11s", sc->vpd->PlanarSerial); sysctl_ctx_init(&sc->ctx); SYSCTL_ADD_STRING(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_type), OID_AUTO, unit, CTLFLAG_RD, sc->MachineType, 0, NULL); SYSCTL_ADD_STRING(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_model), OID_AUTO, unit, CTLFLAG_RD, sc->MachineModel, 0, NULL); SYSCTL_ADD_STRING(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw_vpd_build_id), OID_AUTO, unit, CTLFLAG_RD, sc->BuildID, 0, NULL); SYSCTL_ADD_STRING(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_box), OID_AUTO, unit, CTLFLAG_RD, sc->BoxSerial, 0, NULL); SYSCTL_ADD_STRING(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_planar), OID_AUTO, unit, CTLFLAG_RD, sc->PlanarSerial, 0, NULL); device_printf(dev, "Machine Type: %.4s, Model: %.3s, Build ID: %.9s\n", sc->MachineType, sc->MachineModel, sc->BuildID); device_printf(dev, "Box Serial: %.7s, Planar Serial: %.11s\n", sc->BoxSerial, sc->PlanarSerial); return (0); bad: if (sc->res) bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); return (error); } static int vpd_detach (device_t dev) { struct vpd_softc *sc; sc = device_get_softc(dev); if (sc->res) bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); 
sysctl_ctx_free(&sc->ctx); return (0); } static int vpd_modevent (module_t mod, int what, void *arg) { device_t * devs; int count; int i; switch (what) { case MOD_LOAD: break; case MOD_UNLOAD: devclass_get_devices(devclass_find("vpd"), &devs, &count); for (i = 0; i < count; i++) { device_delete_child(device_get_parent(devs[i]), devs[i]); } break; default: break; } return (0); } static device_method_t vpd_methods[] = { /* Device interface */ DEVMETHOD(device_identify, vpd_identify), DEVMETHOD(device_probe, vpd_probe), DEVMETHOD(device_attach, vpd_attach), DEVMETHOD(device_detach, vpd_detach), { 0, 0 } }; static driver_t vpd_driver = { "vpd", vpd_methods, sizeof(struct vpd_softc), }; DRIVER_MODULE(vpd, nexus, vpd_driver, vpd_modevent, 0); MODULE_VERSION(vpd, 1); /* * Perform a checksum over the VPD structure, starting with * the BuildID. (Jean Delvare ) */ static int vpd_cksum (struct vpd *v) { u_int8_t *ptr; u_int8_t cksum; int i; ptr = (u_int8_t *)v; cksum = 0; for (i = offsetof(struct vpd, BuildID); i < v->Length ; i++) cksum += ptr[i]; return (cksum); } diff --git a/sys/x86/isa/orm.c b/sys/x86/isa/orm.c index f8afc17cac71..bb2663332550 100644 --- a/sys/x86/isa/orm.c +++ b/sys/x86/isa/orm.c @@ -1,187 +1,187 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Nikolai Saoukh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Driver to take care of holes in ISA I/O memory occupied * by option rom(s) */ #include #include #include #include #include #include #include #include #include #include #include #define IOMEM_START 0x0a0000 #define IOMEM_STEP 0x000800 #define IOMEM_END 0x100000 #define ORM_ID 0x00004d3e static struct isa_pnp_id orm_ids[] = { { ORM_ID, NULL }, /* ORM0000 */ { 0, NULL }, }; #define MAX_ROMS 32 struct orm_softc { int rnum; int rid[MAX_ROMS]; struct resource *res[MAX_ROMS]; }; static int orm_probe(device_t dev) { return (ISA_PNP_PROBE(device_get_parent(dev), dev, orm_ids)); } static int orm_attach(device_t dev) { return (0); } static void orm_identify(driver_t* driver, device_t parent) { bus_space_handle_t bh; bus_space_tag_t bt; device_t child; u_int32_t chunk = IOMEM_START; struct resource *res; int rid; u_int32_t rom_size; struct orm_softc *sc; u_int8_t buf[3]; if (resource_disabled("orm", 0)) return; - child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, "orm", -1); + child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, "orm", DEVICE_UNIT_ANY); device_set_driver(child, driver); isa_set_logicalid(child, ORM_ID); isa_set_vendorid(child, ORM_ID); sc = device_get_softc(child); sc->rnum = 0; while (sc->rnum < 
MAX_ROMS && chunk < IOMEM_END) { bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, IOMEM_STEP); rid = sc->rnum; res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); chunk += IOMEM_STEP; continue; } bt = rman_get_bustag(res); bh = rman_get_bushandle(res); bus_space_read_region_1(bt, bh, 0, buf, sizeof(buf)); /* * We need to release and delete the resource since we're * changing its size, or the rom isn't there. There * is a checksum field in the ROM to prevent false * positives. However, some common hardware (IBM thinkpads) * neglects to put a valid checksum in the ROM, so we do * not double check the checksum here. On the ISA bus * areas that have no hardware read back as 0xff, so the * tests to see if we have 0x55 followed by 0xaa are * generally sufficient. */ bus_release_resource(child, SYS_RES_MEMORY, rid, res); bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); if (buf[0] != 0x55 || buf[1] != 0xAA || (buf[2] & 0x03) != 0) { chunk += IOMEM_STEP; continue; } rom_size = buf[2] << 9; bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, rom_size); rid = sc->rnum; res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, 0); if (res == NULL) { bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); chunk += IOMEM_STEP; continue; } sc->rid[sc->rnum] = rid; sc->res[sc->rnum] = res; sc->rnum++; chunk += rom_size; } if (sc->rnum == 0) device_delete_child(parent, child); else if (sc->rnum == 1) device_set_desc(child, "ISA Option ROM"); else device_set_desc(child, "ISA Option ROMs"); } static int orm_detach(device_t dev) { int i; struct orm_softc *sc = device_get_softc(dev); for (i = 0; i < sc->rnum; i++) bus_release_resource(dev, SYS_RES_MEMORY, sc->rid[i], sc->res[i]); return (0); } static device_method_t orm_methods[] = { /* Device interface */ DEVMETHOD(device_identify, orm_identify), DEVMETHOD(device_probe, orm_probe), DEVMETHOD(device_attach, orm_attach), 
DEVMETHOD(device_detach, orm_detach), { 0, 0 } }; static driver_t orm_driver = { "orm", orm_methods, sizeof (struct orm_softc) }; DRIVER_MODULE(orm, isa, orm_driver, 0, 0); ISA_PNP_INFO(orm_ids); diff --git a/sys/x86/pci/qpi.c b/sys/x86/pci/qpi.c index 085afd49852b..278489dfe10a 100644 --- a/sys/x86/pci/qpi.c +++ b/sys/x86/pci/qpi.c @@ -1,303 +1,303 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2010 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This driver provides a pseudo-bus to enumerate the PCI buses * present on a system using a QPI chipset. It creates a qpi0 bus that * is a child of nexus0 and then creates Host-PCI bridges as a * child of that. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" struct qpi_device { int qd_pcibus; }; static MALLOC_DEFINE(M_QPI, "qpidrv", "qpi system device"); static void qpi_identify(driver_t *driver, device_t parent) { int do_qpi; /* Check CPUID to ensure this is an i7 CPU of some sort. */ if (cpu_vendor_id != CPU_VENDOR_INTEL || CPUID_TO_FAMILY(cpu_id) != 0x6) return; /* Only discover buses with configuration devices if allowed by user */ do_qpi = 0; TUNABLE_INT_FETCH("hw.attach_intel_csr_pci", &do_qpi); if (!do_qpi) return; /* PCI config register access is required. */ if (pci_cfgregopen() == 0) return; /* Add a qpi bus device. */ if (BUS_ADD_CHILD(parent, 20, "qpi", -1) == NULL) panic("Failed to add qpi bus"); } static int qpi_probe(device_t dev) { device_set_desc(dev, "QPI system bus"); return (BUS_PROBE_SPECIFIC); } /* * Look for a PCI bus with the specified bus address. If one is found, * add a pcib device and return 0. Otherwise, return an error code. */ static int qpi_probe_pcib(device_t dev, int bus) { struct qpi_device *qdev; device_t child; uint32_t devid; int s; /* * If a PCI bus already exists for this bus number, then * fail. */ if (pci_find_bsf(bus, 0, 0) != NULL) return (EEXIST); /* * Attempt to read the device id for every slot, function 0 on * the bus. If all read values are 0xffffffff this means that * the bus is not present. 
*/ for (s = 0; s <= PCI_SLOTMAX; s++) { devid = pci_cfgregread(0, bus, s, 0, PCIR_DEVVENDOR, 4); if (devid != 0xffffffff) break; } if (devid == 0xffffffff) return (ENOENT); if ((devid & 0xffff) != 0x8086) { if (bootverbose) device_printf(dev, "Device at pci%d.%d.0 has non-Intel vendor 0x%x\n", bus, s, devid & 0xffff); return (ENXIO); } - child = BUS_ADD_CHILD(dev, 0, "pcib", -1); + child = BUS_ADD_CHILD(dev, 0, "pcib", DEVICE_UNIT_ANY); if (child == NULL) panic("%s: failed to add pci bus %d", device_get_nameunit(dev), bus); qdev = malloc(sizeof(struct qpi_device), M_QPI, M_WAITOK); qdev->qd_pcibus = bus; device_set_ivars(child, qdev); return (0); } static int qpi_attach(device_t dev) { int bus; /* * Each processor socket has a dedicated PCI bus, sometimes * not enumerated by ACPI. Probe all unattached buses from 0 * to 255. */ for (bus = PCI_BUSMAX; bus >= 0; bus--) qpi_probe_pcib(dev, bus); return (bus_generic_attach(dev)); } static int qpi_print_child(device_t bus, device_t child) { struct qpi_device *qdev; int retval = 0; qdev = device_get_ivars(child); retval += bus_print_child_header(bus, child); if (qdev->qd_pcibus != -1) retval += printf(" pcibus %d", qdev->qd_pcibus); retval += bus_print_child_footer(bus, child); return (retval); } static int qpi_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct qpi_device *qdev; qdev = device_get_ivars(child); switch (which) { case PCIB_IVAR_BUS: *result = qdev->qd_pcibus; break; default: return (ENOENT); } return (0); } static device_method_t qpi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, qpi_identify), DEVMETHOD(device_probe, qpi_probe), DEVMETHOD(device_attach, qpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, qpi_print_child), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, qpi_read_ivar), 
DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; DEFINE_CLASS_0(qpi, qpi_driver, qpi_methods, 0); DRIVER_MODULE(qpi, nexus, qpi_driver, 0, 0); static int qpi_pcib_probe(device_t dev) { device_set_desc(dev, "QPI Host-PCI bridge"); return (BUS_PROBE_SPECIFIC); } static int qpi_pcib_attach(device_t dev) { device_add_child(dev, "pci", DEVICE_UNIT_ANY); return (bus_generic_attach(dev)); } static int qpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = pcib_get_bus(dev); return (0); default: return (ENOENT); } } static struct resource * qpi_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(0, child, rid, start, end, count, flags)); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int qpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; bus = device_get_parent(pcib); return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data)); } static device_method_t qpi_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qpi_pcib_probe), DEVMETHOD(device_attach, qpi_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, qpi_pcib_read_ivar), DEVMETHOD(bus_alloc_resource, qpi_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, 
legacy_pcib_adjust_resource), DEVMETHOD(bus_release_resource, legacy_pcib_release_resource), DEVMETHOD(bus_activate_resource, legacy_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, legacy_pcib_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, legacy_pcib_read_config), DEVMETHOD(pcib_write_config, legacy_pcib_write_config), DEVMETHOD(pcib_alloc_msi, legacy_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, legacy_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, qpi_pcib_map_msi), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, qpi_pcib_driver, qpi_pcib_methods, 0); DRIVER_MODULE(pcib, qpi, qpi_pcib_driver, 0, 0);