diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -281,6 +281,7 @@ dev/firmware/arm/scmi_smc.c optional fdt scmi dev/firmware/arm/scmi_virtio.c optional fdt scmi virtio dev/firmware/arm/scmi_shmem.c optional fdt scmi +dev/firmware/arm/scmi_raw.c optional fdt scmi scmi_raw dev/gpio/pl061.c optional pl061 gpio dev/gpio/pl061_acpi.c optional pl061 gpio acpi diff --git a/sys/conf/options.arm64 b/sys/conf/options.arm64 --- a/sys/conf/options.arm64 +++ b/sys/conf/options.arm64 @@ -22,6 +22,10 @@ # Bhyve VMM opt_global.h +# SCMI +SCMI_RAW opt_global.h +SCMI_RAW_COEX opt_global.h + # SoC Support SOC_ALLWINNER_A64 opt_soc.h SOC_ALLWINNER_H5 opt_soc.h diff --git a/sys/dev/firmware/arm/scmi.h b/sys/dev/firmware/arm/scmi.h --- a/sys/dev/firmware/arm/scmi.h +++ b/sys/dev/firmware/arm/scmi.h @@ -74,6 +74,8 @@ struct scmi_transport_desc trs_desc; struct scmi_transport *trs; struct sysctl_oid *sysctl_root; + void *raw; + bool drivers_disabled; }; struct scmi_msg { diff --git a/sys/dev/firmware/arm/scmi_raw.h b/sys/dev/firmware/arm/scmi_raw.h new file mode 100644 --- /dev/null +++ b/sys/dev/firmware/arm/scmi_raw.h @@ -0,0 +1,58 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2024 Arm Ltd + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _ARM64_SCMI_SCMI_RAW_H_ +#define _ARM64_SCMI_SCMI_RAW_H_ + +#include +#include + +#ifdef SCMI_RAW +int scmi_raw_init(device_t dev); +void scmi_raw_cleanup(device_t dev); +void scmi_raw_reply_report(device_t dev, void *msg, uint32_t len); +void scmi_raw_error_report(device_t dev, void *msg, uint32_t len); +#else +static inline int scmi_raw_init(device_t dev) +{ + return (0); +} + +static inline void scmi_raw_cleanup(device_t dev) +{ +} + +static inline void scmi_raw_reply_report(device_t dev, void *msg, uint32_t len) +{ +} + +static inline void scmi_raw_error_report(device_t dev, void *msg, uint32_t len) +{ +} +#endif + +#endif /* !_ARM64_SCMI_SCMI_RAW_H_ */ diff --git a/sys/dev/firmware/arm/scmi_raw.c b/sys/dev/firmware/arm/scmi_raw.c new file mode 100644 --- /dev/null +++ b/sys/dev/firmware/arm/scmi_raw.c @@ -0,0 +1,611 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2024 Arm Ltd + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "scmi.h" +#include "scmi_protocols.h" +#include "scmi_raw.h" + +static const char *rbq_names[] = { + "rbq_replies", + "rbq_notifs", + "rbq_errs", +}; + +enum { + RB_REPLIES_Q, + RB_NOTIF_Q, + RB_ERRS_Q, + RB_QUEUES_MAX +}; + +enum { + Q_FREE, + Q_READ, + Q_MAX +}; + +struct scmi_raw_buf_queue; + +struct scmi_raw_buf { + STAILQ_ENTRY(scmi_raw_buf) next; + struct scmi_raw_buf_queue *q; + uint32_t len; + off_t offset; + uint32_t size; + uint32_t hdr; + uint8_t payld[]; +}; + +STAILQ_HEAD(rb_head, scmi_raw_buf); + +struct scmi_raw_buf_queue { + device_t dev; + const char *name; + struct mtx mtx[Q_MAX]; + struct mtx wait_mtx[Q_MAX]; + struct rb_head head[Q_MAX]; + struct selinfo sip; + int cnt[Q_MAX]; +}; + +enum { + RAW_DEV_MESG, + RAW_DEV_MESG_ASYNC, + RAW_DEV_ERRS, + RAW_DEV_NOTIFS, + RAW_DEV_RESET, + RAW_DEV_MAX +}; + +static const char *cdevs_str[RAW_DEV_MAX] = { + "message", + "message_async", + "errors", + "notification", + "reset" +}; + +static const int queues_map[RAW_DEV_MAX] = { + RB_REPLIES_Q, + RB_REPLIES_Q, + RB_ERRS_Q, + RB_NOTIF_Q, + RB_QUEUES_MAX +}; + +struct scmi_raw { + bool coex; + struct cdev *cdevs[RAW_DEV_MAX]; + device_t dev; + struct scmi_raw_buf_queue *rbqs[RB_QUEUES_MAX]; +}; + +static d_read_t scmi_raw_device_read; +static d_poll_t scmi_raw_device_poll; +static d_write_t scmi_raw_device_write; +static d_close_t scmi_raw_device_close; + +static struct cdevsw scmi_raw_cdevsw = { + .d_version = D_VERSION, + .d_read = scmi_raw_device_read, + .d_poll = scmi_raw_device_poll, + .d_write = scmi_raw_device_write, + .d_close = scmi_raw_device_close, + .d_name = "scmi_raw", + .d_flags = D_TRACKCLOSE, +}; + +static d_write_t scmi_raw_reset_device_write; + +static struct cdevsw scmi_raw_reset_cdevsw = { + .d_version = D_VERSION, + .d_write = scmi_raw_reset_device_write, + .d_name = "scmi_raw", +}; + +static void scmi_raw_buf_insert(struct scmi_raw_buf_queue *, int, + struct scmi_raw_buf *); +static void scmi_raw_buf_put(struct scmi_raw_buf *); +static void scmi_raw_buf_enqueue(struct scmi_raw_buf *); +static struct scmi_raw_buf * + scmi_raw_buf_extract(struct scmi_raw_buf_queue *, int); +static bool scmi_raw_buf_queue_empty(struct scmi_raw_buf_queue *, int); +static int scmi_raw_buf_queue_wait_not_empty(struct scmi_raw_buf_queue *, + int); +static struct scmi_raw_buf * + scmi_raw_buf_get(struct scmi_raw_buf_queue *); +static struct scmi_raw_buf * + scmi_raw_buf_dequeue(struct scmi_raw_buf_queue *); +static void scmi_raw_buf_queue_reset(struct scmi_raw_buf_queue *); +static struct scmi_raw_buf_queue * + scmi_raw_buf_queue_allocate(device_t, const char *, const int, + const int); +static void scmi_raw_buf_queue_free(struct scmi_raw_buf_queue *); +static void scmi_raw_buf_queues_init(struct scmi_raw *); +static void scmi_raw_buf_queues_cleanup(struct scmi_raw *); +static int scmi_raw_message_inject(struct scmi_raw_buf_queue *, + struct uio *); +static void scmi_raw_report(device_t, int, void *, uint32_t); + +static void +scmi_raw_buf_insert(struct scmi_raw_buf_queue *rbq, int q_idx, struct scmi_raw_buf *rb) +{ + mtx_lock_spin(&rbq->mtx[q_idx]); + STAILQ_INSERT_TAIL(&rbq->head[q_idx], rb, next); + rbq->cnt[q_idx]++; + mtx_unlock_spin(&rbq->mtx[q_idx]); +} + +static void +scmi_raw_buf_put(struct scmi_raw_buf *rb) +{ + rb->len = 0; + 
rb->offset = 0;
+	scmi_raw_buf_insert(rb->q, Q_FREE, rb);
+}
+
+static void
+scmi_raw_buf_enqueue(struct scmi_raw_buf *rb)
+{
+	scmi_raw_buf_insert(rb->q, Q_READ, rb);
+	wakeup(&rb->q->head[Q_READ]);
+	selwakeup(&rb->q->sip);
+}
+
+static struct scmi_raw_buf *
+scmi_raw_buf_extract(struct scmi_raw_buf_queue *rbq, int q_idx)
+{
+	struct scmi_raw_buf *rb = NULL;
+
+	mtx_lock_spin(&rbq->mtx[q_idx]);
+	if (!STAILQ_EMPTY(&rbq->head[q_idx])) {
+		rb = STAILQ_FIRST(&rbq->head[q_idx]);
+		STAILQ_REMOVE_HEAD(&rbq->head[q_idx], next);
+		rbq->cnt[q_idx]--;
+	}
+	mtx_unlock_spin(&rbq->mtx[q_idx]);
+
+	return (rb);
+}
+
+static bool
+scmi_raw_buf_queue_empty(struct scmi_raw_buf_queue *rbq, int q_idx)
+{
+	bool ret;
+
+	mtx_lock_spin(&rbq->mtx[q_idx]);
+	ret = STAILQ_EMPTY(&rbq->head[q_idx]);
+	mtx_unlock_spin(&rbq->mtx[q_idx]);
+
+	return (ret);
+}
+
+static int
+scmi_raw_buf_queue_wait_not_empty(struct scmi_raw_buf_queue *rbq, int q_idx)
+{
+	int ret = 0;
+
+	if (scmi_raw_buf_queue_empty(rbq, q_idx)) {
+		mtx_lock(&rbq->wait_mtx[q_idx]);
+		ret = msleep(&rbq->head[q_idx], &rbq->wait_mtx[q_idx],
+		    PCATCH | PDROP, "Q_WAIT", 0);
+	}
+
+	return (ret);
+}
+
+static struct scmi_raw_buf *
+scmi_raw_buf_get(struct scmi_raw_buf_queue *rbq)
+{
+	return (scmi_raw_buf_extract(rbq, Q_FREE));
+}
+
+static struct scmi_raw_buf *
+scmi_raw_buf_dequeue(struct scmi_raw_buf_queue *rbq)
+{
+	return (scmi_raw_buf_extract(rbq, Q_READ));
+}
+
+static void
+scmi_raw_buf_queue_reset(struct scmi_raw_buf_queue *rbq)
+{
+	struct scmi_raw_buf *rb;
+
+	do {
+		rb = scmi_raw_buf_dequeue(rbq);
+		if (rb != NULL)
+			scmi_raw_buf_put(rb);
+	} while (rb != NULL);
+}
+
+static struct scmi_raw_buf_queue *
+scmi_raw_buf_queue_allocate(device_t dev, const char *tag, const int max_msg,
+    const int max_payld_sz)
+{
+	struct scmi_raw_buf_queue *rbq;
+
+	rbq = malloc(sizeof(*rbq), M_DEVBUF, M_ZERO | M_WAITOK);
+	rbq->dev = dev;
+	rbq->name = tag;
+
+	for (int q_idx = 0; q_idx < Q_MAX; q_idx++) {
+		mtx_init(&rbq->mtx[q_idx], tag, "SCMI", MTX_SPIN);
+		mtx_init(&rbq->wait_mtx[q_idx], tag, "SCMI", MTX_DEF);
+		STAILQ_INIT(&rbq->head[q_idx]);
+
+		if (q_idx != Q_FREE)
+			continue;
+
+		for (int i = 0; i < max_msg; i++) {
+			struct scmi_raw_buf *rb;
+
+			rb = malloc(sizeof(*rb) + max_payld_sz,
+			    M_DEVBUF, M_ZERO | M_WAITOK);
+
+			rb->size = sizeof(rb->hdr) + max_payld_sz;
+			rb->q = rbq;
+			scmi_raw_buf_put(rb);
+		}
+	}
+
+	return (rbq);
+}
+
+static void
+scmi_raw_buf_queue_free(struct scmi_raw_buf_queue *rbq)
+{
+	struct scmi_raw_buf *rb;
+
+	/* Forcibly collecting any pending transactions to Q_FREE */
+	scmi_raw_buf_queue_reset(rbq);
+	do {
+		/* Emptying Q_FREE and freeing buffers */
+		rb = scmi_raw_buf_get(rbq);
+		if (rb == NULL)
+			break;
+
+		free(rb, M_DEVBUF);
+	} while (1);
+
+	for (int q_idx = 0; q_idx < Q_MAX; q_idx++) {
+		mtx_destroy(&rbq->mtx[q_idx]);
+		mtx_destroy(&rbq->wait_mtx[q_idx]);
+	}
+
+	free(rbq, M_DEVBUF);
+}
+
+static void
+scmi_raw_buf_queues_init(struct scmi_raw *raw)
+{
+	struct scmi_softc *sc;
+
+	sc = device_get_softc(raw->dev);
+	for (int i = 0; i < RB_QUEUES_MAX; i++)
+		raw->rbqs[i] = scmi_raw_buf_queue_allocate(raw->dev, rbq_names[i],
+		    SCMI_MAX_MSG(sc), SCMI_MAX_MSG_SIZE(sc));
+}
+
+static void
+scmi_raw_buf_queues_cleanup(struct scmi_raw *raw)
+{
+	for (int i = 0; i < RB_QUEUES_MAX; i++)
+		scmi_raw_buf_queue_free(raw->rbqs[i]);
+}
+
+static void
+cdev_rb_dtor(void *data)
+{
+	struct scmi_raw_buf *rb = data;
+
+	scmi_raw_buf_put(rb);
+}
+
+static int
+scmi_raw_message_inject(struct scmi_raw_buf_queue *rbq, struct uio *uiop)
+{
+	ssize_t len = uiop->uio_resid;
+	uint32_t payld_len = 0;
+	struct scmi_msg *in_msg;
+	int ret;
+
+	if (len < sizeof(in_msg->hdr))
+		return (EINVAL);
+
+	if (len > sizeof(in_msg->hdr))
+		payld_len = len - sizeof(in_msg->hdr);
+
+	in_msg = scmi_msg_get(rbq->dev, payld_len, 0);
+	if (in_msg == NULL)
+		return (EINVAL);
+
+	ret = uiomove_frombuf(&in_msg->hdr, in_msg->tx_len, uiop);
+	if (ret != 0)
+		goto err;
+
+	ret = scmi_request_tx(rbq->dev, &in_msg->payld[0]);
+	if (ret != 0)
+		goto err;
+
+	ret = scmi_msg_async_enqueue(in_msg);
+	if (ret != 0)
+		goto err;
+
+	/* Once done reset the offset */
+	uiop->uio_offset = 0;
+
+	return (ret);
+
+err:
+	scmi_msg_put(rbq->dev, in_msg);
+
+	return (ret);
+}
+
+static int
+scmi_raw_device_poll(struct cdev *cdev, int poll_events, struct thread *td)
+{
+	struct scmi_raw_buf_queue *rbq = cdev->si_drv1;
+
+	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
+		if (!scmi_raw_buf_queue_empty(rbq, Q_READ))
+			return (poll_events & (POLLIN | POLLRDNORM));
+		else
+			selrecord(td, &rbq->sip);
+	}
+
+	return (0);
+}
+
+static int
+scmi_raw_device_read(struct cdev *cdev, struct uio *uiop, int ioflag)
+{
+	struct scmi_raw_buf_queue *rbq = cdev->si_drv1;
+	struct scmi_raw_buf *rb = NULL;
+	int ret;
+
+	/* Any raw_buf already in the process of being read ? ... */
+	ret = devfs_get_cdevpriv((void **)&rb);
+	if (ret == ENOENT) {
+		do {
+			/* ...no...so pick the next in the queue, if any */
+			rb = scmi_raw_buf_dequeue(rbq);
+			if (rb != NULL)
+				break;
+
+			if (ioflag & O_NONBLOCK)
+				return (EAGAIN);
+
+			/* block waiting for a message to arrive */
+			ret = scmi_raw_buf_queue_wait_not_empty(rbq, Q_READ);
+			if (ret != 0)
+				return (ret);
+		} while (rb == NULL);
+
+		devfs_set_cdevpriv(rb, cdev_rb_dtor);
+	} else if (ret != 0) {
+		return (ret);
+	}
+
+	if (rb->offset >= rb->len) {
+		/* Whole snooped message has been read, return EOF and free raw_buf */
+		uiop->uio_offset = 0;
+		devfs_clear_cdevpriv();
+		return (0);
+	}
+
+	ret = uiomove_frombuf((uint8_t *)&rb->hdr + rb->offset,
+	    rb->len - rb->offset, uiop);
+	if (ret != 0) {
+		devfs_clear_cdevpriv();
+		return (ret);
+	}
+
+	rb->offset += uiop->uio_offset;
+	if (rb->offset < rb->len)
+		uiop->uio_offset = 0;
+
+	return (0);
+}
+
+static int
+scmi_raw_device_write(struct cdev *cdev, struct uio *uio, int ioflag)
+{
+	struct scmi_raw_buf_queue *rbq = cdev->si_drv1;
+
+	if (dev2unit(cdev) == RAW_DEV_MESG_ASYNC) {
+		printf("SCMI RAW - Asynchronous Messages NOT supported\n");
+		return (EOPNOTSUPP);
+	}
+
+	return (scmi_raw_message_inject(rbq, uio));
+}
+
+static int
+scmi_raw_device_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+	struct scmi_raw_buf *rb = NULL;
+	int ret;
+
+	/* Any raw_buf still pending ?
*/ + ret = devfs_get_cdevpriv((void **)&rb); + if (ret == 0) + devfs_clear_cdevpriv(); + + return (0); +} + +static int +scmi_raw_reset_device_write(struct cdev *cdev, struct uio *uio, int ioflag) +{ + struct scmi_raw_buf_queue **rbqs = cdev->si_drv1; + + printf("Resetting all SCMI RAW queues.\n"); + uio->uio_resid = 0; + for (int i = 0; i < RB_QUEUES_MAX; i++) + scmi_raw_buf_queue_reset(rbqs[i]); + + return (0); +} + +static void +scmi_raw_report(device_t dev, int q_idx, void *msg, uint32_t len) +{ + struct scmi_raw_buf *rb; + struct scmi_raw *raw; + struct scmi_softc *sc; + + sc = device_get_softc(dev); + raw = sc->raw; + + rb = scmi_raw_buf_get(raw->rbqs[q_idx]); + if (rb == NULL) { + device_printf(dev, + "SCMI RAW[%s] - Cannot allocate a free raw buffer.\n", + rbq_names[q_idx]); + return; + } + + if (len > rb->size) { + device_printf(dev, "SCMI RAW[%s] - Message too big: %d\n", + rbq_names[q_idx], len); + scmi_raw_buf_put(rb); + return; + } + + memcpy(&rb->hdr, msg, len); + rb->len = len; + + + scmi_raw_buf_enqueue(rb); +} + +void +scmi_raw_reply_report(device_t dev, void *msg, uint32_t len) +{ + return (scmi_raw_report(dev, RB_REPLIES_Q, msg, len)); +} + +void +scmi_raw_error_report(device_t dev, void *msg, uint32_t len) +{ + return (scmi_raw_report(dev, RB_ERRS_Q, msg, len)); +} + +int +scmi_raw_init(device_t dev) +{ + struct scmi_softc *sc; + struct scmi_raw *raw; + int error; + + sc = device_get_softc(dev); + + raw = malloc(sizeof(*raw), M_DEVBUF, M_ZERO | M_WAITOK); + raw->coex = SCMI_RAW_COEX; + raw->dev = dev; + + scmi_raw_buf_queues_init(raw); + + for (int i = RAW_DEV_MESG; i < RAW_DEV_MAX; i++) { + struct make_dev_args args; + + make_dev_args_init(&args); + args.mda_uid = UID_ROOT; + args.mda_gid = GID_WHEEL; + args.mda_unit = i; + if (i != RAW_DEV_RESET) { + args.mda_mode = S_IRWXU; + args.mda_devsw = &scmi_raw_cdevsw; + args.mda_si_drv1 = raw->rbqs[queues_map[i]]; + } else { + args.mda_mode = S_IWUSR; + args.mda_devsw = &scmi_raw_reset_cdevsw; + args.mda_si_drv1 = raw->rbqs; + } + + error = make_dev_s(&args, &raw->cdevs[i], "scmi/raw/%s", cdevs_str[i]); + if (error != 0) + device_printf(dev, "Failed to create RAW device '%s'\n", cdevs_str[i]); + } + + sc->raw = raw; + /* Skip device creation if SCMI RAW was enabled in NON-COEX mode */ + sc->drivers_disabled = !raw->coex; + + device_printf(dev, "SCMI RAW%sInitialized.\n", raw->coex ? " (COEX) " : " "); + + return (0); +} + +void +scmi_raw_cleanup(device_t dev) +{ + struct scmi_softc *sc; + struct scmi_raw *raw; + + sc = device_get_softc(dev); + if (sc->raw == NULL) + return; + + raw = sc->raw; + + for (int i = RAW_DEV_MESG; i < RAW_DEV_MAX; i++) + destroy_dev(raw->cdevs[i]); + + scmi_raw_buf_queues_cleanup(raw); + + free(raw, M_DEVBUF); + + device_printf(dev, "SCMI RAW finalized.\n"); + + return; +}
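Usage note (not part of the patch): a minimal user-space sketch of how the character devices created above could be exercised. The /dev/scmi/raw/message path and the "header followed by payload" layout come from this patch (the devices are root-only, mode 0700); the header bit layout follows the Arm SCMI specification, and the BASE/PROTOCOL_VERSION command is only an illustrative choice.

```c
/*
 * Hypothetical test sketch: inject one SCMI command through
 * /dev/scmi/raw/message and read back the snooped reply.
 * SCMI header layout (per the spec): bits[7:0] message id,
 * bits[9:8] message type, bits[17:10] protocol id, bits[27:18] token.
 */
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	uint8_t buf[128];
	uint32_t hdr;
	ssize_t n;
	int fd;

	fd = open("/dev/scmi/raw/message", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/* BASE protocol (0x10), PROTOCOL_VERSION (0x0), token 0. */
	hdr = (0x10 << 10) | 0x0;
	memcpy(buf, &hdr, sizeof(hdr));
	if (write(fd, buf, sizeof(hdr)) != (ssize_t)sizeof(hdr))
		err(1, "write");

	/* Blocks until the reply is snooped; returns header + payload. */
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		err(1, "read");
	printf("got %zd bytes of reply\n", n);

	close(fd);
	return (0);
}
```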