diff --git a/sys/dev/firmware/arm/scmi.c b/sys/dev/firmware/arm/scmi.c
index 37496136c828..00a229762414 100644
--- a/sys/dev/firmware/arm/scmi.c
+++ b/sys/dev/firmware/arm/scmi.c
@@ -1,650 +1,652 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "scmi.h"
#include "scmi_protocols.h"
#define SCMI_MAX_TOKEN 1024
#define SCMI_HDR_TOKEN_S 18
#define SCMI_HDR_TOKEN_BF (0x3ff)
#define SCMI_HDR_TOKEN_M (SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S)
#define SCMI_HDR_PROTOCOL_ID_S 10
#define SCMI_HDR_PROTOCOL_ID_BF (0xff)
#define SCMI_HDR_PROTOCOL_ID_M \
(SCMI_HDR_PROTOCOL_ID_BF << SCMI_HDR_PROTOCOL_ID_S)
#define SCMI_HDR_MESSAGE_TYPE_S 8
#define SCMI_HDR_MESSAGE_TYPE_BF (0x3)
#define SCMI_HDR_MESSAGE_TYPE_M \
(SCMI_HDR_MESSAGE_TYPE_BF << SCMI_HDR_MESSAGE_TYPE_S)
#define SCMI_HDR_MESSAGE_ID_S 0
#define SCMI_HDR_MESSAGE_ID_BF (0xff)
#define SCMI_HDR_MESSAGE_ID_M \
(SCMI_HDR_MESSAGE_ID_BF << SCMI_HDR_MESSAGE_ID_S)
#define SCMI_MSG_TYPE_CMD 0
#define SCMI_MSG_TYPE_DRESP 2
#define SCMI_MSG_TYPE_NOTIF 3
#define SCMI_MSG_TYPE_CHECK(_h, _t) \
((((_h) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S) == (_t))
#define SCMI_IS_MSG_TYPE_NOTIF(h) \
SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF)
#define SCMI_IS_MSG_TYPE_DRESP(h) \
SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP)
#define SCMI_MSG_TOKEN(_hdr) \
(((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
struct scmi_req {
int cnt;
bool timed_out;
bool use_polling;
bool done;
struct mtx mtx;
LIST_ENTRY(scmi_req) next;
int protocol_id;
int message_id;
int token;
uint32_t header;
struct scmi_msg msg;
};
#define buf_to_msg(b) __containerof((b), struct scmi_msg, payld)
#define msg_to_req(m) __containerof((m), struct scmi_req, msg)
#define buf_to_req(b) msg_to_req(buf_to_msg(b))
LIST_HEAD(reqs_head, scmi_req);
struct scmi_reqs_pool {
struct mtx mtx;
struct reqs_head head;
};
BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN);
LIST_HEAD(inflight_head, scmi_req);
#define REQHASH(_sc, _tk) \
(&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask]))
struct scmi_transport {
unsigned long next_id;
struct _scmi_tokens avail_tokens;
struct inflight_head *inflight_ht;
unsigned long inflight_mask;
struct scmi_reqs_pool *chans[SCMI_CHAN_MAX];
struct mtx mtx;
};
static int scmi_transport_init(struct scmi_softc *);
static void scmi_transport_cleanup(struct scmi_softc *);
static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int);
static void scmi_reqs_pool_free(struct scmi_reqs_pool *);
static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
static void scmi_req_free_unlocked(struct scmi_softc *,
enum scmi_chan, struct scmi_req *);
static void scmi_req_get(struct scmi_softc *, struct scmi_req *);
static void scmi_req_put(struct scmi_softc *, struct scmi_req *);
static int scmi_token_pick(struct scmi_softc *);
static void scmi_token_release_unlocked(struct scmi_softc *, int);
static int scmi_req_track_inflight(struct scmi_softc *,
struct scmi_req *);
static int scmi_req_drop_inflight(struct scmi_softc *,
struct scmi_req *);
static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);
static int scmi_wait_for_response(struct scmi_softc *,
struct scmi_req *, void **);
-static void scmi_process_response(struct scmi_softc *, uint32_t);
+static void scmi_process_response(struct scmi_softc *, uint32_t,
+ unsigned int);
int
scmi_attach(device_t dev)
{
struct scmi_softc *sc;
phandle_t node;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
node = ofw_bus_get_node(dev);
if (node == -1)
return (ENXIO);
simplebus_init(dev, node);
error = scmi_transport_init(sc);
if (error != 0)
return (error);
device_printf(dev, "Transport reply timeout initialized to %dms\n",
sc->trs_desc.reply_timo_ms);
/*
* Allow devices to identify.
*/
bus_identify_children(dev);
/*
* Now walk the OFW tree and attach top-level devices.
*/
for (node = OF_child(node); node > 0; node = OF_peer(node))
simplebus_add_device(dev, node, 0, NULL, -1, NULL);
bus_attach_children(dev);
return (0);
}
static int
scmi_detach(device_t dev)
{
struct scmi_softc *sc;
sc = device_get_softc(dev);
scmi_transport_cleanup(sc);
return (0);
}
static device_method_t scmi_methods[] = {
DEVMETHOD(device_attach, scmi_attach),
DEVMETHOD(device_detach, scmi_detach),
DEVMETHOD_END
};
DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc),
simplebus_driver);
DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
MODULE_VERSION(scmi, 1);
static struct scmi_reqs_pool *
scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
{
struct scmi_reqs_pool *rp;
struct scmi_req *req;
rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK);
LIST_INIT(&rp->head);
for (int i = 0; i < max_msg; i++) {
req = malloc(sizeof(*req) + max_payld_sz,
M_DEVBUF, M_ZERO | M_WAITOK);
mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
LIST_INSERT_HEAD(&rp->head, req, next);
}
mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN);
return (rp);
}
static void
scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
{
struct scmi_req *req;
LIST_FOREACH(req, &rp->head, next) {
mtx_destroy(&req->mtx);
free(req, M_DEVBUF);
}
mtx_destroy(&rp->mtx);
free(rp, M_DEVBUF);
}
static int
scmi_transport_init(struct scmi_softc *sc)
{
struct scmi_transport *trs;
int ret;
trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);
BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);
trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF,
&trs->inflight_mask);
trs->chans[SCMI_CHAN_A2P] =
scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
if (trs->chans[SCMI_CHAN_A2P] == NULL) {
free(trs, M_DEVBUF);
return (ENOMEM);
}
trs->chans[SCMI_CHAN_P2A] =
scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
if (trs->chans[SCMI_CHAN_P2A] == NULL) {
scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
free(trs, M_DEVBUF);
return (ENOMEM);
}
sc->trs = trs;
ret = SCMI_TRANSPORT_INIT(sc->dev);
if (ret != 0) {
scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]);
free(trs, M_DEVBUF);
return (ret);
}
return (0);
}
static void
scmi_transport_cleanup(struct scmi_softc *sc)
{
SCMI_TRANSPORT_CLEANUP(sc->dev);
mtx_destroy(&sc->trs->mtx);
hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask);
scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]);
scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]);
free(sc->trs, M_DEVBUF);
}
static struct scmi_req *
scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
{
struct scmi_reqs_pool *rp;
struct scmi_req *req = NULL;
rp = sc->trs->chans[ch_idx];
mtx_lock_spin(&rp->mtx);
if (!LIST_EMPTY(&rp->head)) {
req = LIST_FIRST(&rp->head);
LIST_REMOVE_HEAD(&rp->head, next);
}
mtx_unlock_spin(&rp->mtx);
if (req != NULL)
refcount_init(&req->cnt, 1);
return (req);
}
static void
scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
struct scmi_req *req)
{
struct scmi_reqs_pool *rp;
rp = sc->trs->chans[ch_idx];
mtx_lock_spin(&rp->mtx);
req->timed_out = false;
req->done = false;
refcount_init(&req->cnt, 0);
LIST_INSERT_HEAD(&rp->head, req, next);
mtx_unlock_spin(&rp->mtx);
}
static void
scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
{
bool ok;
mtx_lock_spin(&req->mtx);
ok = refcount_acquire_if_not_zero(&req->cnt);
mtx_unlock_spin(&req->mtx);
if (!ok)
device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);
return;
}
static void
scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
{
mtx_lock_spin(&req->mtx);
if (!refcount_release_if_not_last(&req->cnt)) {
bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE);
scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
}
mtx_unlock_spin(&req->mtx);
}
static int
scmi_token_pick(struct scmi_softc *sc)
{
unsigned long next_msg_id, token;
mtx_lock_spin(&sc->trs->mtx);
/*
* next_id is a monotonically increasing unsigned long that can be used
* for tracing purposes; next_msg_id is a 10-bit sequence number derived
* from it.
*/
next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF;
token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id);
/* TODO Account for wrap-arounds and holes */
if (token != 0)
BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens);
mtx_unlock_spin(&sc->trs->mtx);
/*
* BIT_FFS_AT returns 1-indexed values, so 0 means failure to find a
* free slot: all possible SCMI messages are in-flight using all of the
* SCMI_MAX_TOKEN sequence numbers.
*/
if (!token)
return (-EBUSY);
return ((int)(token - 1));
}
static void
scmi_token_release_unlocked(struct scmi_softc *sc, int token)
{
BIT_SET(SCMI_MAX_TOKEN, token, &sc->trs->avail_tokens);
}
static int
scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req)
{
uint32_t header = 0;
req->token = scmi_token_pick(sc);
if (req->token < 0)
return (EBUSY);
header = req->message_id;
header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
header |= req->token << SCMI_HDR_TOKEN_S;
req->header = htole32(header);
req->msg.hdr = htole32(header);
return (0);
}
static int
scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req)
{
int error;
/* build hdr, pick token */
error = scmi_finalize_req(sc, req);
if (error != 0)
return (error);
/* Bump refcount to get hold of this in-flight transaction */
scmi_req_get(sc, req);
/* Register in the inflight hashtable */
mtx_lock_spin(&sc->trs->mtx);
LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next);
mtx_unlock_spin(&sc->trs->mtx);
return (0);
}
static int
scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req)
{
/* Remove from inflight hashtable at first ... */
mtx_lock_spin(&sc->trs->mtx);
LIST_REMOVE(req, next);
scmi_token_release_unlocked(sc, req->token);
mtx_unlock_spin(&sc->trs->mtx);
/* ...and drop refcount..potentially releasing *req */
scmi_req_put(sc, req);
return (0);
}
static struct scmi_req *
scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
{
struct scmi_req *req = NULL;
unsigned int token;
token = SCMI_MSG_TOKEN(hdr);
mtx_lock_spin(&sc->trs->mtx);
LIST_FOREACH(req, REQHASH(sc, token), next) {
if (req->token == token)
break;
}
mtx_unlock_spin(&sc->trs->mtx);
return (req);
}
static void
-scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
+scmi_process_response(struct scmi_softc *sc, uint32_t hdr, uint32_t rx_len)
{
bool timed_out = false;
struct scmi_req *req;
req = scmi_req_lookup_inflight(sc, hdr);
if (req == NULL) {
device_printf(sc->dev,
"Unexpected reply with header |%X| - token: 0x%X Drop.\n",
hdr, SCMI_MSG_TOKEN(hdr));
return;
}
mtx_lock_spin(&req->mtx);
req->done = true;
+ req->msg.rx_len = rx_len;
if (!req->timed_out) {
/*
* Consider the case in which a polled message is picked
* by chance on the IRQ path on another CPU: setting poll_done
* will terminate the other poll loop.
*/
if (!req->msg.polling)
wakeup(req);
else
atomic_store_rel_int(&req->msg.poll_done, 1);
} else {
timed_out = true;
}
mtx_unlock_spin(&req->mtx);
if (timed_out)
device_printf(sc->dev,
"Late reply for timed-out request - token: 0x%X. Ignore.\n",
req->token);
/*
* In case of a late reply to a timed-out transaction this will
* finally free the pending scmi_req
*/
scmi_req_drop_inflight(sc, req);
}
void
-scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr)
+scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len)
{
struct scmi_softc *sc;
sc = device_get_softc(dev);
if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
SCMI_CLEAR_CHANNEL(dev, chan);
return;
}
- scmi_process_response(sc, hdr);
+ scmi_process_response(sc, hdr, rx_len);
}
static int
scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
{
int ret;
if (req->msg.polling) {
bool needs_drop;
ret = SCMI_POLL_MSG(sc->dev, &req->msg,
sc->trs_desc.reply_timo_ms);
/*
* Drop reference to successfully polled req unless it had
* already also been processed on the IRQ path.
* Addresses a possible race-condition between polling and
* interrupt reception paths.
*/
mtx_lock_spin(&req->mtx);
needs_drop = (ret == 0) && !req->done;
req->timed_out = ret != 0;
mtx_unlock_spin(&req->mtx);
if (needs_drop)
scmi_req_drop_inflight(sc, req);
if (ret == 0 && req->msg.hdr != req->header) {
device_printf(sc->dev,
"Malformed reply with header |%08X|. Expected: |%08X|Drop.\n",
le32toh(req->msg.hdr), le32toh(req->header));
}
} else {
ret = tsleep(req, 0, "scmi_wait4",
(sc->trs_desc.reply_timo_ms * hz) / 1000);
/* Check for lost wakeups since there is no associated lock */
mtx_lock_spin(&req->mtx);
if (ret != 0 && req->done)
ret = 0;
req->timed_out = ret != 0;
mtx_unlock_spin(&req->mtx);
}
if (ret == 0) {
SCMI_COLLECT_REPLY(sc->dev, &req->msg);
if (req->msg.payld[0] != 0)
ret = req->msg.payld[0];
*out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
} else {
device_printf(sc->dev,
"Request for token 0x%X timed-out.\n", req->token);
}
SCMI_TX_COMPLETE(sc->dev, NULL);
return (ret);
}
void *
scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
int tx_payld_sz, int rx_payld_sz)
{
struct scmi_softc *sc;
struct scmi_req *req;
sc = device_get_softc(dev);
if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE ||
rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE) {
device_printf(dev, "Unsupported payload size. Drop.\n");
return (NULL);
}
/* Pick one from free list */
req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
if (req == NULL)
return (NULL);
req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;
req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
req->msg.rx_len = rx_payld_sz ?
rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE;
return (&req->msg.payld[0]);
}
void
scmi_buf_put(device_t dev, void *buf)
{
struct scmi_softc *sc;
struct scmi_req *req;
sc = device_get_softc(dev);
req = buf_to_req(buf);
scmi_req_put(sc, req);
}
int
scmi_request(device_t dev, void *in, void **out)
{
struct scmi_softc *sc;
struct scmi_req *req;
int error;
sc = device_get_softc(dev);
req = buf_to_req(in);
req->msg.polling =
(cold || sc->trs_desc.no_completion_irq || req->use_polling);
/* Set inflight and send using transport specific method - refc-2 */
error = scmi_req_track_inflight(sc, req);
if (error != 0)
return (error);
error = SCMI_XFER_MSG(sc->dev, &req->msg);
if (error != 0) {
scmi_req_drop_inflight(sc, req);
return (error);
}
return (scmi_wait_for_response(sc, req, out));
}
diff --git a/sys/dev/firmware/arm/scmi.h b/sys/dev/firmware/arm/scmi.h
index 345ae6eeb03a..5ad7b0db3f5f 100644
--- a/sys/dev/firmware/arm/scmi.h
+++ b/sys/dev/firmware/arm/scmi.h
@@ -1,84 +1,84 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _ARM64_SCMI_SCMI_H_
#define _ARM64_SCMI_SCMI_H_
#include "scmi_if.h"
#define SCMI_MAX_MSG 32
#define SCMI_MAX_MSG_PAYLD_SIZE 128
#define SCMI_MAX_MSG_REPLY_SIZE (SCMI_MAX_MSG_PAYLD_SIZE - sizeof(uint32_t))
#define SCMI_MAX_MSG_SIZE (SCMI_MAX_MSG_PAYLD_SIZE + sizeof(uint32_t))
enum scmi_chan {
SCMI_CHAN_A2P,
SCMI_CHAN_P2A,
SCMI_CHAN_MAX
};
struct scmi_transport_desc {
bool no_completion_irq;
unsigned int reply_timo_ms;
};
struct scmi_transport;
struct scmi_softc {
struct simplebus_softc simplebus_sc;
device_t dev;
struct mtx mtx;
struct scmi_transport_desc trs_desc;
struct scmi_transport *trs;
};
struct scmi_msg {
bool polling;
int poll_done;
uint32_t tx_len;
uint32_t rx_len;
#define SCMI_MSG_HDR_SIZE (sizeof(uint32_t))
uint32_t hdr;
uint8_t payld[];
};
#define hdr_to_msg(h) __containerof((h), struct scmi_msg, hdr)
void *scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
int tx_payd_sz, int rx_payld_sz);
void scmi_buf_put(device_t dev, void *buf);
int scmi_request(device_t dev, void *in, void **);
-void scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr);
+void scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len);
DECLARE_CLASS(scmi_driver);
int scmi_attach(device_t dev);
#endif /* !_ARM64_SCMI_SCMI_H_ */
diff --git a/sys/dev/firmware/arm/scmi_mailbox.c b/sys/dev/firmware/arm/scmi_mailbox.c
index 858b81f68845..d7b642b69b85 100644
--- a/sys/dev/firmware/arm/scmi_mailbox.c
+++ b/sys/dev/firmware/arm/scmi_mailbox.c
@@ -1,229 +1,229 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "dev/mailbox/arm/arm_doorbell.h"
#include "scmi.h"
#include "scmi_protocols.h"
#include "scmi_shmem.h"
#define SCMI_MBOX_POLL_INTERVAL_MS 3
struct scmi_mailbox_softc {
struct scmi_softc base;
device_t a2p_dev;
struct arm_doorbell *db;
};
static int scmi_mailbox_transport_init(device_t);
static void scmi_mailbox_transport_cleanup(device_t);
static int scmi_mailbox_xfer_msg(device_t, struct scmi_msg *);
static int scmi_mailbox_poll_msg(device_t, struct scmi_msg *,
unsigned int);
static int scmi_mailbox_collect_reply(device_t, struct scmi_msg *);
static void scmi_mailbox_tx_complete(device_t, void *);
static int scmi_mailbox_probe(device_t);
static void
scmi_mailbox_a2p_callback(void *arg)
{
struct scmi_mailbox_softc *sc;
- uint32_t msg_header;
+ uint32_t msg_header, rx_len;
int ret;
sc = arg;
- ret = scmi_shmem_read_msg_header(sc->a2p_dev, &msg_header);
+ ret = scmi_shmem_read_msg_header(sc->a2p_dev, &msg_header, &rx_len);
if (ret == 0)
- scmi_rx_irq_callback(sc->base.dev, sc->a2p_dev, msg_header);
+ scmi_rx_irq_callback(sc->base.dev, sc->a2p_dev, msg_header, rx_len);
}
static int
scmi_mailbox_transport_init(device_t dev)
{
struct scmi_mailbox_softc *sc;
phandle_t node;
sc = device_get_softc(dev);
node = ofw_bus_get_node(dev);
if (node == -1)
return (ENXIO);
/*
* TODO
* - Support P2A shmem + IRQ/doorbell
* - Support other mailbox devices
*/
sc->a2p_dev = scmi_shmem_get(dev, node, SCMI_CHAN_A2P);
if (sc->a2p_dev == NULL) {
device_printf(dev, "A2P shmem dev not found.\n");
return (ENXIO);
}
/* TODO: Fix ofw_get...mbox doorbell names NOT required in Linux DT */
sc->db = arm_doorbell_ofw_get(dev, "tx");
if (sc->db == NULL) {
device_printf(dev, "Doorbell device not found.\n");
return (ENXIO);
}
sc->base.trs_desc.reply_timo_ms = 30;
arm_doorbell_set_handler(sc->db, scmi_mailbox_a2p_callback, sc);
return (0);
}
static void
scmi_mailbox_transport_cleanup(device_t dev)
{
struct scmi_mailbox_softc *sc;
sc = device_get_softc(dev);
arm_doorbell_set_handler(sc->db, NULL, NULL);
}
static int
scmi_mailbox_xfer_msg(device_t dev, struct scmi_msg *msg)
{
struct scmi_mailbox_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = scmi_shmem_prepare_msg(sc->a2p_dev, (uint8_t *)&msg->hdr,
msg->tx_len, msg->polling);
if (ret != 0)
return (ret);
/* Interrupt SCP firmware. */
arm_doorbell_set(sc->db);
return (0);
}
static int
scmi_mailbox_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo_ms)
{
struct scmi_mailbox_softc *sc;
unsigned int tmo_loops = tmo_ms / SCMI_MBOX_POLL_INTERVAL_MS;
sc = device_get_softc(dev);
do {
- if (scmi_shmem_poll_msg(sc->a2p_dev, &msg->hdr))
+ if (scmi_shmem_poll_msg(sc->a2p_dev, &msg->hdr, &msg->rx_len))
break;
DELAY(SCMI_MBOX_POLL_INTERVAL_MS * 1000);
} while (tmo_loops--);
return (tmo_loops ? 0 : 1);
}
static int
scmi_mailbox_collect_reply(device_t dev, struct scmi_msg *msg)
{
struct scmi_mailbox_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = scmi_shmem_read_msg_payload(sc->a2p_dev,
- msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE);
+ msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE, msg->rx_len);
return (ret);
}
static void
scmi_mailbox_tx_complete(device_t dev, void *chan)
{
struct scmi_mailbox_softc *sc;
sc = device_get_softc(dev);
scmi_shmem_tx_complete(sc->a2p_dev);
}
static void
scmi_mailbox_clear_channel(device_t dev, void *chan)
{
/* Only P2A channel can be cleared forcibly by agent */
scmi_shmem_clear_channel(chan);
}
static int
scmi_mailbox_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,scmi"))
return (ENXIO);
if (!ofw_bus_status_okay(dev))
return (ENXIO);
device_set_desc(dev, "ARM SCMI Mailbox Transport driver");
return (BUS_PROBE_DEFAULT);
}
static device_method_t scmi_mailbox_methods[] = {
DEVMETHOD(device_probe, scmi_mailbox_probe),
/* SCMI interface */
DEVMETHOD(scmi_transport_init, scmi_mailbox_transport_init),
DEVMETHOD(scmi_transport_cleanup, scmi_mailbox_transport_cleanup),
DEVMETHOD(scmi_xfer_msg, scmi_mailbox_xfer_msg),
DEVMETHOD(scmi_poll_msg, scmi_mailbox_poll_msg),
DEVMETHOD(scmi_collect_reply, scmi_mailbox_collect_reply),
DEVMETHOD(scmi_tx_complete, scmi_mailbox_tx_complete),
DEVMETHOD(scmi_clear_channel, scmi_mailbox_clear_channel),
DEVMETHOD_END
};
DEFINE_CLASS_1(scmi_mailbox, scmi_mailbox_driver, scmi_mailbox_methods,
sizeof(struct scmi_mailbox_softc), scmi_driver);
DRIVER_MODULE(scmi_mailbox, simplebus, scmi_mailbox_driver, 0, 0);
MODULE_VERSION(scmi_mailbox, 1);
diff --git a/sys/dev/firmware/arm/scmi_shmem.c b/sys/dev/firmware/arm/scmi_shmem.c
index 9bab4bc4004e..a63d96b64cb5 100644
--- a/sys/dev/firmware/arm/scmi_shmem.c
+++ b/sys/dev/firmware/arm/scmi_shmem.c
@@ -1,329 +1,325 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin <br@bsdpad.com>
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "mmio_sram_if.h"
#include "scmi_shmem.h"
#include "scmi.h"
#define INFLIGHT_NONE 0
#define INFLIGHT_REQ 1
struct shmem_softc {
device_t dev;
device_t parent;
int reg;
int inflight;
};
static void scmi_shmem_read(device_t, bus_size_t, void *, bus_size_t);
static void scmi_shmem_write(device_t, bus_size_t, const void *,
bus_size_t);
static void scmi_shmem_acquire_channel(struct shmem_softc *);
static void scmi_shmem_release_channel(struct shmem_softc *);
static int shmem_probe(device_t);
static int shmem_attach(device_t);
static int shmem_detach(device_t);
static int
shmem_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,scmi-shmem"))
return (ENXIO);
if (!ofw_bus_status_okay(dev))
return (ENXIO);
device_set_desc(dev, "ARM SCMI Shared Memory driver");
return (BUS_PROBE_DEFAULT);
}
static int
shmem_attach(device_t dev)
{
struct shmem_softc *sc;
phandle_t node;
int reg;
sc = device_get_softc(dev);
sc->dev = dev;
sc->parent = device_get_parent(dev);
node = ofw_bus_get_node(dev);
if (node == -1)
return (ENXIO);
OF_getencprop(node, "reg", &reg, sizeof(reg));
sc->reg = reg;
atomic_store_rel_int(&sc->inflight, INFLIGHT_NONE);
OF_device_register_xref(OF_xref_from_node(node), dev);
return (0);
}
static int
shmem_detach(device_t dev)
{
return (0);
}
static void
scmi_shmem_read(device_t dev, bus_size_t offset, void *buf, bus_size_t len)
{
struct shmem_softc *sc;
uint8_t *addr;
int i;
sc = device_get_softc(dev);
addr = (uint8_t *)buf;
for (i = 0; i < len; i++)
addr[i] = MMIO_SRAM_READ_1(sc->parent, sc->reg + offset + i);
}
static void
scmi_shmem_write(device_t dev, bus_size_t offset, const void *buf,
bus_size_t len)
{
struct shmem_softc *sc;
const uint8_t *addr;
int i;
sc = device_get_softc(dev);
addr = (const uint8_t *)buf;
for (i = 0; i < len; i++)
MMIO_SRAM_WRITE_1(sc->parent, sc->reg + offset + i, addr[i]);
}
device_t
scmi_shmem_get(device_t dev, phandle_t node, int index)
{
phandle_t *shmems;
device_t shmem_dev;
size_t len;
len = OF_getencprop_alloc_multi(node, "shmem", sizeof(*shmems),
(void **)&shmems);
if (len <= 0) {
device_printf(dev, "%s: Can't get shmem node.\n", __func__);
return (NULL);
}
if (index >= len) {
OF_prop_free(shmems);
return (NULL);
}
shmem_dev = OF_device_from_xref(shmems[index]);
if (shmem_dev == NULL)
device_printf(dev, "%s: Can't get shmem device.\n",
__func__);
OF_prop_free(shmems);
return (shmem_dev);
}
static void
scmi_shmem_acquire_channel(struct shmem_softc *sc)
{
while ((atomic_cmpset_acq_int(&sc->inflight, INFLIGHT_NONE,
INFLIGHT_REQ)) == 0)
DELAY(1000);
}
static void
scmi_shmem_release_channel(struct shmem_softc *sc)
{
atomic_store_rel_int(&sc->inflight, INFLIGHT_NONE);
}
int
scmi_shmem_prepare_msg(device_t dev, uint8_t *msg, uint32_t tx_len,
bool polling)
{
struct shmem_softc *sc;
struct scmi_smt_header hdr = {};
uint32_t channel_status;
sc = device_get_softc(dev);
/* Get exclusive write access to channel */
scmi_shmem_acquire_channel(sc);
/* Read channel status */
scmi_shmem_read(dev, SMT_OFFSET_CHAN_STATUS, &channel_status,
SMT_SIZE_CHAN_STATUS);
if ((channel_status & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) == 0) {
scmi_shmem_release_channel(sc);
device_printf(dev, "Shmem channel busy. Abort !.\n");
return (1);
}
/* Update header */
hdr.channel_status &= ~SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE;
hdr.msg_header = htole32(*((uint32_t *)msg));
hdr.length = htole32(tx_len);
if (!polling)
hdr.flags |= SCMI_SHMEM_FLAG_INTR_ENABLED;
else
hdr.flags &= ~SCMI_SHMEM_FLAG_INTR_ENABLED;
/* Write header */
scmi_shmem_write(dev, 0, &hdr, SMT_SIZE_HEADER);
/* Write request payload if any */
if (tx_len > SCMI_MSG_HDR_SIZE)
scmi_shmem_write(dev, SMT_SIZE_HEADER,
&msg[SCMI_MSG_HDR_SIZE], tx_len - SCMI_MSG_HDR_SIZE);
return (0);
}
void
scmi_shmem_clear_channel(device_t dev)
{
uint32_t channel_status = 0;
if (dev == NULL)
return;
channel_status |= SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE;
scmi_shmem_write(dev, SMT_OFFSET_CHAN_STATUS, &channel_status,
SMT_SIZE_CHAN_STATUS);
}
int
-scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header)
+scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header, unsigned int *rx_len)
{
uint32_t length, header;
/* Read and check length. */
scmi_shmem_read(dev, SMT_OFFSET_LENGTH, &length, SMT_SIZE_LENGTH);
if (le32toh(length) < sizeof(header))
return (EINVAL);
+ *rx_len = le32toh(length);
/* Read header. */
scmi_shmem_read(dev, SMT_OFFSET_MSG_HEADER, &header,
SMT_SIZE_MSG_HEADER);
*msg_header = le32toh(header);
return (0);
}
int
-scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len)
+scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len, uint32_t rx_len)
{
- uint32_t length, payld_len;
-
- /* Read length. */
- scmi_shmem_read(dev, SMT_OFFSET_LENGTH, &length, SMT_SIZE_LENGTH);
- payld_len = le32toh(length) - SCMI_MSG_HDR_SIZE;
+ uint32_t payld_len;
+ payld_len = rx_len - SCMI_MSG_HDR_SIZE;
if (payld_len > buf_len) {
device_printf(dev,
"RX payload %dbytes exceeds buflen %dbytes. Truncate.\n",
payld_len, buf_len);
payld_len = buf_len;
}
/* Read response payload */
scmi_shmem_read(dev, SMT_SIZE_HEADER, buf, payld_len);
return (0);
}
void
scmi_shmem_tx_complete(device_t dev)
{
struct shmem_softc *sc;
sc = device_get_softc(dev);
scmi_shmem_release_channel(sc);
}
-bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header)
+bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header, uint32_t *rx_len)
{
uint32_t status;
bool ret;
scmi_shmem_read(dev, SMT_OFFSET_CHAN_STATUS, &status,
SMT_SIZE_CHAN_STATUS);
ret = (status & (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE));
+ if (ret == 0)
+ return (ret);
- if (ret)
- scmi_shmem_read(dev, SMT_OFFSET_MSG_HEADER, msg_header,
- SMT_SIZE_MSG_HEADER);
-
- return (ret);
+	return (scmi_shmem_read_msg_header(dev, msg_header, rx_len) == 0);
}
static device_method_t shmem_methods[] = {
DEVMETHOD(device_probe, shmem_probe),
DEVMETHOD(device_attach, shmem_attach),
DEVMETHOD(device_detach, shmem_detach),
DEVMETHOD_END
};
DEFINE_CLASS_1(shmem, shmem_driver, shmem_methods, sizeof(struct shmem_softc),
simplebus_driver);
EARLY_DRIVER_MODULE(shmem, mmio_sram, shmem_driver, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
MODULE_VERSION(scmi_shmem, 1);
diff --git a/sys/dev/firmware/arm/scmi_shmem.h b/sys/dev/firmware/arm/scmi_shmem.h
index ed8763d5c145..bc8284502129 100644
--- a/sys/dev/firmware/arm/scmi_shmem.h
+++ b/sys/dev/firmware/arm/scmi_shmem.h
@@ -1,72 +1,72 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _ARM64_SCMI_SCMI_SHMEM_H_
#define _ARM64_SCMI_SCMI_SHMEM_H_
/* Shared Memory Transfer. */
struct scmi_smt_header {
uint32_t reserved;
uint32_t channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR (1 << 1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE (1 << 0)
uint32_t reserved1[2];
uint32_t flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED (1 << 0)
uint32_t length;
uint32_t msg_header;
uint8_t msg_payload[0];
};
#define SMT_SIZE_HEADER sizeof(struct scmi_smt_header)
#define SMT_OFFSET_CHAN_STATUS \
__offsetof(struct scmi_smt_header, channel_status)
#define SMT_SIZE_CHAN_STATUS sizeof(uint32_t)
#define SMT_OFFSET_LENGTH \
__offsetof(struct scmi_smt_header, length)
#define SMT_SIZE_LENGTH sizeof(uint32_t)
#define SMT_OFFSET_MSG_HEADER \
__offsetof(struct scmi_smt_header, msg_header)
#define SMT_SIZE_MSG_HEADER sizeof(uint32_t)
device_t scmi_shmem_get(device_t sdev, phandle_t node, int index);
int scmi_shmem_prepare_msg(device_t dev, uint8_t *msg, uint32_t tx_len,
bool polling);
-bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header);
-int scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header);
-int scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len);
+bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header, uint32_t *rx_len);
+int scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header, uint32_t *rx_len);
+int scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len, uint32_t rx_len);
void scmi_shmem_tx_complete(device_t);
void scmi_shmem_clear_channel(device_t);
#endif /* !_ARM64_SCMI_SCMI_SHMEM_H_ */
diff --git a/sys/dev/firmware/arm/scmi_smc.c b/sys/dev/firmware/arm/scmi_smc.c
index 9f0d899233fa..81c66ad7bb46 100644
--- a/sys/dev/firmware/arm/scmi_smc.c
+++ b/sys/dev/firmware/arm/scmi_smc.c
@@ -1,187 +1,187 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Ruslan Bukin
* Copyright (c) 2023 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "scmi.h"
#include "scmi_protocols.h"
#include "scmi_shmem.h"
struct scmi_smc_softc {
struct scmi_softc base;
uint32_t smc_id;
device_t a2p_dev;
};
static int scmi_smc_transport_init(device_t);
static int scmi_smc_xfer_msg(device_t, struct scmi_msg *);
static int scmi_smc_poll_msg(device_t, struct scmi_msg *, unsigned int);
static int scmi_smc_collect_reply(device_t, struct scmi_msg *);
static void scmi_smc_tx_complete(device_t, void *);
static int scmi_smc_probe(device_t);
static int
scmi_smc_transport_init(device_t dev)
{
struct scmi_smc_softc *sc;
phandle_t node;
ssize_t len;
sc = device_get_softc(dev);
node = ofw_bus_get_node(dev);
len = OF_getencprop(node, "arm,smc-id", &sc->smc_id,
sizeof(sc->smc_id));
if (len <= 0) {
device_printf(dev, "No SMC ID found\n");
return (EINVAL);
}
device_printf(dev, "smc id %x\n", sc->smc_id);
sc->a2p_dev = scmi_shmem_get(dev, node, SCMI_CHAN_A2P);
if (sc->a2p_dev == NULL) {
device_printf(dev, "A2P shmem dev not found.\n");
return (ENXIO);
}
sc->base.trs_desc.no_completion_irq = true;
sc->base.trs_desc.reply_timo_ms = 30;
return (0);
}
static int
scmi_smc_xfer_msg(device_t dev, struct scmi_msg *msg)
{
struct scmi_smc_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = scmi_shmem_prepare_msg(sc->a2p_dev, (uint8_t *)&msg->hdr,
msg->tx_len, msg->polling);
if (ret != 0)
return (ret);
arm_smccc_invoke_smc(sc->smc_id, NULL);
return (0);
}
static int
scmi_smc_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo)
{
struct scmi_smc_softc *sc;
sc = device_get_softc(dev);
/*
* Nothing to poll since commands are completed as soon as smc
* returns ... but did we get back what we were poling for ?
*/
- scmi_shmem_read_msg_header(sc->a2p_dev, &msg->hdr);
+ scmi_shmem_read_msg_header(sc->a2p_dev, &msg->hdr, &msg->rx_len);
return (0);
}
static int
scmi_smc_collect_reply(device_t dev, struct scmi_msg *msg)
{
struct scmi_smc_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = scmi_shmem_read_msg_payload(sc->a2p_dev,
- msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE);
+ msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE, msg->rx_len);
return (ret);
}
static void
scmi_smc_tx_complete(device_t dev, void *chan)
{
struct scmi_smc_softc *sc;
sc = device_get_softc(dev);
scmi_shmem_tx_complete(sc->a2p_dev);
}
static int
scmi_smc_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,scmi-smc"))
return (ENXIO);
if (!ofw_bus_status_okay(dev))
return (ENXIO);
device_set_desc(dev, "ARM SCMI SMC Transport driver");
return (BUS_PROBE_DEFAULT);
}
static device_method_t scmi_smc_methods[] = {
DEVMETHOD(device_probe, scmi_smc_probe),
/* SCMI interface */
DEVMETHOD(scmi_transport_init, scmi_smc_transport_init),
DEVMETHOD(scmi_xfer_msg, scmi_smc_xfer_msg),
DEVMETHOD(scmi_poll_msg, scmi_smc_poll_msg),
DEVMETHOD(scmi_collect_reply, scmi_smc_collect_reply),
DEVMETHOD(scmi_tx_complete, scmi_smc_tx_complete),
DEVMETHOD_END
};
DEFINE_CLASS_1(scmi_smc, scmi_smc_driver, scmi_smc_methods,
sizeof(struct scmi_smc_softc), scmi_driver);
/* Needs to be after the mmio_sram driver */
EARLY_DRIVER_MODULE(scmi_smc, simplebus, scmi_smc_driver, 0, 0,
BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_LATE);
MODULE_VERSION(scmi_smc, 1);
diff --git a/sys/dev/firmware/arm/scmi_virtio.c b/sys/dev/firmware/arm/scmi_virtio.c
index 12cbb9ecefd5..8714fee2ec87 100644
--- a/sys/dev/firmware/arm/scmi_virtio.c
+++ b/sys/dev/firmware/arm/scmi_virtio.c
@@ -1,298 +1,297 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Arm Ltd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "scmi.h"
#include "scmi_protocols.h"
#define SCMI_VIRTIO_POLLING_INTERVAL_MS 2
struct scmi_virtio_softc {
struct scmi_softc base;
device_t virtio_dev;
int cmdq_sz;
int evtq_sz;
void *p2a_pool;
};
static void scmi_virtio_callback(void *, unsigned int, void *);
static void *scmi_virtio_p2a_pool_init(device_t, unsigned int);
static int scmi_virtio_transport_init(device_t);
static void scmi_virtio_transport_cleanup(device_t);
static int scmi_virtio_xfer_msg(device_t, struct scmi_msg *);
static int scmi_virtio_poll_msg(device_t, struct scmi_msg *, unsigned int);
static void scmi_virtio_clear_channel(device_t, void *);
static int scmi_virtio_probe(device_t);
static int scmi_virtio_attach(device_t);
static void
scmi_virtio_callback(void *msg, unsigned int len, void *priv)
{
struct scmi_virtio_softc *sc;
uint32_t hdr;
sc = priv;
if (msg == NULL || len < sizeof(hdr)) {
device_printf(sc->virtio_dev, "Ignoring malformed message.\n");
return;
}
hdr = le32toh(*((uint32_t *)msg));
- scmi_rx_irq_callback(sc->base.dev, msg, hdr);
+ scmi_rx_irq_callback(sc->base.dev, msg, hdr, len);
}
static void *
scmi_virtio_p2a_pool_init(device_t dev, unsigned int max_msg)
{
struct scmi_virtio_softc *sc;
void *pool;
uint8_t *buf;
int i;
sc = device_get_softc(dev);
pool = mallocarray(max_msg, SCMI_MAX_MSG_SIZE, M_DEVBUF,
M_ZERO | M_WAITOK);
for (i = 0, buf = pool; i < max_msg; i++, buf += SCMI_MAX_MSG_SIZE) {
/* Feed platform with pre-allocated P2A buffers */
virtio_scmi_message_enqueue(sc->virtio_dev,
VIRTIO_SCMI_CHAN_P2A, buf, 0, SCMI_MAX_MSG_SIZE);
}
device_printf(dev,
"Fed %d initial P2A buffers to platform.\n", max_msg);
return (pool);
}
static void
scmi_virtio_clear_channel(device_t dev, void *msg)
{
struct scmi_virtio_softc *sc;
sc = device_get_softc(dev);
virtio_scmi_message_enqueue(sc->virtio_dev, VIRTIO_SCMI_CHAN_P2A,
msg, 0, SCMI_MAX_MSG_SIZE);
}
static int
scmi_virtio_transport_init(device_t dev)
{
struct scmi_virtio_softc *sc;
int ret;
sc = device_get_softc(dev);
sc->cmdq_sz = virtio_scmi_channel_size_get(sc->virtio_dev,
VIRTIO_SCMI_CHAN_A2P);
sc->evtq_sz = virtio_scmi_channel_size_get(sc->virtio_dev,
VIRTIO_SCMI_CHAN_P2A);
if (!sc->cmdq_sz) {
device_printf(dev,
"VirtIO cmdq virtqueue not found. Aborting.\n");
return (ENXIO);
}
/*
* P2A buffers are owned by the platform initially; allocate a feed an
* appropriate number of buffers.
*/
if (sc->evtq_sz != 0) {
sc->p2a_pool = scmi_virtio_p2a_pool_init(dev, sc->evtq_sz);
if (sc->p2a_pool == NULL)
return (ENOMEM);
}
/* Note that setting a callback also enables that VQ interrupts */
ret = virtio_scmi_channel_callback_set(sc->virtio_dev,
VIRTIO_SCMI_CHAN_A2P, scmi_virtio_callback, sc);
if (ret) {
device_printf(dev, "Failed to set VirtIO cmdq callback.\n");
return (ENXIO);
}
device_printf(dev,
"VirtIO cmdq virtqueue configured - cmdq_sz:%d\n", sc->cmdq_sz);
/* P2A channel is optional */
if (sc->evtq_sz) {
ret = virtio_scmi_channel_callback_set(sc->virtio_dev,
VIRTIO_SCMI_CHAN_P2A, scmi_virtio_callback, sc);
if (ret == 0) {
device_printf(dev,
"VirtIO evtq virtqueue configured - evtq_sz:%d\n",
sc->evtq_sz);
} else {
device_printf(dev,
"Failed to set VirtIO evtq callback.Skip.\n");
sc->evtq_sz = 0;
}
}
sc->base.trs_desc.reply_timo_ms = 100;
return (0);
}
static void
scmi_virtio_transport_cleanup(device_t dev)
{
struct scmi_virtio_softc *sc;
sc = device_get_softc(dev);
if (sc->evtq_sz != 0) {
virtio_scmi_channel_callback_set(sc->virtio_dev,
VIRTIO_SCMI_CHAN_P2A, NULL, NULL);
free(sc->p2a_pool, M_DEVBUF);
}
virtio_scmi_channel_callback_set(sc->virtio_dev,
VIRTIO_SCMI_CHAN_A2P, NULL, NULL);
}
static int
scmi_virtio_xfer_msg(device_t dev, struct scmi_msg *msg)
{
struct scmi_virtio_softc *sc;
sc = device_get_softc(dev);
return (virtio_scmi_message_enqueue(sc->virtio_dev,
VIRTIO_SCMI_CHAN_A2P, &msg->hdr, msg->tx_len, msg->rx_len));
}
static int
scmi_virtio_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo_ms)
{
struct scmi_virtio_softc *sc;
device_t vdev;
int tmo_loops;
sc = device_get_softc(dev);
vdev = sc->virtio_dev;
tmo_loops = tmo_ms / SCMI_VIRTIO_POLLING_INTERVAL_MS;
while (tmo_loops-- && atomic_load_acq_int(&msg->poll_done) == 0) {
struct scmi_msg *rx_msg;
void *rx_buf;
uint32_t rx_len;
rx_buf = virtio_scmi_message_poll(vdev, &rx_len);
if (rx_buf == NULL) {
DELAY(SCMI_VIRTIO_POLLING_INTERVAL_MS * 1000);
continue;
}
rx_msg = hdr_to_msg(rx_buf);
- rx_msg->rx_len = rx_len;
/* Complete the polling on any poll path */
if (rx_msg->polling)
atomic_store_rel_int(&rx_msg->poll_done, 1);
if (__predict_true(rx_msg == msg))
break;
/*
* Polling returned an unexpected message: either a message
* polled by some other thread of execution or a message not
* supposed to be polled.
*/
device_printf(dev, "POLLED OoO HDR:|%08X| - polling:%d\n",
rx_msg->hdr, rx_msg->polling);
if (!rx_msg->polling)
- scmi_rx_irq_callback(sc->base.dev, rx_msg, rx_msg->hdr);
+ scmi_rx_irq_callback(sc->base.dev, rx_msg, rx_msg->hdr, rx_len);
}
return (tmo_loops > 0 ? 0 : ETIMEDOUT);
}
static int
scmi_virtio_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,scmi-virtio"))
return (ENXIO);
if (!ofw_bus_status_okay(dev))
return (ENXIO);
device_set_desc(dev, "ARM SCMI VirtIO Transport driver");
return (BUS_PROBE_DEFAULT);
}
static int
scmi_virtio_attach(device_t dev)
{
struct scmi_virtio_softc *sc;
sc = device_get_softc(dev);
sc->virtio_dev = virtio_scmi_transport_get();
if (sc->virtio_dev == NULL)
return (1);
/* When attach fails there is nothing to cleanup*/
return (scmi_attach(dev));
}
static device_method_t scmi_virtio_methods[] = {
DEVMETHOD(device_probe, scmi_virtio_probe),
DEVMETHOD(device_attach, scmi_virtio_attach),
/* SCMI interface */
DEVMETHOD(scmi_transport_init, scmi_virtio_transport_init),
DEVMETHOD(scmi_transport_cleanup, scmi_virtio_transport_cleanup),
DEVMETHOD(scmi_xfer_msg, scmi_virtio_xfer_msg),
DEVMETHOD(scmi_poll_msg, scmi_virtio_poll_msg),
DEVMETHOD(scmi_clear_channel, scmi_virtio_clear_channel),
DEVMETHOD_END
};
DEFINE_CLASS_1(scmi_virtio, scmi_virtio_driver, scmi_virtio_methods,
sizeof(struct scmi_virtio_softc), scmi_driver);
/* Needs to be after the mmio_sram driver */
DRIVER_MODULE(scmi_virtio, simplebus, scmi_virtio_driver, 0, 0);
MODULE_VERSION(scmi_virtio, 1);