Index: head/sys/dev/ntb/if_ntb/if_ntb.c =================================================================== --- head/sys/dev/ntb/if_ntb/if_ntb.c (revision 289545) +++ head/sys/dev/ntb/if_ntb/if_ntb.c (revision 289546) @@ -1,1475 +1,1591 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../ntb_hw/ntb_hw.h" /* * The Non-Transparent Bridge (NTB) is a device on some Intel processors that * allows you to connect two systems using a PCI-e link. * * This module contains a protocol for sending and receiving messages, and * exposes that protocol through a simulated ethernet device called ntb. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #define QP_SETSIZE 64 BITSET_DEFINE(_qpset, QP_SETSIZE); #define test_bit(pos, addr) BIT_ISSET(QP_SETSIZE, (pos), (addr)) #define set_bit(pos, addr) BIT_SET(QP_SETSIZE, (pos), (addr)) #define clear_bit(pos, addr) BIT_CLR(QP_SETSIZE, (pos), (addr)) #define ffs_bit(addr) BIT_FFS(QP_SETSIZE, (addr)) #define KTR_NTB KTR_SPARE3 -#define NTB_TRANSPORT_VERSION 3 +#define NTB_TRANSPORT_VERSION 4 #define NTB_RX_MAX_PKTS 64 #define NTB_RXQ_SIZE 300 +enum ntb_link_event { + NTB_LINK_DOWN = 0, + NTB_LINK_UP, +}; + static unsigned int transport_mtu = 0x4000 + ETHER_HDR_LEN + ETHER_CRC_LEN; +static uint64_t max_mw_size; +SYSCTL_UQUAD(_hw_ntb, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, + "If enabled (non-zero), limit the size of large memory windows. " + "Both sides of the NTB MUST set the same value here."); + static unsigned int max_num_clients; SYSCTL_UINT(_hw_ntb, OID_AUTO, max_num_clients, CTLFLAG_RDTUN, &max_num_clients, 0, "Maximum number of NTB transport clients. 
" "0 (default) - use all available NTB memory windows; " "positive integer N - Limit to N memory windows."); STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); struct ntb_queue_entry { /* ntb_queue list reference */ STAILQ_ENTRY(ntb_queue_entry) entry; - /* info on data to be transfered */ + /* info on data to be transferred */ void *cb_data; void *buf; - uint64_t len; - uint64_t flags; + unsigned len; + unsigned flags; + + struct ntb_transport_qp *qp; + struct ntb_payload_header *x_hdr; + unsigned index; }; struct ntb_rx_info { unsigned int entry; }; struct ntb_transport_qp { struct ntb_transport_ctx *transport; struct ntb_softc *ntb; void *cb_data; bool client_ready; bool link_is_up; uint8_t qp_num; /* Only 64 QPs are allowed. 0-63 */ struct ntb_rx_info *rx_info; struct ntb_rx_info *remote_rx_info; void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list tx_free_q; struct mtx ntb_tx_free_q_lock; void *tx_mw; + bus_addr_t tx_mw_phys; uint64_t tx_index; uint64_t tx_max_entry; uint64_t tx_max_frame; void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list rx_pend_q; struct ntb_queue_list rx_free_q; struct mtx ntb_rx_pend_q_lock; struct mtx ntb_rx_free_q_lock; struct task rx_completion_task; + struct task rxc_db_work; void *rx_buff; uint64_t rx_index; uint64_t rx_max_entry; uint64_t rx_max_frame; void (*event_handler)(void *data, enum ntb_link_event status); struct callout link_work; struct callout queue_full; struct callout rx_full; uint64_t last_rx_no_buf; /* Stats */ uint64_t rx_bytes; uint64_t rx_pkts; uint64_t rx_ring_empty; uint64_t rx_err_no_buf; uint64_t rx_err_oflow; uint64_t rx_err_ver; uint64_t tx_bytes; uint64_t tx_pkts; uint64_t tx_ring_full; }; struct ntb_queue_handlers { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*event_handler)(void *data, enum ntb_link_event status); }; - struct ntb_transport_mw { + vm_paddr_t phys_addr; + size_t phys_size; + size_t xlat_align; + size_t xlat_align_size; + /* Tx buff is off vbase / phys_addr */ + void *vbase; size_t xlat_size; + size_t buff_size; + /* Rx buff is off virt_addr / dma_addr */ void *virt_addr; - vm_paddr_t dma_addr; + bus_addr_t dma_addr; }; struct ntb_transport_ctx { struct ntb_softc *ntb; struct ifnet *ifp; struct ntb_transport_mw mw_vec[NTB_MAX_NUM_MW]; struct ntb_transport_qp *qp_vec; struct _qpset qp_bitmap; + struct _qpset qp_bitmap_free; unsigned mw_count; unsigned qp_count; enum ntb_link_event link_is_up; struct callout link_work; - struct ntb_transport_qp *qp; uint64_t bufsize; u_char eaddr[ETHER_ADDR_LEN]; struct mtx tx_lock; struct mtx rx_lock; + + /* The hardcoded single queuepair in ntb_setup_interface() */ + struct ntb_transport_qp *qp; }; static struct ntb_transport_ctx net_softc; enum { IF_NTB_DESC_DONE_FLAG = 1 << 0, IF_NTB_LINK_DOWN_FLAG = 1 << 1, }; struct ntb_payload_header { uint64_t ver; uint64_t len; uint64_t flags; }; enum { /* * The order of this enum is part of the if_ntb remote protocol. Do * not reorder without bumping protocol version (and it's probably best * to keep the protocol in lock-step with the Linux NTB driver. */ IF_NTB_VERSION = 0, IF_NTB_QP_LINKS, IF_NTB_NUM_QPS, IF_NTB_NUM_MWS, /* * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. 
*/ IF_NTB_MW0_SZ_HIGH, IF_NTB_MW0_SZ_LOW, IF_NTB_MW1_SZ_HIGH, IF_NTB_MW1_SZ_LOW, IF_NTB_MAX_SPAD, }; #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 static int ntb_handle_module_events(struct module *m, int what, void *arg); static int ntb_setup_interface(void); static int ntb_teardown_interface(void); static void ntb_net_init(void *arg); static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void ntb_start(struct ifnet *ifp); static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_event_handler(void *data, enum ntb_link_event status); static int ntb_transport_init(struct ntb_softc *ntb); -static void ntb_transport_free(void *transport); +static void ntb_transport_free(struct ntb_transport_ctx *); static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_transport_free_queue(struct ntb_transport_qp *qp); -static struct ntb_transport_qp * ntb_transport_create_queue(void *data, +static struct ntb_transport_qp *ntb_transport_create_queue(void *data, struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); static void ntb_transport_link_up(struct ntb_transport_qp *qp); static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len); static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry); -static void ntb_tx_copy_task(struct ntb_transport_qp *qp, +static void ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static void ntb_qp_full(void *arg); -static int ntb_transport_rxc_db(void *arg, int dummy); +static void ntb_transport_rxc_db(void *arg, int pending); static void ntb_rx_pendq_full(void *arg); static int ntb_process_rxc(struct ntb_transport_qp *qp); static void ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); -static void ntb_rx_completion_task(void *arg, int pending); -static void ntb_transport_event_callback(void *data, enum ntb_hw_event event); +static void ntb_complete_rxc(void *arg, int pending); +static void ntb_transport_doorbell_callback(void *data, int vector); +static void ntb_transport_event_callback(void *data); static void ntb_transport_link_work(void *arg); static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, unsigned size); static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); -static void ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, +static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_qp_link_work(void *arg); static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); static void ntb_qp_link_down(struct ntb_transport_qp *qp); static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); static void ntb_transport_link_down(struct ntb_transport_qp *qp); static void ntb_send_link_down(struct ntb_transport_qp *qp); static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list); static void create_random_local_eui48(u_char *eaddr); static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); +static const struct ntb_ctx_ops ntb_transport_ops = { + .link_event = ntb_transport_event_callback, + 
.db_event = ntb_transport_doorbell_callback, +}; + MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); /* Module setup and teardown */ static int ntb_handle_module_events(struct module *m, int what, void *arg) { int err = 0; switch (what) { case MOD_LOAD: err = ntb_setup_interface(); break; case MOD_UNLOAD: err = ntb_teardown_interface(); break; default: err = EOPNOTSUPP; break; } return (err); } static moduledata_t if_ntb_mod = { "if_ntb", ntb_handle_module_events, NULL }; DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); static int ntb_setup_interface(void) { struct ifnet *ifp; struct ntb_queue_handlers handlers = { ntb_net_rx_handler, ntb_net_tx_handler, ntb_net_event_handler }; + int rc; net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); if (net_softc.ntb == NULL) { printf("ntb: Cannot find devclass\n"); return (ENXIO); } - ntb_transport_init(net_softc.ntb); + rc = ntb_transport_init(net_softc.ntb); + if (rc != 0) { + printf("ntb: Cannot init transport: %d\n", rc); + return (rc); + } ifp = net_softc.ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { - printf("ntb: cannot allocate ifnet structure\n"); + ntb_transport_free(&net_softc); + printf("ntb: Cannot allocate ifnet structure\n"); return (ENOMEM); } net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, &handlers); if_initname(ifp, "ntb", 0); ifp->if_init = ntb_net_init; ifp->if_softc = &net_softc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; ifp->if_ioctl = ntb_ioctl; ifp->if_start = ntb_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); create_random_local_eui48(net_softc.eaddr); ether_ifattach(ifp, net_softc.eaddr); ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; ntb_transport_link_up(net_softc.qp); net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + sizeof(struct ether_header); return (0); } static int ntb_teardown_interface(void) { if (net_softc.qp != NULL) ntb_transport_link_down(net_softc.qp); if (net_softc.ifp != NULL) { ether_ifdetach(net_softc.ifp); if_free(net_softc.ifp); } if (net_softc.qp != NULL) { ntb_transport_free_queue(net_softc.qp); ntb_transport_free(&net_softc); } return (0); } /* Network device interface */ static void ntb_net_init(void *arg) { struct ntb_transport_ctx *ntb_softc = arg; struct ifnet *ifp = ntb_softc->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_flags |= IFF_UP; if_link_state_change(ifp, LINK_STATE_UP); } static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ntb_transport_ctx *nt = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - ETHER_HDR_LEN - ETHER_CRC_LEN) { error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void ntb_start(struct ifnet *ifp) { struct mbuf *m_head; struct ntb_transport_ctx *nt = ifp->if_softc; int rc; mtx_lock(&nt->tx_lock); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; CTR0(KTR_NTB, "TX: ntb_start"); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); CTR1(KTR_NTB, "TX: start mbuf %p", m_head); rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, m_length(m_head, NULL)); if (rc != 0) { CTR1(KTR_NTB, "TX: could not tx mbuf %p. 
Returning to snd q", m_head); if (rc == EAGAIN) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); callout_reset(&nt->qp->queue_full, hz / 1000, ntb_qp_full, ifp); } break; } } mtx_unlock(&nt->tx_lock); } /* Network Device Callbacks */ static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { m_freem(data); CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); } static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct mbuf *m = data; struct ifnet *ifp = qp_data; CTR0(KTR_NTB, "RX: rx handler"); (*ifp->if_input)(ifp, m); } static void ntb_net_event_handler(void *data, enum ntb_link_event status) { struct ifnet *ifp; ifp = data; (void)ifp; /* XXX The Linux driver munges with the carrier status here. */ switch (status) { case NTB_LINK_DOWN: break; case NTB_LINK_UP: break; default: panic("Bogus ntb_link_event %u\n", status); } } /* Transport Init and teardown */ static int ntb_transport_init(struct ntb_softc *ntb) { struct ntb_transport_ctx *nt = &net_softc; + struct ntb_transport_mw *mw; + uint64_t qp_bitmap; int rc; - uint8_t i; + unsigned i; nt->mw_count = ntb_mw_count(ntb); - if (max_num_clients == 0) - nt->qp_count = MIN(ntb_get_max_cbs(ntb), ntb_mw_count(ntb)); - else - nt->qp_count = MIN(ntb_get_max_cbs(ntb), max_num_clients); + for (i = 0; i < nt->mw_count; i++) { + mw = &nt->mw_vec[i]; - ntb_register_transport(ntb, nt); + rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase, + &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size); + if (rc != 0) + goto err; + + mw->buff_size = 0; + mw->xlat_size = 0; + mw->virt_addr = 0; + mw->dma_addr = 0; + } + + qp_bitmap = ntb_db_valid_mask(ntb); + nt->qp_count = flsll(qp_bitmap); + KASSERT(nt->qp_count != 0, ("bogus db bitmap")); + nt->qp_count -= 1; + + if (max_num_clients != 0 && max_num_clients < nt->qp_count) + nt->qp_count = max_num_clients; + else if (nt->mw_count < nt->qp_count) + nt->qp_count = nt->mw_count; + KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count")); + mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF, M_WAITOK | M_ZERO); - KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count")); - for (i = 0; i < nt->qp_count; i++) { set_bit(i, &nt->qp_bitmap); + set_bit(i, &nt->qp_bitmap_free); ntb_transport_init_queue(nt, i); } callout_init(&nt->link_work, 0); - rc = ntb_register_event_callback(ntb, ntb_transport_event_callback); + rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops); if (rc != 0) goto err; - if (ntb_link_is_up(ntb)) { - if (bootverbose) - device_printf(ntb_get_device(ntb), "link up\n"); - callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); - } - + nt->link_is_up = false; + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); + ntb_link_event(ntb); return (0); err: free(nt->qp_vec, M_NTB_IF); - ntb_unregister_transport(ntb); + nt->qp_vec = NULL; return (rc); } static void -ntb_transport_free(void *transport) +ntb_transport_free(struct ntb_transport_ctx *nt) { - struct ntb_transport_ctx *nt = transport; struct ntb_softc *ntb = nt->ntb; + struct _qpset qp_bitmap_alloc; uint8_t i; ntb_transport_link_cleanup(nt); callout_drain(&nt->link_work); - /* verify that all the qps are freed */ + BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); + BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); + + /* Verify that all the QPs are freed */ for (i = 0; i < 
nt->qp_count; i++) - if (!test_bit(i, &nt->qp_bitmap)) + if (test_bit(i, &qp_bitmap_alloc)) ntb_transport_free_queue(&nt->qp_vec[i]); - ntb_unregister_event_callback(ntb); + ntb_link_disable(ntb); + ntb_clear_ctx(ntb); - for (i = 0; i < NTB_MAX_NUM_MW; i++) + for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); free(nt->qp_vec, M_NTB_IF); - ntb_unregister_transport(ntb); } static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) { + struct ntb_transport_mw *mw; struct ntb_transport_qp *qp; - unsigned int num_qps_mw, tx_size; - uint8_t mw_num, mw_count; + vm_paddr_t mw_base; + uint64_t mw_size, qp_offset; + size_t tx_size; + unsigned num_qps_mw, mw_num, mw_count; - mw_count = ntb_mw_count(nt->ntb); + mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); + mw = &nt->mw_vec[mw_num]; qp = &nt->qp_vec[qp_num]; qp->qp_num = qp_num; qp->transport = nt; qp->ntb = nt->ntb; qp->link_is_up = false; qp->client_ready = false; qp->event_handler = NULL; if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; - tx_size = ntb_get_mw_size(qp->ntb, mw_num) / num_qps_mw; - qp->rx_info = (struct ntb_rx_info *) - ((char *)ntb_get_mw_vbase(qp->ntb, mw_num) + - (qp_num / mw_count * tx_size)); + mw_base = mw->phys_addr; + mw_size = mw->phys_size; + + tx_size = mw_size / num_qps_mw; + qp_offset = tx_size * (qp_num / mw_count); + + qp->tx_mw = (char *)mw->vbase + qp_offset; + KASSERT(qp->tx_mw != NULL, ("uh oh?")); + + /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ + qp->tx_mw_phys = mw_base + qp_offset; + KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); + tx_size -= sizeof(struct ntb_rx_info); + qp->rx_info = (void *)((char *)qp->tx_mw + tx_size); - qp->tx_mw = qp->rx_info + 1; /* Due to house-keeping, there must be at least 2 buffs */ qp->tx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header), tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; callout_init(&qp->link_work, 0); callout_init(&qp->queue_full, 1); callout_init(&qp->rx_full, 1); mtx_init(&qp->ntb_rx_pend_q_lock, "ntb rx pend q", NULL, MTX_SPIN); mtx_init(&qp->ntb_rx_free_q_lock, "ntb rx free q", NULL, MTX_SPIN); mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); - TASK_INIT(&qp->rx_completion_task, 0, ntb_rx_completion_task, qp); + TASK_INIT(&qp->rx_completion_task, 0, ntb_complete_rxc, qp); + TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); STAILQ_INIT(&qp->rx_pend_q); STAILQ_INIT(&qp->rx_free_q); STAILQ_INIT(&qp->tx_free_q); } static void ntb_transport_free_queue(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; if (qp == NULL) return; callout_drain(&qp->link_work); - ntb_unregister_db_callback(qp->ntb, qp->qp_num); + ntb_db_set_mask(qp->ntb, 1ull << qp->qp_num); + taskqueue_drain(taskqueue_swi, &qp->rxc_db_work); + taskqueue_drain(taskqueue_swi, &qp->rx_completion_task); + qp->cb_data = NULL; + qp->rx_handler = NULL; + qp->tx_handler = NULL; + qp->event_handler = NULL; + while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) free(entry, M_NTB_IF); - set_bit(qp->qp_num, &qp->transport->qp_bitmap); + set_bit(qp->qp_num, &qp->transport->qp_bitmap_free); } /** * ntb_transport_create_queue - Create a new NTB transport layer queue * @rx_handler: receive
callback function * @tx_handler: transmit callback function * @event_handler: event callback function * * Create a new NTB transport layer queue and provide the queue with a callback * routine for both transmit and receive. The receive callback routine will be * used to pass up data when the transport has received it on the queue. The * transmit callback routine will be called when the transport has completed the * transmission of the data on the queue and the data is ready to be freed. * * RETURNS: pointer to newly created ntb_queue, NULL on error. */ static struct ntb_transport_qp * -ntb_transport_create_queue(void *data, struct ntb_softc *pdev, +ntb_transport_create_queue(void *data, struct ntb_softc *ntb, const struct ntb_queue_handlers *handlers) { struct ntb_queue_entry *entry; struct ntb_transport_qp *qp; struct ntb_transport_ctx *nt; unsigned int free_queue; - int rc, i; + int i; - nt = ntb_find_transport(pdev); - if (nt == NULL) - goto err; + nt = ntb_get_ctx(ntb, NULL); + KASSERT(nt != NULL, ("bogus")); - free_queue = ffs_bit(&nt->qp_bitmap); + free_queue = ffs_bit(&nt->qp_bitmap_free); if (free_queue == 0) - goto err; + return (NULL); /* decrement free_queue to make it zero based */ free_queue--; - clear_bit(free_queue, &nt->qp_bitmap); qp = &nt->qp_vec[free_queue]; + clear_bit(qp->qp_num, &nt->qp_bitmap_free); qp->cb_data = data; qp->rx_handler = handlers->rx_handler; qp->tx_handler = handlers->tx_handler; qp->event_handler = handlers->event_handler; for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { - entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, - M_WAITOK|M_ZERO); + entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); entry->cb_data = nt->ifp; entry->buf = NULL; entry->len = transport_mtu; ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); } for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { - entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, - M_WAITOK|M_ZERO); + entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } - rc = ntb_register_db_callback(qp->ntb, free_queue, qp, - ntb_transport_rxc_db); - if (rc != 0) - goto err1; - + ntb_db_clear(ntb, 1ull << qp->qp_num); + ntb_db_clear_mask(ntb, 1ull << qp->qp_num); return (qp); - -err1: - while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) - free(entry, M_NTB_IF); - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) - free(entry, M_NTB_IF); - set_bit(free_queue, &nt->qp_bitmap); -err: - return (NULL); } /** * ntb_transport_link_up - Notify NTB transport of client readiness to use queue * @qp: NTB transport layer queue to be enabled * * Notify NTB transport layer of client readiness to use queue */ static void ntb_transport_link_up(struct ntb_transport_qp *qp) { if (qp == NULL) return; qp->client_ready = true; if (bootverbose) device_printf(ntb_get_device(qp->ntb), "qp client ready\n"); if (qp->transport->link_is_up) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } /* Transport Tx */ /** * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry * @qp: NTB transport layer queue the entry is to be enqueued on * @cb: per buffer pointer for callback function to use * @data: pointer to data buffer that will be sent * @len: length of the data buffer * * Enqueue a new transmit buffer onto the transport queue from which an NTB * payload will be transmitted. This assumes that a lock is being held to * serialize access to the qp. * * RETURNS: An appropriate ERRNO error value on error, or zero for success.
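 * A minimal caller sketch, mirroring ntb_start() above (all names come
 * from this file; nothing new is assumed):
 *
 *	rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
 *	    m_length(m_head, NULL));
 *
 * EAGAIN means the TX ring is full; ntb_start() handles it by
 * prepending the mbuf back onto the send queue and retrying from the
 * queue_full callout.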
*/ static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) { struct ntb_queue_entry *entry; int rc; if (qp == NULL || !qp->link_is_up || len == 0) { CTR0(KTR_NTB, "TX: link not up"); return (EINVAL); } entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry == NULL) { CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); return (ENOMEM); } CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); entry->cb_data = cb; entry->buf = data; entry->len = len; entry->flags = 0; rc = ntb_process_tx(qp, entry); if (rc != 0) { ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: process_tx failed. Returning entry %p to tx_free_q", entry); } return (rc); } static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) { void *offset; offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index; CTR3(KTR_NTB, "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u", qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); if (qp->tx_index == qp->remote_rx_info->entry) { CTR0(KTR_NTB, "TX: ring full"); qp->tx_ring_full++; return (EAGAIN); } if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { if (qp->tx_handler != NULL) qp->tx_handler(qp, qp->cb_data, entry->buf, EIO); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: frame too big. returning entry %p to tx_free_q", entry); return (0); } CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset); - ntb_tx_copy_task(qp, entry, offset); + ntb_memcpy_tx(qp, entry, offset); qp->tx_index++; qp->tx_index %= qp->tx_max_entry; qp->tx_pkts++; return (0); } static void -ntb_tx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, +ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ntb_payload_header *hdr; - CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); - if (entry->buf != NULL) - m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); - + /* This piece is from Linux' ntb_async_tx() */ hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - sizeof(struct ntb_payload_header)); + entry->x_hdr = hdr; hdr->len = entry->len; /* TODO: replace with bus_space_write */ hdr->ver = qp->tx_pkts; /* TODO: replace with bus_space_write */ - wmb(); + + /* This piece is ntb_memcpy_tx() */ + CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); + if (entry->buf != NULL) { + m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); + + /* + * Ensure that the data is fully copied before setting the + * flags + */ + wmb(); + } + + /* The rest is ntb_tx_copy_callback() */ /* TODO: replace with bus_space_write */ hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG; ntb_peer_db_set(qp->ntb, 1ull << qp->qp_num); /* * The entry length can only be zero if the packet is intended to be a * "link down" or similar. Since no payload is being sent in these * cases, there is nothing to add to the completion queue. */ if (entry->len > 0) { qp->tx_bytes += entry->len; if (qp->tx_handler) qp->tx_handler(qp, qp->cb_data, entry->cb_data, entry->len); } CTR2(KTR_NTB, "TX: entry %p sent. 
hdr->ver = %d, Returning to tx_free_q", entry, hdr->ver); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } static void ntb_qp_full(void *arg) { CTR0(KTR_NTB, "TX: qp_full callout"); ntb_start(arg); } /* Transport Rx */ static void ntb_rx_pendq_full(void *arg) { CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout"); ntb_transport_rxc_db(arg, 0); } -static int -ntb_transport_rxc_db(void *arg, int dummy __unused) +static void +ntb_transport_rxc_db(void *arg, int pending __unused) { struct ntb_transport_qp *qp = arg; uint64_t i; int rc; /* * Limit the number of packets processed in a single interrupt to * provide fairness to others */ - mtx_lock(&qp->transport->rx_lock); CTR0(KTR_NTB, "RX: transport_rx"); - for (i = 0; i < MIN(qp->rx_max_entry, INT_MAX); i++) { + mtx_lock(&qp->transport->rx_lock); + for (i = 0; i < qp->rx_max_entry; i++) { rc = ntb_process_rxc(qp); if (rc != 0) { CTR0(KTR_NTB, "RX: process_rxc failed"); break; } } mtx_unlock(&qp->transport->rx_lock); - return ((int)i); + if (i == qp->rx_max_entry) + taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); + else if ((ntb_db_read(qp->ntb) & (1ull << qp->qp_num)) != 0) { + /* If db is set, clear it and read it back to commit clear. */ + ntb_db_clear(qp->ntb, 1ull << qp->qp_num); + (void)ntb_db_read(qp->ntb); + + /* + * An interrupt may have arrived between finishing + * ntb_process_rxc and clearing the doorbell bit: there might + * be some more work to do. + */ + taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); + } } static int ntb_process_rxc(struct ntb_transport_qp *qp) { struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; void *offset; offset = (void *) ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index); hdr = (void *) ((char *)offset + qp->rx_max_frame - sizeof(struct ntb_payload_header)); CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); - entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); - if (entry == NULL) { - qp->rx_err_no_buf++; - CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); - return (ENOMEM); - } - callout_stop(&qp->rx_full); - CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); - if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) { - CTR1(KTR_NTB, - "RX: hdr not done. Returning entry %p to rx_pend_q", entry); - ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); + CTR0(KTR_NTB, "RX: hdr not done"); qp->rx_ring_empty++; return (EAGAIN); } - if (hdr->ver != (uint32_t) qp->rx_pkts) { - CTR3(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " - "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts, - entry); - ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); + if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { + CTR0(KTR_NTB, "RX: link down"); + ntb_qp_link_down(qp); + hdr->flags = 0; + return (EAGAIN); + } + + if (hdr->ver != (uint32_t)qp->rx_pkts) { + CTR2(KTR_NTB, "RX: ver != rx_pkts (%x != %lx), dropping", + hdr->ver, qp->rx_pkts); qp->rx_err_ver++; return (EIO); } - if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { - ntb_qp_link_down(qp); - CTR1(KTR_NTB, - "RX: link down.
adding entry %p back to rx_pend_q", entry); - ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); - goto out; + entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); + if (entry == NULL) { + qp->rx_err_no_buf++; + CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); + return (EAGAIN); } + callout_stop(&qp->rx_full); + CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); - if (hdr->len <= entry->len) { - entry->len = hdr->len; - ntb_rx_copy_task(qp, entry, offset); - } else { - CTR1(KTR_NTB, - "RX: len too long. Returning entry %p to rx_pend_q", entry); - ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); + entry->x_hdr = hdr; + entry->index = qp->rx_index; + if (hdr->len > entry->len) { + CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju", + (uintmax_t)hdr->len, (uintmax_t)entry->len); qp->rx_err_oflow++; - } - qp->rx_bytes += hdr->len; - qp->rx_pkts++; - CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); + entry->len = -EIO; + entry->flags |= IF_NTB_DESC_DONE_FLAG; + ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q); + taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); + } else { + qp->rx_bytes += hdr->len; + qp->rx_pkts++; -out: - /* Ensure that the data is globally visible before clearing the flag */ - wmb(); - hdr->flags = 0; - /* TODO: replace with bus_space_write */ - qp->rx_info->entry = qp->rx_index; + CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); + entry->len = hdr->len; + + ntb_rx_copy_task(qp, entry, offset); + } + qp->rx_index++; qp->rx_index %= qp->rx_max_entry; - return (0); } static void ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ifnet *ifp = entry->cb_data; unsigned int len = entry->len; struct mbuf *m; CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); m = m_devget(offset, len, 0, ifp, NULL); m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; entry->buf = (void *)m; + /* Ensure that the data is globally visible before clearing the flag */ + wmb(); + entry->x_hdr->flags = 0; + /* TODO: replace with bus_space_write */ + qp->rx_info->entry = qp->rx_index; + CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p. 
Adding entry to rx_free_q", entry, m); ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q); taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); } static void -ntb_rx_completion_task(void *arg, int pending) +ntb_complete_rxc(void *arg, int pending) { struct ntb_transport_qp *qp = arg; struct mbuf *m; struct ntb_queue_entry *entry; CTR0(KTR_NTB, "RX: rx_completion_task"); while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) { m = entry->buf; CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); if (qp->rx_handler && qp->client_ready) qp->rx_handler(qp, qp->cb_data, m, entry->len); entry->buf = NULL; entry->len = qp->transport->bufsize; CTR1(KTR_NTB,"RX: entry %p removed from rx_free_q " "and added to rx_pend_q", entry); ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); if (qp->rx_err_no_buf > qp->last_rx_no_buf) { qp->last_rx_no_buf = qp->rx_err_no_buf; CTR0(KTR_NTB, "RX: could spawn rx task"); callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full, qp); } } } +static void +ntb_transport_doorbell_callback(void *data, int vector) +{ + struct ntb_transport_ctx *nt = data; + struct ntb_transport_qp *qp; + struct _qpset db_bits; + uint64_t vec_mask; + unsigned qp_num; + + BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &db_bits); + BIT_NAND(QP_SETSIZE, &db_bits, &nt->qp_bitmap_free); + + vec_mask = ntb_db_vector_mask(nt->ntb, vector); + while (vec_mask != 0) { + qp_num = ffsl(vec_mask); + /* i386 doesn't have ffsll(), fake it */ + if (qp_num == 0) { + qp_num = ffsl(vec_mask >> 32); + KASSERT(qp_num != 0, ("ffs")); + qp_num += 32; + } + qp_num--; + + if (test_bit(qp_num, &db_bits)) { + qp = &nt->qp_vec[qp_num]; + taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); + } + + vec_mask &= ~(1ull << qp_num); + } +} + /* Link Event handler */ static void -ntb_transport_event_callback(void *data, enum ntb_hw_event event) +ntb_transport_event_callback(void *data) { struct ntb_transport_ctx *nt = data; - switch (event) { - case NTB_EVENT_HW_LINK_UP: + if (ntb_link_is_up(nt->ntb, NULL, NULL)) { if (bootverbose) device_printf(ntb_get_device(nt->ntb), "HW link up\n"); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); - break; - case NTB_EVENT_HW_LINK_DOWN: + } else { if (bootverbose) device_printf(ntb_get_device(nt->ntb), "HW link down\n"); ntb_transport_link_cleanup(nt); - break; - default: - panic("ntb: Unknown NTB event"); } } /* Link bring up */ static void ntb_transport_link_work(void *arg) { struct ntb_transport_ctx *nt = arg; struct ntb_softc *ntb = nt->ntb; struct ntb_transport_qp *qp; - uint64_t val64; - uint32_t val, i, num_mw; + uint64_t val64, size; + uint32_t val; + unsigned i; int rc; - num_mw = ntb_mw_count(ntb); - /* send the local info, in the opposite order of the way we read it */ - for (i = 0; i < num_mw; i++) { - rc = ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), - (uint64_t)ntb_get_mw_size(ntb, i) >> 32); - if (rc != 0) - goto out; + for (i = 0; i < nt->mw_count; i++) { + size = nt->mw_vec[i].phys_size; - rc = ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), - (uint32_t)ntb_get_mw_size(ntb, i)); - if (rc != 0) - goto out; + if (max_mw_size != 0 && size > max_mw_size) + size = max_mw_size; + + ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), + size >> 32); + ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), size); } - rc = ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, num_mw); - if (rc != 0) - goto out; + ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, nt->mw_count); - rc = ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, 
nt->qp_count); - if (rc != 0) - goto out; + ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, nt->qp_count); - rc = ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); - if (rc != 0) - goto out; + ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); /* Query the remote side for its info */ - rc = ntb_spad_read(ntb, IF_NTB_VERSION, &val); - if (rc != 0) - goto out; - + val = 0; + ntb_spad_read(ntb, IF_NTB_VERSION, &val); if (val != NTB_TRANSPORT_VERSION) goto out; - rc = ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val); - if (rc != 0) - goto out; - + ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val); if (val != nt->qp_count) goto out; - rc = ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val); - if (rc != 0) + ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val); + if (val != nt->mw_count) goto out; - if (val != num_mw) - goto out; - - for (i = 0; i < num_mw; i++) { - rc = ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), - &val); - if (rc != 0) - goto free_mws; - + for (i = 0; i < nt->mw_count; i++) { + ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), &val); val64 = (uint64_t)val << 32; - rc = ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), - &val); - if (rc != 0) - goto free_mws; - + ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), &val); val64 |= val; rc = ntb_set_mw(nt, i, val64); if (rc != 0) goto free_mws; } nt->link_is_up = true; if (bootverbose) device_printf(ntb_get_device(ntb), "transport link up\n"); for (i = 0; i < nt->qp_count; i++) { qp = &nt->qp_vec[i]; ntb_transport_setup_qp_mw(nt, i); if (qp->client_ready) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } return; free_mws: - for (i = 0; i < NTB_MAX_NUM_MW; i++) + for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); out: - if (ntb_link_is_up(ntb)) + if (ntb_link_is_up(ntb, NULL, NULL)) callout_reset(&nt->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); } static int -ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, unsigned int size) +ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, unsigned size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; + unsigned xlat_size, buff_size; + int rc; + xlat_size = roundup(size, mw->xlat_align_size); + buff_size = roundup(size, mw->xlat_align); + /* No need to re-setup */ - if (mw->xlat_size == size) + if (mw->xlat_size == xlat_size) return (0); - if (mw->xlat_size != 0) + if (mw->buff_size != 0) ntb_free_mw(nt, num_mw); - /* Alloc memory for receiving data. Must be 4k aligned */ - mw->xlat_size = size; + /* Alloc memory for receiving data. Must be aligned */ + mw->xlat_size = xlat_size; + mw->buff_size = buff_size; - mw->virt_addr = contigmalloc(mw->xlat_size, M_NTB_IF, M_ZERO, 0, - BUS_SPACE_MAXADDR, mw->xlat_size, 0); + mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0, + BUS_SPACE_MAXADDR, mw->xlat_align, 0); if (mw->virt_addr == NULL) { mw->xlat_size = 0; + mw->buff_size = 0; printf("ntb: Unable to allocate MW buffer of size %u\n", buff_size); return (ENOMEM); } /* TODO: replace with bus_space_* functions */ mw->dma_addr = vtophys(mw->virt_addr); /* * Ensure that the allocation from contigmalloc is aligned as * requested. XXX: This may not be needed -- brought in for parity * with the Linux driver.
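 * Note that contigmalloc(9) was already asked for mw->xlat_align
 * alignment above:
 *
 *	mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0,
 *	    BUS_SPACE_MAXADDR, mw->xlat_align, 0);
 *
 * so the check below should only fire if that contract is violated.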
*/ - if (mw->dma_addr % size != 0) { + if (mw->dma_addr % mw->xlat_align != 0) { device_printf(ntb_get_device(nt->ntb), "DMA memory 0x%jx not aligned to 0x%zx\n", (uintmax_t)mw->dma_addr, mw->xlat_align); ntb_free_mw(nt, num_mw); return (ENOMEM); } /* Notify HW the memory location of the receive buffer */ - ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr); + rc = ntb_mw_set_trans(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size); + if (rc) { + device_printf(ntb_get_device(nt->ntb), + "Unable to set mw%d translation\n", num_mw); + ntb_free_mw(nt, num_mw); + return (rc); + } return (0); } static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; if (mw->virt_addr == NULL) return; + ntb_mw_clear_trans(nt->ntb, num_mw); contigfree(mw->virt_addr, mw->xlat_size, M_NTB_IF); + mw->xlat_size = 0; + mw->buff_size = 0; mw->virt_addr = NULL; } -static void +static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; + struct ntb_transport_mw *mw; void *offset; - unsigned int rx_size, num_qps_mw; - uint8_t mw_num, mw_count; - unsigned int i; + uint64_t i; + size_t rx_size; + unsigned num_qps_mw, mw_num, mw_count; - mw_count = ntb_mw_count(nt->ntb); + mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); + mw = &nt->mw_vec[mw_num]; + if (mw->virt_addr == NULL) + return (ENOMEM); + if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; - rx_size = nt->mw_vec[mw_num].xlat_size / num_qps_mw; - qp->remote_rx_info = (void *)((uint8_t *)nt->mw_vec[mw_num].virt_addr + - (qp_num / mw_count * rx_size)); + rx_size = mw->xlat_size / num_qps_mw; + qp->rx_buff = (char *)mw->virt_addr + rx_size * (qp_num / mw_count); rx_size -= sizeof(struct ntb_rx_info); - qp->rx_buff = qp->remote_rx_info + 1; + qp->remote_rx_info = (void *)((char *)qp->rx_buff + rx_size); + /* Due to house-keeping, there must be at least 2 buffs */ qp->rx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header), rx_size / 2); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; qp->remote_rx_info->entry = qp->rx_max_entry - 1; - /* setup the hdr offsets with 0's */ + /* Set up the hdr offsets with 0s */ for (i = 0; i < qp->rx_max_entry; i++) { offset = (void *)((uint8_t *)qp->rx_buff + qp->rx_max_frame * (i + 1) - sizeof(struct ntb_payload_header)); memset(offset, 0, sizeof(struct ntb_payload_header)); } qp->rx_pkts = 0; qp->tx_pkts = 0; qp->tx_index = 0; + + return (0); } static void ntb_qp_link_work(void *arg) { struct ntb_transport_qp *qp = arg; struct ntb_softc *ntb = qp->ntb; struct ntb_transport_ctx *nt = qp->transport; - int rc, val; + uint32_t val; + ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val); - rc = ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &val); - if (rc != 0) - return; + ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num)); - rc = ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | 1 << qp->qp_num); - /* query remote spad for qp ready bits */ - rc = ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val); + ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &val); /* See if the remote side is up */ - if ((1 << qp->qp_num & val) != 0) { + if ((val & (1ull << qp->qp_num)) != 0) { + if (bootverbose) + device_printf(ntb_get_device(ntb), "qp
link up\n"); - } else if (nt->link_is_up) { + + taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); + } else if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); - } } /* Link down event*/ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { - uint8_t i; + struct ntb_transport_qp *qp; + struct _qpset qp_bitmap_alloc; + unsigned i; + BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); + BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); + /* Pass along the info to any clients */ for (i = 0; i < nt->qp_count; i++) - if (!test_bit(i, &nt->qp_bitmap)) - ntb_qp_link_down(&nt->qp_vec[i]); + if (test_bit(i, &qp_bitmap_alloc)) { + qp = &nt->qp_vec[i]; + ntb_qp_link_cleanup(qp); + callout_drain(&qp->link_work); + } if (!nt->link_is_up) callout_drain(&nt->link_work); - else - nt->link_is_up = false; /* * The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed */ for (i = 0; i < IF_NTB_MAX_SPAD; i++) ntb_spad_write(nt->ntb, i, 0); } static void ntb_qp_link_down(struct ntb_transport_qp *qp) { ntb_qp_link_cleanup(qp); } static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; if (!qp->link_is_up) { callout_drain(&qp->link_work); return; } + qp->link_is_up = false; + if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_DOWN); - qp->link_is_up = false; - if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link commanded down */ /** * ntb_transport_link_down - Notify NTB transport to no longer enqueue data * @qp: NTB transport layer queue to be disabled * * Notify NTB transport layer of client's desire to no longer receive data on * transport queue specified. It is the client's responsibility to ensure all * entries on queue are purged or otherwise handled appropriately. 
*/ static void ntb_transport_link_down(struct ntb_transport_qp *qp) { - int rc, val; + uint32_t val; if (qp == NULL) return; qp->client_ready = false; - rc = ntb_peer_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val); - if (rc != 0) - return; + ntb_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val); - rc = ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS, + ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS, val & ~(1 << qp->qp_num)); if (qp->link_is_up) ntb_send_link_down(qp); else callout_drain(&qp->link_work); - } static void ntb_send_link_down(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; int i, rc; if (!qp->link_is_up) return; qp->link_is_up = false; for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry != NULL) break; pause("NTB Wait for link down", hz / 10); } if (entry == NULL) return; entry->cb_data = NULL; entry->buf = NULL; entry->len = 0; entry->flags = IF_NTB_LINK_DOWN_FLAG; mtx_lock(&qp->transport->tx_lock); rc = ntb_process_tx(qp, entry); if (rc != 0) printf("ntb: Failed to send link down\n"); mtx_unlock(&qp->transport->tx_lock); } /* List Management */ static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list) { mtx_lock_spin(lock); STAILQ_INSERT_TAIL(list, entry, entry); mtx_unlock_spin(lock); } static struct ntb_queue_entry * ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(list)) { entry = NULL; goto out; } entry = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, entry); out: mtx_unlock_spin(lock); return (entry); } /* Helper functions */ /* TODO: This too should really be part of the kernel */ #define EUI48_MULTICAST 1 << 0 #define EUI48_LOCALLY_ADMINISTERED 1 << 1 static void create_random_local_eui48(u_char *eaddr) { static uint8_t counter = 0; uint32_t seed = ticks; eaddr[0] = EUI48_LOCALLY_ADMINISTERED; memcpy(&eaddr[1], &seed, sizeof(uint32_t)); eaddr[5] = counter++; } /** * ntb_transport_max_size - Query the max payload size of a qp * @qp: NTB transport layer queue to be queried * * Query the maximum payload size permissible on the given qp * * RETURNS: the max payload size of a qp */ static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { if (qp == NULL) return (0); return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); } Index: head/sys/dev/ntb/ntb_hw/ntb_hw.c =================================================================== --- head/sys/dev/ntb/ntb_hw/ntb_hw.c (revision 289545) +++ head/sys/dev/ntb/ntb_hw/ntb_hw.c (revision 289546) @@ -1,2173 +1,2222 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ntb_regs.h" #include "ntb_hw.h" /* * The Non-Transparent Bridge (NTB) is a device on some Intel processors that * allows you to connect two systems using a PCI-e link. * * This module contains the hardware abstraction layer for the NTB. It allows * you to send and receive interrupts, map the memory windows and send and * receive messages in the scratch-pad registers. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, SOC_DB_COUNT) #define NTB_HB_TIMEOUT 1 /* second */ #define SOC_LINK_RECOVERY_TIME 500 /* ms */ #define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev)) enum ntb_device_type { NTB_XEON, NTB_SOC }; enum ntb_bar { NTB_CONFIG_BAR = 0, NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3, NTB_MAX_BARS }; /* Device features and workarounds */ #define HAS_FEATURE(feature) \ ((ntb->features & (feature)) != 0) struct ntb_hw_info { uint32_t device_id; const char *desc; enum ntb_device_type type; uint32_t features; }; struct ntb_pci_bar_info { bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; vm_paddr_t pbase; void *vbase; u_long size; /* Configuration register offsets */ uint32_t psz_off; uint32_t ssz_off; - uint32_t sbarbase_off; - uint32_t sbarlmt_off; uint32_t pbarxlat_off; }; struct ntb_int_info { struct resource *res; int rid; void *tag; }; -struct ntb_db_cb { - ntb_db_callback callback; - unsigned int db_num; - void *data; +struct ntb_vec { struct ntb_softc *ntb; - struct callout irq_work; - bool reserved; + uint32_t num; }; struct ntb_reg { uint32_t ntb_ctl; uint32_t lnk_sta; uint8_t db_size; unsigned mw_bar[NTB_MAX_BARS]; }; struct ntb_alt_reg { uint32_t db_bell; uint32_t db_mask; uint32_t spad; }; struct ntb_xlat_reg { - uint64_t bar0_base; - uint64_t bar2_xlat; - uint64_t bar2_limit; + uint32_t bar0_base; + uint32_t bar2_base; + uint32_t bar4_base; + uint32_t bar5_base; + + uint32_t bar2_xlat; + uint32_t bar4_xlat; + uint32_t bar5_xlat; + + uint32_t bar2_limit; + uint32_t bar4_limit; + uint32_t bar5_limit; }; struct ntb_b2b_addr { uint64_t bar0_addr; uint64_t bar2_addr64; uint64_t bar4_addr64; uint64_t bar4_addr32; uint64_t bar5_addr32; }; struct ntb_softc { device_t device; enum ntb_device_type type; uint64_t features; struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; uint32_t allocated_interrupts; struct callout heartbeat_timer; struct callout lr_timer; - void *ntb_transport; - ntb_event_callback event_cb; - struct ntb_db_cb *db_cb; - uint8_t max_cbs; + void *ntb_ctx; + const struct ntb_ctx_ops *ctx_ops; + struct ntb_vec *msix_vec; +#define CTX_LOCK(sc) mtx_lock_spin(&(sc)->ctx_lock) +#define CTX_UNLOCK(sc) 
mtx_unlock_spin(&(sc)->ctx_lock) +#define CTX_ASSERT(sc,f) mtx_assert(&(sc)->ctx_lock, (f)) + struct mtx ctx_lock; struct { uint32_t ldb; uint32_t ldb_mask; uint32_t bar4_xlat; uint32_t bar5_xlat; uint32_t spad_local; uint32_t spci_cmd; } reg_ofs; uint32_t ppd; uint8_t conn_type; uint8_t dev_type; - uint8_t link_status; uint8_t link_width; uint8_t link_speed; /* Offset of peer bar0 in B2B BAR */ uint64_t b2b_off; /* Memory window used to access peer bar0 */ #define B2B_MW_DISABLED UINT8_MAX uint8_t b2b_mw_idx; uint8_t mw_count; uint8_t spad_count; uint8_t db_count; uint8_t db_vec_count; uint8_t db_vec_shift; - /* Protects local DB mask and (h). */ -#define HW_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) -#define HW_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) -#define HW_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) + /* Protects local db_mask. */ +#define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) +#define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) +#define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) struct mtx db_mask_lock; - uint32_t ntb_ctl; /* (h) - SOC only */ - uint32_t lnk_sta; /* (h) - SOC only */ + uint32_t ntb_ctl; + uint32_t lnk_sta; uint64_t db_valid_mask; uint64_t db_link_mask; - uint64_t db_mask; /* (h) */ + uint64_t db_mask; int last_ts; /* ticks @ last irq */ const struct ntb_reg *reg; const struct ntb_alt_reg *self_reg; const struct ntb_alt_reg *peer_reg; const struct ntb_xlat_reg *xlat_reg; }; #ifdef __i386__ static __inline uint64_t bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #endif #define ntb_bar_read(SIZE, bar, offset) \ bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset)) #define ntb_bar_write(SIZE, bar, offset, val) \ bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) #define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) #define ntb_reg_write(SIZE, offset, val) \ ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) #define ntb_mw_read(SIZE, offset) \ ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset) #define ntb_mw_write(SIZE, offset, val) \ ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ offset, val) static int ntb_probe(device_t device); static int ntb_attach(device_t device); static int ntb_detach(device_t device); static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw); +static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar); +static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar, + uint32_t *base, uint32_t *xlat, uint32_t *lmt); static int ntb_map_pci_bars(struct ntb_softc *ntb); static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *); static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static void ntb_unmap_pci_bar(struct ntb_softc *ntb); static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); static int ntb_init_isr(struct ntb_softc *ntb); static int ntb_setup_legacy_interrupt(struct ntb_softc 
*ntb); static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors); static void ntb_teardown_interrupts(struct ntb_softc *ntb); static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector); -static void handle_irq(void *arg); -static void ntb_handle_legacy_interrupt(void *arg); -static void ntb_irq_work(void *arg); -static inline uint64_t ntb_db_read(struct ntb_softc *, uint64_t regoff); -static inline void ntb_db_write(struct ntb_softc *, uint64_t regoff, uint64_t val); -static inline void mask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx); -static inline void unmask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx); -static inline void ntb_db_set_mask(struct ntb_softc *, uint64_t bits); -static inline void ntb_db_clear_mask(struct ntb_softc *, uint64_t bits); -static int ntb_create_callbacks(struct ntb_softc *ntb, uint32_t num_vectors); -static void ntb_free_callbacks(struct ntb_softc *ntb); +static void ntb_interrupt(struct ntb_softc *, uint32_t vec); +static void ndev_vec_isr(void *arg); +static void ndev_irq_isr(void *arg); +static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff); +static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t val); +static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors); +static void ntb_free_msix_vec(struct ntb_softc *ntb); static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id); static void ntb_detect_max_mw(struct ntb_softc *ntb); static int ntb_detect_xeon(struct ntb_softc *ntb); static int ntb_detect_soc(struct ntb_softc *ntb); static int ntb_xeon_init_dev(struct ntb_softc *ntb); static int ntb_soc_init_dev(struct ntb_softc *ntb); static void ntb_teardown_xeon(struct ntb_softc *ntb); static void configure_soc_secondary_side_bars(struct ntb_softc *ntb); static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_sbar_base_and_limit(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx); static int xeon_setup_b2b_mw(struct ntb_softc *, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr); +static inline bool link_is_up(struct ntb_softc *ntb); +static inline bool soc_link_is_err(struct ntb_softc *ntb); +static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *); +static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *); static void soc_link_hb(void *arg); -static void ntb_handle_link_event(struct ntb_softc *ntb, int link_state); -static void ntb_link_disable(struct ntb_softc *ntb); -static void ntb_link_enable(struct ntb_softc *ntb); +static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec); static void recover_soc_link(void *arg); -static int ntb_poll_link(struct ntb_softc *ntb); +static bool ntb_poll_link(struct ntb_softc *ntb); static void save_bar_parameters(struct ntb_pci_bar_info *bar); static struct ntb_hw_info pci_ids[] = { { 0x0C4E8086, "Atom Processor S1200 NTB Primary B2B", NTB_SOC, 0 }, /* XXX: PS/SS IDs left out until they are supported. 
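 * Device IDs in this table are in pci_get_devid(9) layout,
 * (device << 16) | vendor: e.g. 0x37258086 is Intel (vendor 0x8086)
 * device 0x3725.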
*/ { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K }, { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x00000000, NULL, NTB_SOC, 0 } }; static const struct ntb_reg soc_reg = { .ntb_ctl = SOC_NTBCNTL_OFFSET, .lnk_sta = SOC_LINK_STATUS_OFFSET, .db_size = sizeof(uint64_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, }; static const struct ntb_alt_reg soc_b2b_reg = { .db_bell = SOC_B2B_DOORBELL_OFFSET, .spad = SOC_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg soc_sec_xlat = { #if 0 /* "FIXME" says the Linux driver. */ .bar0_base = SOC_SBAR0BASE_OFFSET, + .bar2_base = SOC_SBAR2BASE_OFFSET, + .bar4_base = SOC_SBAR4BASE_OFFSET, + .bar2_limit = SOC_SBAR2LMT_OFFSET, + .bar4_limit = SOC_SBAR4LMT_OFFSET, #endif + .bar2_xlat = SOC_SBAR2XLAT_OFFSET, + .bar4_xlat = SOC_SBAR4XLAT_OFFSET, }; static const struct ntb_reg xeon_reg = { .ntb_ctl = XEON_NTBCNTL_OFFSET, .lnk_sta = XEON_LINK_STATUS_OFFSET, .db_size = sizeof(uint16_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 }, }; static const struct ntb_alt_reg xeon_b2b_reg = { .db_bell = XEON_B2B_DOORBELL_OFFSET, .spad = XEON_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg xeon_sec_xlat = { .bar0_base = XEON_SBAR0BASE_OFFSET, + .bar2_base = XEON_SBAR2BASE_OFFSET, + .bar4_base = XEON_SBAR4BASE_OFFSET, + .bar5_base = XEON_SBAR5BASE_OFFSET, + .bar2_limit = XEON_SBAR2LMT_OFFSET, + .bar4_limit = XEON_SBAR4LMT_OFFSET, + .bar5_limit = XEON_SBAR5LMT_OFFSET, + .bar2_xlat = XEON_SBAR2XLAT_OFFSET, + .bar4_xlat = XEON_SBAR4XLAT_OFFSET, + .bar5_xlat = XEON_SBAR5XLAT_OFFSET, }; static const struct ntb_b2b_addr xeon_b2b_usd_addr = { .bar0_addr = XEON_B2B_BAR0_USD_ADDR, .bar2_addr64 = XEON_B2B_BAR2_USD_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_USD_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_USD_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_USD_ADDR32, }; static const struct ntb_b2b_addr xeon_b2b_dsd_addr = { .bar0_addr = XEON_B2B_BAR0_DSD_ADDR, .bar2_addr64 = XEON_B2B_BAR2_DSD_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_DSD_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_DSD_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_DSD_ADDR32, }; /* * OS <-> Driver interface structures */ MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); static device_method_t ntb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ntb_probe), DEVMETHOD(device_attach, ntb_attach), DEVMETHOD(device_detach, ntb_detach), DEVMETHOD_END }; static driver_t ntb_pci_driver = { "ntb_hw", ntb_pci_methods, sizeof(struct ntb_softc), }; static devclass_t ntb_devclass; DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL); MODULE_VERSION(ntb_hw, 1); SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls"); /* * OS <-> Driver linkage functions */ static int ntb_probe(device_t device) { struct ntb_hw_info *p; p = ntb_get_device_info(pci_get_devid(device)); if (p == NULL) return (ENXIO); device_set_desc(device, p->desc); return (0); } static int ntb_attach(device_t device) { struct ntb_softc *ntb; struct ntb_hw_info *p; 
int error; ntb = DEVICE2SOFTC(device); p = ntb_get_device_info(pci_get_devid(device)); ntb->device = device; ntb->type = p->type; ntb->features = p->features; ntb->b2b_mw_idx = B2B_MW_DISABLED; /* Heartbeat timer for NTB_SOC since there is no link interrupt */ callout_init(&ntb->heartbeat_timer, 1); callout_init(&ntb->lr_timer, 1); mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN); + mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_SPIN); if (ntb->type == NTB_SOC) error = ntb_detect_soc(ntb); else error = ntb_detect_xeon(ntb); if (error) goto out; ntb_detect_max_mw(ntb); error = ntb_map_pci_bars(ntb); if (error) goto out; if (ntb->type == NTB_SOC) error = ntb_soc_init_dev(ntb); else error = ntb_xeon_init_dev(ntb); if (error) goto out; error = ntb_init_isr(ntb); if (error) goto out; pci_enable_busmaster(ntb->device); out: if (error != 0) ntb_detach(device); return (error); } static int ntb_detach(device_t device) { struct ntb_softc *ntb; ntb = DEVICE2SOFTC(device); ntb_db_set_mask(ntb, ntb->db_valid_mask); callout_drain(&ntb->heartbeat_timer); callout_drain(&ntb->lr_timer); if (ntb->type == NTB_XEON) ntb_teardown_xeon(ntb); ntb_teardown_interrupts(ntb); mtx_destroy(&ntb->db_mask_lock); + mtx_destroy(&ntb->ctx_lock); /* * Redetect total MWs so we unmap properly -- in case we lowered the * maximum to work around Xeon errata. */ ntb_detect_max_mw(ntb); ntb_unmap_pci_bar(ntb); return (0); } /* * Driver internal routines */ static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw) { KASSERT(mw < ntb->mw_count || (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx), ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count)); + KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw")); return (ntb->reg->mw_bar[mw]); } +static inline bool +bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar) +{ + /* XXX This assertion could be stronger. 
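 *
 * For reference, the rule this encodes: the config BAR and BAR2 are always
 * 64-bit, while BAR4 is 64-bit only when it has not been split into the two
 * 32-bit BARs 4 and 5 (NTB_SPLIT_BAR).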
*/ + KASSERT(bar < NTB_MAX_BARS, ("bogus bar")); + return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR)); +} + +static inline void +bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base, + uint32_t *xlat, uint32_t *lmt) +{ + uint32_t basev, lmtv, xlatv; + + switch (bar) { + case NTB_B2B_BAR_1: + basev = ntb->xlat_reg->bar2_base; + lmtv = ntb->xlat_reg->bar2_limit; + xlatv = ntb->xlat_reg->bar2_xlat; + break; + case NTB_B2B_BAR_2: + basev = ntb->xlat_reg->bar4_base; + lmtv = ntb->xlat_reg->bar4_limit; + xlatv = ntb->xlat_reg->bar4_xlat; + break; + case NTB_B2B_BAR_3: + basev = ntb->xlat_reg->bar5_base; + lmtv = ntb->xlat_reg->bar5_limit; + xlatv = ntb->xlat_reg->bar5_xlat; + break; + default: + KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, + ("bad bar")); + basev = lmtv = xlatv = 0; + break; + } + + if (base != NULL) + *base = basev; + if (xlat != NULL) + *xlat = xlatv; + if (lmt != NULL) + *lmt = lmtv; +} + static int ntb_map_pci_bars(struct ntb_softc *ntb) { int rc; ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2); rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET; - ntb->bar_info[NTB_B2B_BAR_1].sbarbase_off = XEON_SBAR2BASE_OFFSET; - ntb->bar_info[NTB_B2B_BAR_1].sbarlmt_off = XEON_SBAR2LMT_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4); /* XXX Are shared MW B2Bs write-combining? */ if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR)) rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); else rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET; - ntb->bar_info[NTB_B2B_BAR_2].sbarbase_off = XEON_SBAR4BASE_OFFSET; - ntb->bar_info[NTB_B2B_BAR_2].sbarlmt_off = XEON_SBAR4LMT_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET; if (!HAS_FEATURE(NTB_SPLIT_BAR)) goto out; ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); else rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET; - ntb->bar_info[NTB_B2B_BAR_3].sbarbase_off = XEON_SBAR5BASE_OFFSET; - ntb->bar_info[NTB_B2B_BAR_3].sbarlmt_off = XEON_SBAR5LMT_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET; out: if (rc != 0) device_printf(ntb->device, "unable to allocate pci resource\n"); return (rc); } static void print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { device_printf(ntb->device, "Bar size = %lx, v %p, p %p\n", bar->size, bar->vbase, (void *)(bar->pbase)); } static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); print_map_success(ntb, bar); return (0); } static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { int rc; uint8_t bar_size_bits = 0; 
bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); /* * Ivytown NTB BAR sizes are misreported by the hardware due to a * hardware issue. To work around this, query the size it should be * configured to by the device and modify the resource to correspond to * this new size. The BIOS on systems with this problem is required to * provide enough address space to allow the driver to make this change * safely. * * Ideally I could have just specified the size when I allocated the * resource like: * bus_alloc_resource(ntb->device, * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, * 1ul << bar_size_bits, RF_ACTIVE); * but the PCI driver does not honor the size in this call, so we have * to modify it after the fact. */ if (HAS_FEATURE(NTB_BAR_SIZE_4K)) { if (bar->pci_resource_id == PCIR_BAR(2)) bar_size_bits = pci_read_config(ntb->device, XEON_PBAR23SZ_OFFSET, 1); else bar_size_bits = pci_read_config(ntb->device, XEON_PBAR45SZ_OFFSET, 1); rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, bar->pci_resource, bar->pbase, bar->pbase + (1ul << bar_size_bits) - 1); if (rc != 0) { device_printf(ntb->device, "unable to resize bar\n"); return (rc); } save_bar_parameters(bar); } /* Mark bar region as write combining to improve performance. */ rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, VM_MEMATTR_WRITE_COMBINING); if (rc != 0) { device_printf(ntb->device, "unable to mark bar as WRITE_COMBINING\n"); return (rc); } print_map_success(ntb, bar); return (0); } static void ntb_unmap_pci_bar(struct ntb_softc *ntb) { struct ntb_pci_bar_info *current_bar; int i; for (i = 0; i < NTB_MAX_BARS; i++) { current_bar = &ntb->bar_info[i]; if (current_bar->pci_resource != NULL) bus_release_resource(ntb->device, SYS_RES_MEMORY, current_bar->pci_resource_id, current_bar->pci_resource); } } static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; int rc; for (i = 0; i < num_vectors; i++) { ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); if (ntb->int_info[i].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, - INTR_MPSAFE | INTR_TYPE_MISC, NULL, handle_irq, - &ntb->db_cb[i], &ntb->int_info[i].tag); + INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, + &ntb->msix_vec[i], &ntb->int_info[i].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } return (0); } /* * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector * cannot be allocated for each MSI-X message. JHB seems to think remapping * should be okay. This tunable should enable us to test that hypothesis * when someone gets their hands on some Xeon hardware. */ static int ntb_force_remap_mode; SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" " to a smaller number of ithreads, even if the desired number are " "available"); /* * In case it is NOT ok, give consumers an abort button. 
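 *
 * For example, an administrator who distrusts remapped MSI-X can force
 * legacy INTx from loader.conf(5):
 *
 *	hw.ntb.prefer_intx_to_remap=1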
*/ static int ntb_prefer_intx; SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " "than remapping MSI-X messages over available slots (match Linux driver " "behavior)"); /* * Remap the desired number of MSI-X messages to available ithreads in a simple * round-robin fashion. */ static int ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) { u_int *vectors; uint32_t i; int rc; if (ntb_prefer_intx != 0) return (ENXIO); vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < desired; i++) vectors[i] = (i % avail) + 1; rc = pci_remap_msix(dev, desired, vectors); free(vectors, M_NTB); return (rc); } static int ntb_init_isr(struct ntb_softc *ntb) { uint32_t desired_vectors, num_vectors; - uint64_t mask; int rc; ntb->allocated_interrupts = 0; ntb->last_ts = ticks; /* - * On SOC, disable all interrupts. On XEON, disable all but Link - * Interrupt. The rest will be unmasked as callbacks are registered. + * Mask all doorbell interrupts. */ - mask = ntb->db_valid_mask; - if (ntb->type == NTB_XEON) - mask &= ~ntb->db_link_mask; - ntb_db_set_mask(ntb, mask); + ntb_db_set_mask(ntb, ntb->db_valid_mask); num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), ntb->db_count); if (desired_vectors >= 1) { rc = pci_alloc_msix(ntb->device, &num_vectors); if (ntb_force_remap_mode != 0 && rc == 0 && num_vectors == desired_vectors) num_vectors--; if (rc == 0 && num_vectors < desired_vectors) { rc = ntb_remap_msix(ntb->device, desired_vectors, num_vectors); if (rc == 0) num_vectors = desired_vectors; else pci_release_msi(ntb->device); } if (rc != 0) num_vectors = 1; } else num_vectors = 1; if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) { - /* - * If allocating MSI-X interrupts failed and we're forced to - * use legacy INTx anyway, the only limit on individual - * callbacks is the number of doorbell bits. - */ ntb->db_vec_count = 1; ntb->db_vec_shift = ntb->db_count; - ntb_create_callbacks(ntb, ntb->db_count); rc = ntb_setup_legacy_interrupt(ntb); } else { - ntb_create_callbacks(ntb, num_vectors); + ntb_create_msix_vec(ntb, num_vectors); rc = ntb_setup_msix(ntb, num_vectors); - if (rc == 0 && ntb->type == NTB_XEON) { - /* - * Prevent consumers from registering callbacks on the link event irq - * slot, from which they will never be called back. 
- */ - ntb->db_cb[num_vectors - 1].reserved = true; - ntb->max_cbs--; - } } if (rc != 0) { device_printf(ntb->device, "Error allocating interrupts: %d\n", rc); - ntb_free_callbacks(ntb); + ntb_free_msix_vec(ntb); } return (rc); } static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb) { int rc; ntb->int_info[0].rid = 0; ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); if (ntb->int_info[0].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[0].tag = NULL; ntb->allocated_interrupts = 1; rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, - INTR_MPSAFE | INTR_TYPE_MISC, NULL, ntb_handle_legacy_interrupt, + INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, ntb, &ntb->int_info[0].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } return (0); } static void ntb_teardown_interrupts(struct ntb_softc *ntb) { struct ntb_int_info *current_int; int i; for (i = 0; i < ntb->allocated_interrupts; i++) { current_int = &ntb->int_info[i]; if (current_int->tag != NULL) bus_teardown_intr(ntb->device, current_int->res, current_int->tag); if (current_int->res != NULL) bus_release_resource(ntb->device, SYS_RES_IRQ, rman_get_rid(current_int->res), current_int->res); } - ntb_free_callbacks(ntb); + ntb_free_msix_vec(ntb); pci_release_msi(ntb->device); } /* * Doorbell register and mask are 64-bit on SoC, 16-bit on Xeon. Abstract it * out to make code clearer. */ static inline uint64_t -ntb_db_read(struct ntb_softc *ntb, uint64_t regoff) +db_ioread(struct ntb_softc *ntb, uint64_t regoff) { if (ntb->type == NTB_SOC) return (ntb_reg_read(8, regoff)); KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); return (ntb_reg_read(2, regoff)); } static inline void -ntb_db_write(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) +db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { KASSERT((val & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(val & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (regoff == ntb->reg_ofs.ldb_mask) - HW_ASSERT(ntb, MA_OWNED); + DB_MASK_ASSERT(ntb, MA_OWNED); if (ntb->type == NTB_SOC) { ntb_reg_write(8, regoff, val); return; } KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); ntb_reg_write(2, regoff, (uint16_t)val); } -static inline void +void ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits) { - HW_LOCK(ntb); + DB_MASK_LOCK(ntb); ntb->db_mask |= bits; - ntb_db_write(ntb, ntb->reg_ofs.ldb_mask, ntb->db_mask); - HW_UNLOCK(ntb); + db_iowrite(ntb, ntb->reg_ofs.ldb_mask, ntb->db_mask); + DB_MASK_UNLOCK(ntb); } -static inline void +void ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits) { KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); - HW_LOCK(ntb); + DB_MASK_LOCK(ntb); ntb->db_mask &= ~bits; - ntb_db_write(ntb, ntb->reg_ofs.ldb_mask, ntb->db_mask); - HW_UNLOCK(ntb); + db_iowrite(ntb, ntb->reg_ofs.ldb_mask, ntb->db_mask); + DB_MASK_UNLOCK(ntb); } -static inline void -mask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx) +uint64_t +ntb_db_read(struct ntb_softc *ntb) { - uint64_t mask; - mask = 1ull << (idx * ntb->db_vec_shift); - ntb_db_set_mask(ntb, mask); + return (db_ioread(ntb, ntb->reg_ofs.ldb)); } -static inline void -unmask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx) +void +ntb_db_clear(struct ntb_softc *ntb, 
uint64_t bits) { - uint64_t mask; - mask = 1ull << (idx * ntb->db_vec_shift); - ntb_db_clear_mask(ntb, mask); + KASSERT((bits & ~ntb->db_valid_mask) == 0, + ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, + (uintmax_t)(bits & ~ntb->db_valid_mask), + (uintmax_t)ntb->db_valid_mask)); + + db_iowrite(ntb, ntb->reg_ofs.ldb, bits); } static inline uint64_t ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector) { uint64_t shift, mask; shift = ntb->db_vec_shift; mask = (1ull << shift) - 1; return (mask << (shift * db_vector)); } static void -handle_irq(void *arg) +ntb_interrupt(struct ntb_softc *ntb, uint32_t vec) { - struct ntb_db_cb *db_cb = arg; - struct ntb_softc *ntb = db_cb->ntb; uint64_t vec_mask; - int rc; ntb->last_ts = ticks; - vec_mask = ntb_vec_mask(ntb, db_cb->db_num); + vec_mask = ntb_vec_mask(ntb, vec); if ((vec_mask & ntb->db_link_mask) != 0) { - rc = ntb_poll_link(ntb); - if (rc != 0) - device_printf(ntb->device, - "Error determining link status\n"); + if (ntb_poll_link(ntb)) + ntb_link_event(ntb); } - if (db_cb->callback != NULL) { - KASSERT(!db_cb->reserved, ("user callback on link event cb")); - mask_ldb_interrupt(ntb, db_cb->db_num); - } + if ((vec_mask & ntb->db_valid_mask) != 0) + ntb_db_event(ntb, vec); +} - ntb_db_write(ntb, ntb->reg_ofs.ldb, vec_mask); +static void +ndev_vec_isr(void *arg) +{ + struct ntb_vec *nvec = arg; - if (db_cb->callback != NULL) - callout_reset(&db_cb->irq_work, 0, ntb_irq_work, db_cb); + ntb_interrupt(nvec->ntb, nvec->num); } static void -ntb_handle_legacy_interrupt(void *arg) +ndev_irq_isr(void *arg) { - struct ntb_softc *ntb = arg; - unsigned int i; - uint64_t ldb; - - ldb = ntb_db_read(ntb, ntb->reg_ofs.ldb); - while (ldb != 0) { - i = ffs(ldb); - ldb &= ldb - 1; - handle_irq(&ntb->db_cb[i]); - } + /* If we couldn't set up MSI-X, we only have the one vector. 
*/ + ntb_interrupt(arg, 0); } static int -ntb_create_callbacks(struct ntb_softc *ntb, uint32_t num_vectors) +ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; - ntb->max_cbs = num_vectors; - ntb->db_cb = malloc(num_vectors * sizeof(*ntb->db_cb), M_NTB, + ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < num_vectors; i++) { - ntb->db_cb[i].db_num = i; - ntb->db_cb[i].ntb = ntb; + ntb->msix_vec[i].num = i; + ntb->msix_vec[i].ntb = ntb; } return (0); } static void -ntb_free_callbacks(struct ntb_softc *ntb) +ntb_free_msix_vec(struct ntb_softc *ntb) { - uint8_t i; - if (ntb->db_cb == NULL) + if (ntb->msix_vec == NULL) return; - for (i = 0; i < ntb->max_cbs; i++) - ntb_unregister_db_callback(ntb, i); - - free(ntb->db_cb, M_NTB); - ntb->db_cb = NULL; - ntb->max_cbs = 0; + free(ntb->msix_vec, M_NTB); + ntb->msix_vec = NULL; } static struct ntb_hw_info * ntb_get_device_info(uint32_t device_id) { struct ntb_hw_info *ep = pci_ids; while (ep->device_id) { if (ep->device_id == device_id) return (ep); ++ep; } return (NULL); } static void ntb_teardown_xeon(struct ntb_softc *ntb) { ntb_link_disable(ntb); } static void ntb_detect_max_mw(struct ntb_softc *ntb) { if (ntb->type == NTB_SOC) { ntb->mw_count = SOC_MW_COUNT; return; } if (HAS_FEATURE(NTB_SPLIT_BAR)) ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT; else ntb->mw_count = XEON_SNB_MW_COUNT; } static int ntb_detect_xeon(struct ntb_softc *ntb) { uint8_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); ntb->ppd = ppd; if ((ppd & XEON_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_USD; else ntb->dev_type = NTB_DEV_DSD; if ((ppd & XEON_PPD_SPLIT_BAR) != 0) ntb->features |= NTB_SPLIT_BAR; /* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */ if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) ntb->features |= NTB_SDOORBELL_LOCKUP; conn_type = ppd & XEON_PPD_CONN_TYPE; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; case NTB_CONN_RP: case NTB_CONN_TRANSPARENT: default: device_printf(ntb->device, "Unsupported connection type: %u\n", (unsigned)conn_type); return (ENXIO); } return (0); } static int ntb_detect_soc(struct ntb_softc *ntb) { uint32_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); ntb->ppd = ppd; if ((ppd & SOC_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; conn_type = (ppd & SOC_PPD_CONN_TYPE) >> 8; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; default: device_printf(ntb->device, "Unsupported NTB configuration\n"); return (ENXIO); } return (0); } static int ntb_xeon_init_dev(struct ntb_softc *ntb) { int rc; ntb->reg_ofs.ldb = XEON_PDOORBELL_OFFSET; ntb->reg_ofs.ldb_mask = XEON_PDBMSK_OFFSET; ntb->reg_ofs.spad_local = XEON_SPAD_OFFSET; ntb->reg_ofs.bar4_xlat = XEON_SBAR4XLAT_OFFSET; if (HAS_FEATURE(NTB_SPLIT_BAR)) ntb->reg_ofs.bar5_xlat = XEON_SBAR5XLAT_OFFSET; ntb->reg_ofs.spci_cmd = XEON_PCICMD_OFFSET; ntb->spad_count = XEON_SPAD_COUNT; ntb->db_count = XEON_DB_COUNT; ntb->db_link_mask = XEON_DB_LINK_BIT; ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; if (ntb->conn_type != NTB_CONN_B2B) { device_printf(ntb->device, "Connection type %d not supported\n", ntb->conn_type); return (ENXIO); } ntb->reg = &xeon_reg; ntb->peer_reg = &xeon_b2b_reg; ntb->xlat_reg = &xeon_sec_xlat; /* * There is a Xeon hardware errata related to writes to SDOORBELL or * B2BDOORBELL in conjunction with inbound access to NTB MMIO 
space, * which may hang the system. To workaround this use the second memory * window to access the interrupt and scratch pad registers on the * remote system. */ if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) /* Use the last MW for mapping remote spad */ ntb->b2b_mw_idx = ntb->mw_count - 1; else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14)) /* * HW Errata on bit 14 of b2bdoorbell register. Writes will not be * mirrored to the remote system. Shrink the number of bits by one, * since bit 14 is the last bit. * * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register * anyway. Nor for non-B2B connection types. */ ntb->db_count = XEON_DB_COUNT - 1; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; if (ntb->dev_type == NTB_DEV_USD) rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, &xeon_b2b_usd_addr); else rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, &xeon_b2b_dsd_addr); if (rc != 0) return (rc); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, ntb->reg_ofs.spci_cmd, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* Enable link training */ - ntb_link_enable(ntb); + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); return (0); } static int ntb_soc_init_dev(struct ntb_softc *ntb) { KASSERT(ntb->conn_type == NTB_CONN_B2B, ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); ntb->reg_ofs.ldb = SOC_PDOORBELL_OFFSET; ntb->reg_ofs.ldb_mask = SOC_PDBMSK_OFFSET; ntb->reg_ofs.bar4_xlat = SOC_SBAR4XLAT_OFFSET; ntb->reg_ofs.spad_local = SOC_SPAD_OFFSET; ntb->reg_ofs.spci_cmd = SOC_PCICMD_OFFSET; ntb->spad_count = SOC_SPAD_COUNT; ntb->db_count = SOC_DB_COUNT; ntb->db_vec_count = SOC_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = SOC_DB_MSIX_VECTOR_SHIFT; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; ntb->reg = &soc_reg; ntb->peer_reg = &soc_b2b_reg; ntb->xlat_reg = &soc_sec_xlat; /* * FIXME - MSI-X bug on early SOC HW, remove once internal issue is * resolved. Mask transaction layer internal parity errors. */ pci_write_config(ntb->device, 0xFC, 0x4, 4); configure_soc_secondary_side_bars(ntb); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, ntb->reg_ofs.spci_cmd, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* Initiate PCI-E link training */ - ntb_link_enable(ntb); + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); callout_reset(&ntb->heartbeat_timer, 0, soc_link_hb, ntb); return (0); } /* XXX: Linux driver doesn't seem to do any of this for SoC. */ static void configure_soc_secondary_side_bars(struct ntb_softc *ntb) { if (ntb->dev_type == NTB_DEV_USD) { ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_DSD_ADDR64); ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_DSD_ADDR64); ntb_reg_write(8, SOC_MBAR23_OFFSET, XEON_B2B_BAR2_USD_ADDR64); ntb_reg_write(8, SOC_MBAR45_OFFSET, XEON_B2B_BAR4_USD_ADDR64); } else { ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_USD_ADDR64); ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_USD_ADDR64); ntb_reg_write(8, SOC_MBAR23_OFFSET, XEON_B2B_BAR2_DSD_ADDR64); ntb_reg_write(8, SOC_MBAR45_OFFSET, XEON_B2B_BAR4_DSD_ADDR64); } } /* * When working around Xeon SDOORBELL errata by remapping remote registers in a * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW * remains for use by a higher layer. * * Will only be used if working around SDOORBELL errata and the BIOS-configured * MW size is sufficiently large. 
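 *
 * A worked example, with illustrative sizes: given a 1 MiB B2B BAR and this
 * tunable set, b2b_off becomes 512 KiB, so the peer register shadow occupies
 * half of the BAR and ntb_mw_get_range() reports only the remaining 512 KiB
 * to consumers.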
*/ static unsigned int ntb_b2b_mw_share; SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 0, "If enabled (non-zero), prefer to share half of the B2B peer register " "MW with higher level consumers. Both sides of the NTB MUST set the same " "value here."); static void xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, enum ntb_bar regbar) { struct ntb_pci_bar_info *bar; uint8_t bar_sz; if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) return; bar = &ntb->bar_info[idx]; bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); if (idx == regbar) { if (ntb->b2b_off != 0) bar_sz--; else bar_sz = 0; } pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); (void)bar_sz; } static void -xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t base_addr, +xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, enum ntb_bar idx, enum ntb_bar regbar) { - struct ntb_pci_bar_info *bar; - vm_paddr_t bar_addr; + uint64_t reg_val; + uint32_t base_reg, lmt_reg; - bar = &ntb->bar_info[idx]; - bar_addr = base_addr + ((idx == regbar) ? ntb->b2b_off : 0); + bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); + if (idx == regbar) + bar_addr += ntb->b2b_off; - if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { - ntb_reg_write(4, bar->sbarbase_off, bar_addr); - ntb_reg_write(4, bar->sbarlmt_off, bar_addr); - bar_addr = ntb_reg_read(4, bar->sbarbase_off); - (void)bar_addr; - bar_addr = ntb_reg_read(4, bar->sbarlmt_off); + if (!bar_is_64bit(ntb, idx)) { + ntb_reg_write(4, base_reg, bar_addr); + reg_val = ntb_reg_read(4, base_reg); + (void)reg_val; + + ntb_reg_write(4, lmt_reg, bar_addr); + reg_val = ntb_reg_read(4, lmt_reg); + (void)reg_val; } else { - ntb_reg_write(8, bar->sbarbase_off, bar_addr); - ntb_reg_write(8, bar->sbarlmt_off, bar_addr); - bar_addr = ntb_reg_read(8, bar->sbarbase_off); - (void)bar_addr; - bar_addr = ntb_reg_read(8, bar->sbarlmt_off); + ntb_reg_write(8, base_reg, bar_addr); + reg_val = ntb_reg_read(8, base_reg); + (void)reg_val; + + ntb_reg_write(8, lmt_reg, bar_addr); + reg_val = ntb_reg_read(8, lmt_reg); + (void)reg_val; } - (void)bar_addr; } static void xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) { struct ntb_pci_bar_info *bar; bar = &ntb->bar_info[idx]; if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { ntb_reg_write(4, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(4, bar->pbarxlat_off); } else { ntb_reg_write(8, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(8, bar->pbarxlat_off); } (void)base_addr; } static int xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr) { struct ntb_pci_bar_info *b2b_bar; vm_size_t bar_size; uint64_t bar_addr; enum ntb_bar b2b_bar_num, i; if (ntb->b2b_mw_idx == B2B_MW_DISABLED) { b2b_bar = NULL; b2b_bar_num = NTB_CONFIG_BAR; ntb->b2b_off = 0; } else { b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx); KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS, ("invalid b2b mw bar")); b2b_bar = &ntb->bar_info[b2b_bar_num]; bar_size = b2b_bar->size; if (ntb_b2b_mw_share != 0 && (bar_size >> 1) >= XEON_B2B_MIN_SIZE) ntb->b2b_off = bar_size >> 1; else if (bar_size >= XEON_B2B_MIN_SIZE) { ntb->b2b_off = 0; ntb->mw_count--; } else { device_printf(ntb->device, "B2B bar size is too small!\n"); return (EIO); } } /* * Reset the secondary bar sizes to match the primary bar sizes. 
* (Except, disable or halve the size of the B2B secondary bar.) */ for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) xeon_reset_sbar_size(ntb, i, b2b_bar_num); bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = addr->bar5_addr32; else KASSERT(false, ("invalid bar")); ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); /* * Other SBARs are normally hit by the PBAR xlat, except for the b2b * register BAR. The B2B BAR is either disabled above or configured * half-size. It starts at PBAR xlat + offset. * * Also set up incoming BAR limits == base (zero length window). */ xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, b2b_bar_num); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, NTB_B2B_BAR_2, b2b_bar_num); xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, NTB_B2B_BAR_3, b2b_bar_num); } else xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, NTB_B2B_BAR_2, b2b_bar_num); /* Zero incoming translation addrs */ ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); /* Zero outgoing translation limits (whole bar size windows) */ ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); /* Set outgoing translation offsets */ xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); } else xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); /* Set the translation offset for B2B registers */ bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = peer_addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = peer_addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = peer_addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = peer_addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = peer_addr->bar5_addr32; else KASSERT(false, ("invalid bar")); /* * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits * at a time. 
*/ ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff); ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32); return (0); } +static inline bool +link_is_up(struct ntb_softc *ntb) +{ + + if (ntb->type == NTB_XEON) + return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0); + + KASSERT(ntb->type == NTB_SOC, ("ntb type")); + return ((ntb->ntb_ctl & SOC_CNTL_LINK_DOWN) == 0); +} + +static inline bool +soc_link_is_err(struct ntb_softc *ntb) +{ + uint32_t status; + + KASSERT(ntb->type == NTB_SOC, ("ntb type")); + + status = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET); + if ((status & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0) + return (true); + + status = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET); + return ((status & SOC_IBIST_ERR_OFLOW) != 0); +} + /* SOC does not have link status interrupt, poll on that platform */ static void soc_link_hb(void *arg) { struct ntb_softc *ntb = arg; - uint32_t status32; - int rc; + sbintime_t timo, poll_ts; + timo = NTB_HB_TIMEOUT * hz; + poll_ts = ntb->last_ts + timo; + /* * Delay polling the link status if an interrupt was received, unless * the cached link status says the link is down. */ - if ((long)ticks - ((long)ntb->last_ts + NTB_HB_TIMEOUT * hz) < 0 && - (ntb->ntb_ctl & SOC_CNTL_LINK_DOWN) == 0) + if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) { + timo = poll_ts - ticks; goto out; + } + if (ntb_poll_link(ntb)) + ntb_link_event(ntb); - rc = ntb_poll_link(ntb); - if (rc != 0) - device_printf(ntb->device, - "Error determining link status\n"); - - /* Check to see if a link error is the cause of the link down */ - if (ntb->link_status == NTB_LINK_DOWN) { - status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET); - if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0) { - callout_reset(&ntb->lr_timer, 0, recover_soc_link, - ntb); - return; - } + if (!link_is_up(ntb) && soc_link_is_err(ntb)) { + /* Link is down with error, proceed with recovery */ + callout_reset(&ntb->lr_timer, 0, recover_soc_link, ntb); + return; } out: - callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, soc_link_hb, - ntb); + callout_reset(&ntb->heartbeat_timer, timo, soc_link_hb, ntb); } static void soc_perform_link_restart(struct ntb_softc *ntb) { uint32_t status; /* Driver resets the NTB ModPhy lanes - magic! 
*/ ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0xe0); ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x40); ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x60); ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0x60); /* Driver waits 100ms to allow the NTB ModPhy to settle */ pause("ModPhy", hz / 10); /* Clear AER Errors, write to clear */ status = ntb_reg_read(4, SOC_ERRCORSTS_OFFSET); status &= PCIM_AER_COR_REPLAY_ROLLOVER; ntb_reg_write(4, SOC_ERRCORSTS_OFFSET, status); /* Clear unexpected electrical idle event in LTSSM, write to clear */ status = ntb_reg_read(4, SOC_LTSSMERRSTS0_OFFSET); status |= SOC_LTSSMERRSTS0_UNEXPECTEDEI; ntb_reg_write(4, SOC_LTSSMERRSTS0_OFFSET, status); /* Clear DeSkew Buffer error, write to clear */ status = ntb_reg_read(4, SOC_DESKEWSTS_OFFSET); status |= SOC_DESKEWSTS_DBERR; ntb_reg_write(4, SOC_DESKEWSTS_OFFSET, status); status = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET); status &= SOC_IBIST_ERR_OFLOW; ntb_reg_write(4, SOC_IBSTERRRCRVSTS0_OFFSET, status); /* Releases the NTB state machine to allow the link to retrain */ status = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET); status &= ~SOC_LTSSMSTATEJMP_FORCEDETECT; ntb_reg_write(4, SOC_LTSSMSTATEJMP_OFFSET, status); } -static void -ntb_handle_link_event(struct ntb_softc *ntb, int link_state) +/* + * ntb_set_ctx() - associate a driver context with an ntb device + * @ntb: NTB device context + * @ctx: Driver context + * @ctx_ops: Driver context operations + * + * Associate a driver context and operations with a ntb device. The context is + * provided by the client driver, and the driver may associate a different + * context with each ntb device. + * + * Return: Zero if the context is associated, otherwise an error number. + */ +int +ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops) { - enum ntb_hw_event event; - uint16_t status; - if (ntb->link_status == link_state) - return; + if (ctx == NULL || ops == NULL) + return (EINVAL); + if (ntb->ctx_ops != NULL) + return (EINVAL); - if (link_state == NTB_LINK_UP) { - device_printf(ntb->device, "Link Up\n"); - ntb->link_status = NTB_LINK_UP; - event = NTB_EVENT_HW_LINK_UP; - - if (ntb->type == NTB_SOC || - ntb->conn_type == NTB_CONN_TRANSPARENT) - status = ntb_reg_read(2, ntb->reg->lnk_sta); - else - status = pci_read_config(ntb->device, - XEON_LINK_STATUS_OFFSET, 2); - ntb->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4; - ntb->link_speed = (status & NTB_LINK_SPEED_MASK); - device_printf(ntb->device, "Link Width %d, Link Speed %d\n", - ntb->link_width, ntb->link_speed); - callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, - soc_link_hb, ntb); - } else { - device_printf(ntb->device, "Link Down\n"); - ntb->link_status = NTB_LINK_DOWN; - event = NTB_EVENT_HW_LINK_DOWN; - /* Do not modify link width/speed, we need it in link recovery */ + CTX_LOCK(ntb); + if (ntb->ctx_ops != NULL) { + CTX_UNLOCK(ntb); + return (EINVAL); } + ntb->ntb_ctx = ctx; + ntb->ctx_ops = ops; + CTX_UNLOCK(ntb); - /* notify the upper layer if we have an event change */ - if (ntb->event_cb != NULL) - ntb->event_cb(ntb->ntb_transport, event); + return (0); } +/* + * It is expected that this will only be used from contexts where the ctx_lock + * is not needed to protect ntb_ctx lifetime. 
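 *
 * A hypothetical client (all "my_*" names are invented for illustration, and
 * the callback signatures are assumed from how ntb_link_event() and
 * ntb_db_event() below invoke them) registers roughly like this:
 *
 *	static void my_db_event(void *ctx, uint32_t vec);
 *	static void my_link_event(void *ctx);
 *	static const struct ntb_ctx_ops my_ops = {
 *		.link_event = my_link_event,
 *		.db_event = my_db_event,
 *	};
 *
 *	error = ntb_set_ctx(ntb, my_softc, &my_ops);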
+ */ +void * +ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops) +{ + + KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus")); + if (ops != NULL) + *ops = ntb->ctx_ops; + return (ntb->ntb_ctx); +} + +/* + * ntb_clear_ctx() - disassociate any driver context from an ntb device + * @ntb: NTB device context + * + * Clear any association that may exist between a driver context and the ntb + * device. + */ +void +ntb_clear_ctx(struct ntb_softc *ntb) +{ + + CTX_LOCK(ntb); + ntb->ntb_ctx = NULL; + ntb->ctx_ops = NULL; + CTX_UNLOCK(ntb); +} + +/* + * ntb_link_event() - notify driver context of a change in link status + * @ntb: NTB device context + * + * Notify the driver context that the link status may have changed. The driver + * should call ntb_link_is_up() to get the current status. + */ +void +ntb_link_event(struct ntb_softc *ntb) +{ + + CTX_LOCK(ntb); + if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL) + ntb->ctx_ops->link_event(ntb->ntb_ctx); + CTX_UNLOCK(ntb); +} + +/* + * ntb_db_event() - notify driver context of a doorbell event + * @ntb: NTB device context + * @vector: Interrupt vector number + * + * Notify the driver context of a doorbell event. If hardware supports + * multiple interrupt vectors for doorbells, the vector number indicates which + * vector received the interrupt. The vector number is relative to the first + * vector used for doorbells, starting at zero, and must be less than + * ntb_db_vector_count(). The driver may call ntb_db_read() to check which + * doorbell bits need service, and ntb_db_vector_mask() to determine which of + * those bits are associated with the vector number. + */ static void -ntb_link_enable(struct ntb_softc *ntb) +ntb_db_event(struct ntb_softc *ntb, uint32_t vec) { + + CTX_LOCK(ntb); + if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL) + ntb->ctx_ops->db_event(ntb->ntb_ctx, vec); + CTX_UNLOCK(ntb); +} + +/* + * ntb_link_enable() - enable the link on the secondary side of the ntb + * @ntb: NTB device context + * @max_speed: The maximum link speed expressed as PCIe generation number[0] + * @max_width: The maximum link width expressed as the number of PCIe lanes[0] + * + * Enable the link on the secondary side of the ntb. This can only be done + * from the primary side of the ntb in primary or b2b topology. The ntb device + * should train the link to its maximum speed and width, or the requested speed + * and width, whichever is smaller, if supported. + * + * Return: Zero on success, otherwise an error number. + * + * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed + * and width input will be ignored. 
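 *
 * A minimal caller sketch, using the only values currently honored:
 *
 *	enum ntb_speed speed;
 *	enum ntb_width width;
 *
 *	if (ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO) == 0)
 *		(void)ntb_link_is_up(ntb, &speed, &width);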
+ */
+int
+ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
+    enum ntb_width w __unused)
+{
	uint32_t cntl;

	if (ntb->type == NTB_SOC) {
		pci_write_config(ntb->device, NTB_PPD_OFFSET,
		    ntb->ppd | SOC_PPD_INIT_LINK, 4);
-		return;
+		return (0);
	}

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
-		ntb_handle_link_event(ntb, NTB_LINK_UP);
-		return;
+		ntb_link_event(ntb);
+		return (0);
	}

	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+	return (0);
}

-static void
+/*
+ * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * @ntb: NTB device context
+ *
+ * Disable the link on the secondary side of the ntb. This can only be done
+ * from the primary side of the ntb in primary or b2b topology. The ntb device
+ * should disable the link. Returning from this call must indicate that a
+ * barrier has passed, and no more writes may pass in either direction across
+ * the link, except if this call returns an error number.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+int
ntb_link_disable(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
-		ntb_handle_link_event(ntb, NTB_LINK_DOWN);
-		return;
+		ntb_link_event(ntb);
+		return (0);
	}

	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+	return (0);
}

static void
recover_soc_link(void *arg)
{
	struct ntb_softc *ntb = arg;
	uint8_t speed, width;
	uint32_t status32;

	soc_perform_link_restart(ntb);

	/*
	 * There is a potential race between the 2 NTB devices recovering at
	 * the same time. If the times are the same, the link will not recover
	 * and the driver will be stuck in this loop forever. Add a random
	 * interval to the recovery time to prevent this race.
	 */
	status32 = arc4random() % SOC_LINK_RECOVERY_TIME;
	pause("Link", (SOC_LINK_RECOVERY_TIME + status32) * hz / 1000);

	status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
	if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0)
		goto retry;

	status32 = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET);
	if ((status32 & SOC_IBIST_ERR_OFLOW) != 0)
		goto retry;

	status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
	if ((status32 & SOC_CNTL_LINK_DOWN) != 0)
		goto out;

	status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
	width = (status32 & NTB_LINK_WIDTH_MASK) >> 4;
	speed = (status32 & NTB_LINK_SPEED_MASK);
	if (ntb->link_width != width || ntb->link_speed != speed)
		goto retry;

out:
	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, soc_link_hb,
	    ntb);
	return;

retry:
	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_soc_link,
	    ntb);
}

-static int
+/*
+ * Polls the HW link status register(s); returns true if something has changed.
+ */ +static bool ntb_poll_link(struct ntb_softc *ntb) { - int link_state; uint32_t ntb_cntl; - uint16_t status; + uint16_t reg_val; if (ntb->type == NTB_SOC) { - HW_LOCK(ntb); ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); - if (ntb_cntl == ntb->ntb_ctl) { - HW_UNLOCK(ntb); - return (0); - } + if (ntb_cntl == ntb->ntb_ctl) + return (false); + ntb->ntb_ctl = ntb_cntl; ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta); - HW_UNLOCK(ntb); - - if ((ntb_cntl & SOC_CNTL_LINK_DOWN) != 0) - link_state = NTB_LINK_DOWN; - else - link_state = NTB_LINK_UP; } else { - status = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); - if (status == ntb->lnk_sta) - return (0); - ntb->lnk_sta = status; + db_iowrite(ntb, ntb->reg_ofs.ldb, ntb->db_link_mask); - if ((status & NTB_LINK_STATUS_ACTIVE) != 0) - link_state = NTB_LINK_UP; - else - link_state = NTB_LINK_DOWN; - } + reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); + if (reg_val == ntb->lnk_sta) + return (false); - ntb_handle_link_event(ntb, link_state); - return (0); -} - -static void -ntb_irq_work(void *arg) -{ - struct ntb_db_cb *db_cb = arg; - struct ntb_softc *ntb; - int rc; - - rc = db_cb->callback(db_cb->data, db_cb->db_num); - /* Poll if forward progress was made. */ - if (rc != 0) { - callout_reset(&db_cb->irq_work, 0, ntb_irq_work, db_cb); - return; + ntb->lnk_sta = reg_val; } - - /* Unmask interrupt if no progress was made. */ - ntb = db_cb->ntb; - unmask_ldb_interrupt(ntb, db_cb->db_num); + return (true); } -/* - * Public API to the rest of the OS - */ - -/** - * ntb_register_event_callback() - register event callback - * @ntb: pointer to ntb_softc instance - * @func: callback function to register - * - * This function registers a callback for any HW driver events such as link - * up/down, power management notices and etc. - * - * RETURNS: An appropriate ERRNO error value on error, or zero for success. - */ -int -ntb_register_event_callback(struct ntb_softc *ntb, ntb_event_callback func) +static inline enum ntb_speed +ntb_link_sta_speed(struct ntb_softc *ntb) { - if (ntb->event_cb != NULL) - return (EINVAL); - - ntb->event_cb = func; - - return (0); + if (!link_is_up(ntb)) + return (NTB_SPEED_NONE); + return (ntb->lnk_sta & NTB_LINK_SPEED_MASK); } -/** - * ntb_unregister_event_callback() - unregisters the event callback - * @ntb: pointer to ntb_softc instance - * - * This function unregisters the existing callback from transport - */ -void -ntb_unregister_event_callback(struct ntb_softc *ntb) +static inline enum ntb_width +ntb_link_sta_width(struct ntb_softc *ntb) { - ntb->event_cb = NULL; + if (!link_is_up(ntb)) + return (NTB_WIDTH_NONE); + return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } -/** - * ntb_register_db_callback() - register a callback for doorbell interrupt - * @ntb: pointer to ntb_softc instance - * @idx: doorbell index to register callback, zero based - * @data: pointer to be returned to caller with every callback - * @func: callback function to register - * - * This function registers a callback function for the doorbell interrupt - * on the primary side. The function will unmask the doorbell as well to - * allow interrupt. - * - * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
+/* + * Public API to the rest of the OS */ -int -ntb_register_db_callback(struct ntb_softc *ntb, unsigned int idx, void *data, - ntb_db_callback func) -{ - struct ntb_db_cb *db_cb = &ntb->db_cb[idx]; - if (idx >= ntb->max_cbs || db_cb->callback != NULL || db_cb->reserved) { - device_printf(ntb->device, "Invalid Index.\n"); - return (EINVAL); - } - - db_cb->callback = func; - db_cb->data = data; - callout_init(&db_cb->irq_work, 1); - - unmask_ldb_interrupt(ntb, idx); - - return (0); -} - /** - * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt - * @ntb: pointer to ntb_softc instance - * @idx: doorbell index to register callback, zero based - * - * This function unregisters a callback function for the doorbell interrupt - * on the primary side. The function will also mask the said doorbell. - */ -void -ntb_unregister_db_callback(struct ntb_softc *ntb, unsigned int idx) -{ - - if (idx >= ntb->max_cbs || ntb->db_cb[idx].callback == NULL) - return; - - mask_ldb_interrupt(ntb, idx); - - callout_drain(&ntb->db_cb[idx].irq_work); - ntb->db_cb[idx].callback = NULL; -} - -/** - * ntb_find_transport() - find the transport pointer - * @transport: pointer to pci device - * - * Given the pci device pointer, return the transport pointer passed in when - * the transport attached when it was inited. - * - * RETURNS: pointer to transport. - */ -void * -ntb_find_transport(struct ntb_softc *ntb) -{ - - return (ntb->ntb_transport); -} - -/** - * ntb_register_transport() - Register NTB transport with NTB HW driver - * @transport: transport identifier - * - * This function allows a transport to reserve the hardware driver for - * NTB usage. - * - * RETURNS: pointer to ntb_softc, NULL on error. - */ -struct ntb_softc * -ntb_register_transport(struct ntb_softc *ntb, void *transport) -{ - - /* - * TODO: when we have more than one transport, we will need to rewrite - * this to prevent race conditions - */ - if (ntb->ntb_transport != NULL) - return (NULL); - - ntb->ntb_transport = transport; - return (ntb); -} - -/** - * ntb_unregister_transport() - Unregister the transport with the NTB HW driver - * @ntb - ntb_softc of the transport to be freed - * - * This function unregisters the transport from the HW driver and performs any - * necessary cleanups. - */ -void -ntb_unregister_transport(struct ntb_softc *ntb) -{ - uint8_t i; - - if (ntb->ntb_transport == NULL) - return; - - for (i = 0; i < ntb->max_cbs; i++) - ntb_unregister_db_callback(ntb, i); - - ntb_unregister_event_callback(ntb); - ntb->ntb_transport = NULL; -} - -/** * ntb_get_max_spads() - get the total scratch regs usable * @ntb: pointer to ntb_softc instance * * This function returns the max 32bit scratchpad registers usable by the * upper layer. * * RETURNS: total number of scratch pad registers available */ uint8_t ntb_get_max_spads(struct ntb_softc *ntb) { return (ntb->spad_count); } uint8_t -ntb_get_max_cbs(struct ntb_softc *ntb) -{ - - return (ntb->max_cbs); -} - -uint8_t ntb_mw_count(struct ntb_softc *ntb) { return (ntb->mw_count); } /** * ntb_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
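 *
 * Together with the peer variants below, this supports a simple version
 * handshake between the two hosts; slot 0, MY_VERSION, and peer_ok are
 * arbitrary, client-chosen values:
 *
 *	uint32_t val;
 *
 *	(void)ntb_peer_spad_write(ntb, 0, MY_VERSION);
 *	if (ntb_spad_read(ntb, 0, &val) == 0 && val == MY_VERSION)
 *		peer_ok = true;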
*/ int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); ntb_reg_write(4, ntb->reg_ofs.spad_local + idx * 4, val); return (0); } /** * ntb_spad_read() - read from the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); *val = ntb_reg_read(4, ntb->reg_ofs.spad_local + idx * 4); return (0); } /** * ntb_peer_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) ntb_mw_write(4, XEON_SHADOW_SPAD_OFFSET + idx * 4, val); else ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); return (0); } /** * ntb_peer_spad_read() - read from the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) *val = ntb_mw_read(4, XEON_SHADOW_SPAD_OFFSET + idx * 4); else *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); return (0); } -/** - * ntb_get_mw_vbase() - get virtual addr for the NTB memory window - * @ntb: pointer to ntb_softc instance - * @mw: memory window number +/* + * ntb_mw_get_range() - get the range of a memory window + * @ntb: NTB device context + * @idx: Memory window number + * @base: OUT - the base address for mapping the memory window + * @size: OUT - the size for mapping the memory window + * @align: OUT - the base alignment for translating the memory window + * @align_size: OUT - the size alignment for translating the memory window * - * This function provides the base virtual address of the memory window - * specified. + * Get the range of a memory window. NULL may be given for any output + * parameter if the value is not needed. The base and size may be used for + * mapping the memory window, to access the peer memory. The alignment and + * size may be used for translating the memory window, for the peer to access + * memory on the local system. * - * RETURNS: pointer to virtual address, or NULL on error. + * Return: Zero on success, otherwise an error number. 
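 *
 * A sketch of the expected call sequence, with error handling elided and
 * `dma_addr' standing in for a client buffer allocated with the alignment
 * reported here (e.g. via bus_dmamem_alloc(9)):
 *
 *	vm_paddr_t base;
 *	void *vbase;
 *	size_t size, align, align_size;
 *
 *	rc = ntb_mw_get_range(ntb, 0, &base, &vbase, &size, &align,
 *	    &align_size);
 *	rc = ntb_mw_set_trans(ntb, 0, dma_addr, size);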
*/ -void * -ntb_get_mw_vbase(struct ntb_softc *ntb, unsigned int mw) +int +ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base, + void **vbase, size_t *size, size_t *align, size_t *align_size) { + struct ntb_pci_bar_info *bar; + size_t bar_b2b_off; - if (mw >= ntb_mw_count(ntb)) - return (NULL); + if (mw_idx >= ntb_mw_count(ntb)) + return (EINVAL); - return (ntb->bar_info[ntb_mw_to_bar(ntb, mw)].vbase); -} + bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)]; + bar_b2b_off = 0; + if (mw_idx == ntb->b2b_mw_idx) { + KASSERT(ntb->b2b_off != 0, + ("user shouldn't get non-shared b2b mw")); + bar_b2b_off = ntb->b2b_off; + } -bus_addr_t -ntb_get_mw_pbase(struct ntb_softc *ntb, unsigned int mw) -{ - - if (mw >= ntb_mw_count(ntb)) - return (0); - - return (ntb->bar_info[ntb_mw_to_bar(ntb, mw)].pbase); + if (base != NULL) + *base = bar->pbase + bar_b2b_off; + if (vbase != NULL) + *vbase = (char *)bar->vbase + bar_b2b_off; + if (size != NULL) + *size = bar->size - bar_b2b_off; + if (align != NULL) + *align = bar->size; + if (align_size != NULL) + *align_size = 1; + return (0); } -/** - * ntb_get_mw_size() - return size of NTB memory window - * @ntb: pointer to ntb_softc instance - * @mw: memory window number +/* + * ntb_mw_set_trans() - set the translation of a memory window + * @ntb: NTB device context + * @idx: Memory window number + * @addr: The dma address local memory to expose to the peer + * @size: The size of the local memory to expose to the peer * - * This function provides the physical size of the memory window specified + * Set the translation of a memory window. The peer may access local memory + * through the window starting at the address, up to the size. The address + * must be aligned to the alignment specified by ntb_mw_get_range(). The size + * must be aligned to the size alignment specified by ntb_mw_get_range(). * - * RETURNS: the size of the memory window or zero on error + * Return: Zero on success, otherwise an error number. */ -u_long -ntb_get_mw_size(struct ntb_softc *ntb, unsigned int mw) +int +ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr, + size_t size) { + struct ntb_pci_bar_info *bar; + uint64_t base, limit, reg_val; + size_t bar_size, mw_size; + uint32_t base_reg, xlat_reg, limit_reg; + enum ntb_bar bar_num; - if (mw >= ntb_mw_count(ntb)) - return (0); + if (idx >= ntb_mw_count(ntb)) + return (EINVAL); - return (ntb->bar_info[ntb_mw_to_bar(ntb, mw)].size); -} + bar_num = ntb_mw_to_bar(ntb, idx); + bar = &ntb->bar_info[bar_num]; -/** - * ntb_set_mw_addr - set the memory window address - * @ntb: pointer to ntb_softc instance - * @mw: memory window number - * @addr: base address for data - * - * This function sets the base physical address of the memory window. This - * memory address is where data from the remote system will be transfered into - * or out of depending on how the transport is configured. 
- */ -void -ntb_set_mw_addr(struct ntb_softc *ntb, unsigned int mw, uint64_t addr) -{ + bar_size = bar->size; + if (idx == ntb->b2b_mw_idx) + mw_size = bar_size - ntb->b2b_off; + else + mw_size = bar_size; - if (mw >= ntb_mw_count(ntb)) - return; + /* Hardware requires that addr is aligned to bar size */ + if ((addr & (bar_size - 1)) != 0) + return (EINVAL); - switch (ntb_mw_to_bar(ntb, mw)) { - case NTB_B2B_BAR_1: - ntb_reg_write(8, ntb->xlat_reg->bar2_xlat, addr); - break; - case NTB_B2B_BAR_2: - if (HAS_FEATURE(NTB_SPLIT_BAR)) - ntb_reg_write(4, ntb->reg_ofs.bar4_xlat, addr); - else - ntb_reg_write(8, ntb->reg_ofs.bar4_xlat, addr); - break; - case NTB_B2B_BAR_3: - ntb_reg_write(4, ntb->reg_ofs.bar5_xlat, addr); - break; - default: - KASSERT(false, ("invalid BAR")); - break; + if (size > mw_size) + return (EINVAL); + + bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg); + + limit = 0; + if (bar_is_64bit(ntb, bar_num)) { + base = ntb_reg_read(8, base_reg); + + if (limit_reg != 0 && size != mw_size) + limit = base + size; + + /* Set and verify translation address */ + ntb_reg_write(8, xlat_reg, addr); + reg_val = ntb_reg_read(8, xlat_reg); + if (reg_val != addr) { + ntb_reg_write(8, xlat_reg, 0); + return (EIO); + } + + /* Set and verify the limit */ + ntb_reg_write(8, limit_reg, limit); + reg_val = ntb_reg_read(8, limit_reg); + if (reg_val != limit) { + ntb_reg_write(8, limit_reg, base); + ntb_reg_write(8, xlat_reg, 0); + return (EIO); + } + } else { + /* Configure 32-bit (split) BAR MW */ + + if ((addr & ~UINT32_MAX) != 0) + return (EINVAL); + if (((addr + size) & ~UINT32_MAX) != 0) + return (EINVAL); + + base = ntb_reg_read(4, base_reg); + + if (limit_reg != 0 && size != mw_size) + limit = base + size; + + /* Set and verify translation address */ + ntb_reg_write(4, xlat_reg, addr); + reg_val = ntb_reg_read(4, xlat_reg); + if (reg_val != addr) { + ntb_reg_write(4, xlat_reg, 0); + return (EIO); + } + + /* Set and verify the limit */ + ntb_reg_write(4, limit_reg, limit); + reg_val = ntb_reg_read(4, limit_reg); + if (reg_val != limit) { + ntb_reg_write(4, limit_reg, base); + ntb_reg_write(4, xlat_reg, 0); + return (EIO); + } } + return (0); } /** * ntb_peer_db_set() - Set the doorbell on the secondary/external side * @ntb: pointer to ntb_softc instance * @bit: doorbell bits to ring * * This function allows triggering of a doorbell on the secondary/external * side that will initiate an interrupt on the remote host. */ void ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit) { if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { ntb_mw_write(2, XEON_SHADOW_PDOORBELL_OFFSET, bit); return; } - ntb_db_write(ntb, ntb->peer_reg->db_bell, bit); + db_iowrite(ntb, ntb->peer_reg->db_bell, bit); } /* * ntb_get_peer_db_addr() - Return the address of the remote doorbell register, * as well as the size of the register (via *sz_out). * * This function allows a caller using I/OAT DMA to chain the remote doorbell * ring to its memory window write. * * Note that writing the peer doorbell via a memory window will *not* generate * an interrupt on the remote host; that must be done separately.
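 *
 * An illustrative sketch (the caller and its DMA engine are hypothetical,
 * not part of this interface):
 *
 *	vm_size_t db_sz;
 *	bus_addr_t db_addr;
 *
 *	db_addr = ntb_get_peer_db_addr(ntb, &db_sz);
 *	(queue the payload copy, then queue a db_sz-byte write of the
 *	doorbell bits to db_addr so the doorbell ring trails the data)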
*/ bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out) { struct ntb_pci_bar_info *bar; uint64_t regoff; KASSERT(sz_out != NULL, ("must be non-NULL")); if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { bar = &ntb->bar_info[NTB_CONFIG_BAR]; regoff = ntb->peer_reg->db_bell; } else { KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) || (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1), ("mw_count invalid after setup")); KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED, ("invalid b2b idx")); bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)]; regoff = XEON_SHADOW_PDOORBELL_OFFSET; } KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh")); *sz_out = ntb->reg->db_size; /* HACK: Specific to current x86 bus implementation. */ return ((uint64_t)bar->pci_bus_handle + regoff); } /** - * ntb_link_is_up() - return the hardware link status - * @ndev: pointer to ntb_device instance + * ntb_link_is_up() - get the current ntb link state + * @ntb: NTB device context + * @speed: OUT - The link speed expressed as PCIe generation number + * @width: OUT - The link width expressed as the number of PCIe lanes * - * Returns true if the hardware is connected to the remote system - * * RETURNS: true or false based on the hardware link state */ bool -ntb_link_is_up(struct ntb_softc *ntb) +ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, + enum ntb_width *width) { - return (ntb->link_status == NTB_LINK_UP); + if (speed != NULL) + *speed = ntb_link_sta_speed(ntb); + if (width != NULL) + *width = ntb_link_sta_width(ntb); + return (link_is_up(ntb)); } static void save_bar_parameters(struct ntb_pci_bar_info *bar) { bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); bar->pbase = rman_get_start(bar->pci_resource); bar->size = rman_get_size(bar->pci_resource); bar->vbase = rman_get_virtual(bar->pci_resource); } device_t ntb_get_device(struct ntb_softc *ntb) { return (ntb->device); } /* Export HW-specific errata information. */ bool ntb_has_feature(struct ntb_softc *ntb, uint64_t feature) { return (HAS_FEATURE(feature)); } Index: head/sys/dev/ntb/ntb_hw/ntb_hw.h =================================================================== --- head/sys/dev/ntb/ntb_hw/ntb_hw.h (revision 289545) +++ head/sys/dev/ntb/ntb_hw/ntb_hw.h (revision 289546) @@ -1,92 +1,111 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NTB_HW_H_ #define _NTB_HW_H_ struct ntb_softc; #define NTB_MAX_NUM_MW 3 -enum ntb_link_event { - NTB_LINK_DOWN = 0, - NTB_LINK_UP, +enum ntb_speed { + NTB_SPEED_AUTO = -1, + NTB_SPEED_NONE = 0, + NTB_SPEED_GEN1 = 1, + NTB_SPEED_GEN2 = 2, + NTB_SPEED_GEN3 = 3, }; -enum ntb_hw_event { - NTB_EVENT_SW_EVENT0 = 0, - NTB_EVENT_SW_EVENT1, - NTB_EVENT_SW_EVENT2, - NTB_EVENT_HW_ERROR, - NTB_EVENT_HW_LINK_UP, - NTB_EVENT_HW_LINK_DOWN, +enum ntb_width { + NTB_WIDTH_AUTO = -1, + NTB_WIDTH_NONE = 0, + NTB_WIDTH_1 = 1, + NTB_WIDTH_2 = 2, + NTB_WIDTH_4 = 4, + NTB_WIDTH_8 = 8, + NTB_WIDTH_12 = 12, + NTB_WIDTH_16 = 16, + NTB_WIDTH_32 = 32, }; SYSCTL_DECL(_hw_ntb); -typedef int (*ntb_db_callback)(void *data, int db_num); -typedef void (*ntb_event_callback)(void *data, enum ntb_hw_event event); +typedef void (*ntb_db_callback)(void *data, int vector); +typedef void (*ntb_event_callback)(void *data); -int ntb_register_event_callback(struct ntb_softc *ntb, ntb_event_callback func); -void ntb_unregister_event_callback(struct ntb_softc *ntb); -int ntb_register_db_callback(struct ntb_softc *ntb, unsigned int idx, - void *data, ntb_db_callback func); -void ntb_unregister_db_callback(struct ntb_softc *ntb, unsigned int idx); -void *ntb_find_transport(struct ntb_softc *ntb); -struct ntb_softc *ntb_register_transport(struct ntb_softc *ntb, - void *transport); -void ntb_unregister_transport(struct ntb_softc *ntb); -uint8_t ntb_get_max_cbs(struct ntb_softc *ntb); -uint8_t ntb_mw_count(struct ntb_softc *ntb); +struct ntb_ctx_ops { + ntb_event_callback link_event; + ntb_db_callback db_event; +}; + +device_t ntb_get_device(struct ntb_softc *); + +bool ntb_link_is_up(struct ntb_softc *, enum ntb_speed *, enum ntb_width *); +void ntb_link_event(struct ntb_softc *); +int ntb_link_enable(struct ntb_softc *, enum ntb_speed, enum ntb_width); +int ntb_link_disable(struct ntb_softc *); + +int ntb_set_ctx(struct ntb_softc *, void *, const struct ntb_ctx_ops *); +void *ntb_get_ctx(struct ntb_softc *, const struct ntb_ctx_ops **); +void ntb_clear_ctx(struct ntb_softc *); + +uint8_t ntb_mw_count(struct ntb_softc *); +int ntb_mw_get_range(struct ntb_softc *, unsigned mw_idx, vm_paddr_t *base, + void **vbase, size_t *size, size_t *align, size_t *align_size); +int ntb_mw_set_trans(struct ntb_softc *, unsigned mw_idx, bus_addr_t, size_t); +int ntb_mw_clear_trans(struct ntb_softc *, unsigned mw_idx); + uint8_t ntb_get_max_spads(struct ntb_softc *ntb); int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val); int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val); int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val); int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val); -void *ntb_get_mw_vbase(struct ntb_softc *ntb, unsigned int mw); -bus_addr_t ntb_get_mw_pbase(struct ntb_softc *ntb, unsigned int mw); -u_long ntb_get_mw_size(struct ntb_softc *ntb, unsigned int mw); -void ntb_set_mw_addr(struct ntb_softc *ntb, unsigned 
int mw, uint64_t addr); -void ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit); -bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out); -bool ntb_link_is_up(struct ntb_softc *ntb); -device_t ntb_get_device(struct ntb_softc *ntb); + +uint64_t ntb_db_valid_mask(struct ntb_softc *); +bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *, vm_size_t *sz_out); + +void ntb_db_clear(struct ntb_softc *, uint64_t bits); +void ntb_db_clear_mask(struct ntb_softc *, uint64_t bits); +uint64_t ntb_db_read(struct ntb_softc *); +void ntb_db_set_mask(struct ntb_softc *, uint64_t bits); +uint64_t ntb_db_vector_mask(struct ntb_softc *, int vector); +void ntb_peer_db_set(struct ntb_softc *, uint64_t bits); /* Hardware owns the low 32 bits of features. */ #define NTB_BAR_SIZE_4K (1 << 0) #define NTB_SDOORBELL_LOCKUP (1 << 1) #define NTB_SB01BASE_LOCKUP (1 << 2) #define NTB_B2BDOORBELL_BIT14 (1 << 3) /* Software/configuration owns the top 32 bits. */ #define NTB_SPLIT_BAR (1ull << 32) bool ntb_has_feature(struct ntb_softc *, uint64_t); #endif /* _NTB_HW_H_ */ Index: head/sys/dev/ntb/ntb_hw/ntb_regs.h =================================================================== --- head/sys/dev/ntb/ntb_hw/ntb_regs.h (revision 289545) +++ head/sys/dev/ntb/ntb_hw/ntb_regs.h (revision 289546) @@ -1,177 +1,178 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _NTB_REGS_H_ #define _NTB_REGS_H_ #define NTB_LINK_STATUS_ACTIVE 0x2000 #define NTB_LINK_SPEED_MASK 0x000f #define NTB_LINK_WIDTH_MASK 0x03f0 +#define NTB_LNK_STA_WIDTH(sta) (((sta) & NTB_LINK_WIDTH_MASK) >> 4) #define XEON_SNB_MW_COUNT 2 #define XEON_HSX_SPLIT_MW_COUNT 3 /* Reserve the uppermost bit for link interrupt */ #define XEON_DB_COUNT 15 #define XEON_DB_LINK 15 #define XEON_DB_MSIX_VECTOR_COUNT 4 #define XEON_DB_MSIX_VECTOR_SHIFT 5 #define XEON_DB_LINK_BIT (1 << XEON_DB_LINK) #define XEON_SPAD_COUNT 16 #define XEON_PCICMD_OFFSET 0x0504 #define XEON_DEVCTRL_OFFSET 0x0598 #define XEON_LINK_STATUS_OFFSET 0x01a2 #define XEON_SLINK_STATUS_OFFSET 0x05a2 #define XEON_PBAR2LMT_OFFSET 0x0000 #define XEON_PBAR4LMT_OFFSET 0x0008 #define XEON_PBAR5LMT_OFFSET 0x000c #define XEON_PBAR2XLAT_OFFSET 0x0010 #define XEON_PBAR4XLAT_OFFSET 0x0018 #define XEON_PBAR5XLAT_OFFSET 0x001c #define XEON_SBAR2LMT_OFFSET 0x0020 #define XEON_SBAR4LMT_OFFSET 0x0028 #define XEON_SBAR5LMT_OFFSET 0x002c #define XEON_SBAR2XLAT_OFFSET 0x0030 #define XEON_SBAR4XLAT_OFFSET 0x0038 #define XEON_SBAR5XLAT_OFFSET 0x003c #define XEON_SBAR0BASE_OFFSET 0x0040 #define XEON_SBAR2BASE_OFFSET 0x0048 #define XEON_SBAR4BASE_OFFSET 0x0050 #define XEON_SBAR5BASE_OFFSET 0x0054 #define XEON_NTBCNTL_OFFSET 0x0058 #define XEON_SBDF_OFFSET 0x005c #define XEON_PDOORBELL_OFFSET 0x0060 #define XEON_PDBMSK_OFFSET 0x0062 #define XEON_SDOORBELL_OFFSET 0x0064 #define XEON_SDBMSK_OFFSET 0x0066 #define XEON_USMEMMISS 0x0070 #define XEON_SPAD_OFFSET 0x0080 #define XEON_SPADSEMA4_OFFSET 0x00c0 #define XEON_WCCNTRL_OFFSET 0x00e0 #define XEON_B2B_SPAD_OFFSET 0x0100 #define XEON_B2B_DOORBELL_OFFSET 0x0140 #define XEON_B2B_XLAT_OFFSETL 0x0144 #define XEON_B2B_XLAT_OFFSETU 0x0148 #define SOC_MW_COUNT 2 #define SOC_DB_COUNT 34 #define SOC_DB_MSIX_VECTOR_COUNT 34 #define SOC_DB_MSIX_VECTOR_SHIFT 1 #define SOC_SPAD_COUNT 16 #define SOC_PCICMD_OFFSET 0xb004 #define SOC_MBAR23_OFFSET 0xb018 #define SOC_MBAR45_OFFSET 0xb020 #define SOC_DEVCTRL_OFFSET 0xb048 #define SOC_LINK_STATUS_OFFSET 0xb052 #define SOC_ERRCORSTS_OFFSET 0xb110 #define SOC_SBAR2XLAT_OFFSET 0x0008 #define SOC_SBAR4XLAT_OFFSET 0x0010 #define SOC_PDOORBELL_OFFSET 0x0020 #define SOC_PDBMSK_OFFSET 0x0028 #define SOC_NTBCNTL_OFFSET 0x0060 #define SOC_EBDF_OFFSET 0x0064 #define SOC_SPAD_OFFSET 0x0080 #define SOC_SPADSEMA_OFFSET 0x00c0 #define SOC_STKYSPAD_OFFSET 0x00c4 #define SOC_PBAR2XLAT_OFFSET 0x8008 #define SOC_PBAR4XLAT_OFFSET 0x8010 #define SOC_B2B_DOORBELL_OFFSET 0x8020 #define SOC_B2B_SPAD_OFFSET 0x8080 #define SOC_B2B_SPADSEMA_OFFSET 0x80c0 #define SOC_B2B_STKYSPAD_OFFSET 0x80c4 #define SOC_MODPHY_PCSREG4 0x1c004 #define SOC_MODPHY_PCSREG6 0x1c006 #define SOC_IP_BASE 0xc000 #define SOC_DESKEWSTS_OFFSET (SOC_IP_BASE + 0x3024) #define SOC_LTSSMERRSTS0_OFFSET (SOC_IP_BASE + 0x3180) #define SOC_LTSSMSTATEJMP_OFFSET (SOC_IP_BASE + 0x3040) #define SOC_IBSTERRRCRVSTS0_OFFSET (SOC_IP_BASE + 0x3324) #define SOC_DESKEWSTS_DBERR (1 << 15) #define SOC_LTSSMERRSTS0_UNEXPECTEDEI (1 << 20) #define SOC_LTSSMSTATEJMP_FORCEDETECT (1 << 2) #define SOC_IBIST_ERR_OFLOW 0x7fff7fff #define NTB_CNTL_CFG_LOCK (1 << 0) #define NTB_CNTL_LINK_DISABLE (1 << 1) #define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2) #define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4) #define NTB_CNTL_S2P_BAR4_SNOOP (1 << 6) #define NTB_CNTL_P2S_BAR4_SNOOP (1 << 8) #define NTB_CNTL_S2P_BAR5_SNOOP (1 << 12) #define NTB_CNTL_P2S_BAR5_SNOOP (1 << 14) #define SOC_CNTL_LINK_DOWN (1 << 16) #define XEON_PBAR23SZ_OFFSET 0x00d0 #define 
XEON_PBAR45SZ_OFFSET 0x00d1 #define XEON_PBAR4SZ_OFFSET 0x00d1 #define XEON_PBAR5SZ_OFFSET 0x00d5 #define XEON_SBAR23SZ_OFFSET 0x00d2 #define XEON_SBAR4SZ_OFFSET 0x00d3 #define XEON_SBAR5SZ_OFFSET 0x00d6 #define NTB_PPD_OFFSET 0x00d4 #define XEON_PPD_CONN_TYPE 0x0003 #define XEON_PPD_DEV_TYPE 0x0010 #define XEON_PPD_SPLIT_BAR 0x0040 #define SOC_PPD_INIT_LINK 0x0008 #define SOC_PPD_CONN_TYPE 0x0300 #define SOC_PPD_DEV_TYPE 0x1000 #define NTB_CONN_TRANSPARENT 0 #define NTB_CONN_B2B 1 #define NTB_CONN_RP 2 #define NTB_DEV_DSD 1 #define NTB_DEV_USD 0 /* All addresses are in low 32-bit space so 32-bit BARs can function */ #define XEON_B2B_BAR0_USD_ADDR 0x1000000000000000ull #define XEON_B2B_BAR2_USD_ADDR64 0x2000000000000000ull #define XEON_B2B_BAR4_USD_ADDR64 0x4000000000000000ull #define XEON_B2B_BAR4_USD_ADDR32 0x20000000ull #define XEON_B2B_BAR5_USD_ADDR32 0x40000000ull #define XEON_B2B_BAR0_DSD_ADDR 0x9000000000000000ull #define XEON_B2B_BAR2_DSD_ADDR64 0xa000000000000000ull #define XEON_B2B_BAR4_DSD_ADDR64 0xc000000000000000ull #define XEON_B2B_BAR4_DSD_ADDR32 0xa0000000ull #define XEON_B2B_BAR5_DSD_ADDR32 0xc0000000ull /* The peer ntb secondary config space is 32KB fixed size */ #define XEON_B2B_MIN_SIZE 0x8000 /* XEON Shadowed MMIO Space */ #define XEON_SHADOW_PDOORBELL_OFFSET 0x60 #define XEON_SHADOW_SPAD_OFFSET 0x80 #endif /* _NTB_REGS_H_ */
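To see how the reworked pieces of this interface fit together, here is a minimal consumer sketch. It is illustrative only and not part of this change: the my_* names are hypothetical, error and teardown handling (ntb_clear_ctx(), ntb_link_disable()) is abbreviated, and a real client such as if_ntb additionally runs the scratchpad version handshake on top of this.

/*
 * Illustrative consumer of the reworked ntb_hw(4) interface; a sketch
 * only, with hypothetical my_* names and abbreviated error handling.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <machine/bus.h>

#include "../ntb_hw/ntb_hw.h"

static void
my_link_event(void *data)
{
	struct ntb_softc *ntb = data;
	enum ntb_speed speed;
	enum ntb_width width;

	if (ntb_link_is_up(ntb, &speed, &width))
		printf("ntb: link up, PCIe gen%d x%d\n", (int)speed,
		    (int)width);
	else
		printf("ntb: link down\n");
}

static void
my_db_event(void *data, int vector)
{
	struct ntb_softc *ntb = data;
	uint64_t db_bits;

	/* Collect and acknowledge the doorbell bits behind this vector. */
	db_bits = ntb_db_vector_mask(ntb, vector) & ntb_db_read(ntb);
	ntb_db_clear(ntb, db_bits);
}

static const struct ntb_ctx_ops my_ctx_ops = {
	.link_event = my_link_event,
	.db_event = my_db_event,
};

/* Expose "len" bytes of local memory at "dma_addr" via window 0. */
static int
my_ntb_attach(struct ntb_softc *ntb, bus_addr_t dma_addr, size_t len)
{
	size_t size, align, align_size;
	vm_paddr_t base;
	void *vbase;
	int rc;

	rc = ntb_set_ctx(ntb, ntb, &my_ctx_ops);
	if (rc != 0)
		return (rc);

	rc = ntb_mw_get_range(ntb, 0, &base, &vbase, &size, &align,
	    &align_size);
	if (rc != 0)
		return (rc);
	/* Honor the translation constraints (align is a power of two). */
	if (len > size || (dma_addr & (align - 1)) != 0)
		return (EINVAL);
	rc = ntb_mw_set_trans(ntb, 0, dma_addr, len);
	if (rc != 0)
		return (rc);

	/* Bring the link up; my_link_event() fires on state changes. */
	return (ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO));
}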