Index: stable/10/contrib/ofed/libcxgb4/src/t4.h =================================================================== --- stable/10/contrib/ofed/libcxgb4/src/t4.h (revision 318798) +++ stable/10/contrib/ofed/libcxgb4/src/t4.h (revision 318799) @@ -1,719 +1,719 @@ /* * Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef __T4_H__ #define __T4_H__ #include #include #include #define ENODATA 95 #undef ELAST #define ELAST ENODATA /* * Try and minimize the changes from the kernel code that is pull in * here for kernel bypass ops. */ #define __u8 uint8_t #define u8 uint8_t #define __u16 uint16_t #define __be16 uint16_t #define u16 uint16_t #define __u32 uint32_t #define __be32 uint32_t #define u32 uint32_t #define __u64 uint64_t #define __be64 uint64_t #define u64 uint64_t #define DECLARE_PCI_UNMAP_ADDR(a) #define __iomem #define cpu_to_be16 htons #define cpu_to_be32 htonl #define cpu_to_be64 htonll #define be16_to_cpu ntohs #define be32_to_cpu ntohl #define be64_to_cpu ntohll #define BUG_ON(c) assert(!(c)) #define unlikely #define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u)) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #if __BYTE_ORDER == __LITTLE_ENDIAN # define cpu_to_pci32(val) ((val)) #elif __BYTE_ORDER == __BIG_ENDIAN # define cpu_to_pci32(val) (__bswap_32((val))) #else # error __BYTE_ORDER not defined #endif #define writel(v, a) do { *((volatile u32 *)(a)) = cpu_to_pci32(v); } while (0) #include /* For htonl() and friends */ #include "t4_regs.h" #include "t4_chip_type.h" #include "t4fw_interface.h" #ifdef DEBUG #define DBGLOG(s) #define PDBG(fmt, args...) do {syslog(LOG_DEBUG, fmt, ##args); } while (0) #else #define DBGLOG(s) #define PDBG(fmt, args...) do {} while (0) #endif #define T4_MAX_READ_DEPTH 16 #define T4_QID_BASE 1024 #define T4_MAX_QIDS 256 #define T4_MAX_NUM_PD 65536 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 
2 : 1) #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES) #define T4_MAX_IQ_SIZE (65520 - 1) #define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES) #define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1) #define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1) #define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1) #define T4_MAX_NUM_STAG (1<<15) #define T4_MAX_MR_SIZE (~0ULL - 1) -#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ +#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */ #define T4_STAG_UNSET 0xffffffff #define T4_FW_MAJ 0 struct t4_status_page { __be32 rsvd1; /* flit 0 - hw owns */ __be16 rsvd2; __be16 qid; __be16 cidx; __be16 pidx; u8 qp_err; /* flit 1 - sw owns */ u8 db_off; u8 pad; u16 host_wq_pidx; u16 host_cidx; u16 host_pidx; }; #define T4_EQ_ENTRY_SIZE 64 #define T4_SQ_NUM_SLOTS 5 #define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS) #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_isgl)) / sizeof (struct fw_ri_sge)) #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_immd))) #define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct fw_ri_immd))) #define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct fw_ri_isgl)) / sizeof (struct fw_ri_sge)) #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - sizeof(struct fw_ri_immd))) #define T4_MAX_FR_DEPTH 255 #define T4_RQ_NUM_SLOTS 2 #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) #define T4_MAX_RECV_SGE 4 union t4_wr { struct fw_ri_res_wr res; struct fw_ri_wr init; struct fw_ri_rdma_write_wr write; struct fw_ri_send_wr send; struct fw_ri_rdma_read_wr read; struct fw_ri_bind_mw_wr bind; struct fw_ri_fr_nsmr_wr fr; struct fw_ri_inv_lstag_wr inv; struct t4_status_page status; __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS]; }; union t4_recv_wr { struct fw_ri_recv_wr recv; struct t4_status_page status; __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS]; }; static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, enum fw_wr_opcodes opcode, u8 flags, u8 len16) { wqe->send.opcode = (u8)opcode; wqe->send.flags = flags; wqe->send.wrid = wrid; wqe->send.r1[0] = 0; wqe->send.r1[1] = 0; wqe->send.r1[2] = 0; wqe->send.len16 = len16; } /* CQE/AE status codes */ #define T4_ERR_SUCCESS 0x0 #define T4_ERR_STAG 0x1 /* STAG invalid: either the */ /* STAG is offlimt, being 0, */ /* or STAG_key mismatch */ #define T4_ERR_PDID 0x2 /* PDID mismatch */ #define T4_ERR_QPID 0x3 /* QPID mismatch */ #define T4_ERR_ACCESS 0x4 /* Invalid access right */ #define T4_ERR_WRAP 0x5 /* Wrap error */ #define T4_ERR_BOUND 0x6 /* base and bounds voilation */ #define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */ /* shared memory region */ #define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */ /* shared memory region */ #define T4_ERR_ECC 0x9 /* ECC error detected */ #define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */ /* reading PSTAG for a MW */ /* Invalidate */ #define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */ /* software error */ #define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */ #define T4_ERR_CRC 0x10 /* CRC error */ #define T4_ERR_MARKER 0x11 /* Marker error */ #define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */ #define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */ #define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */ #define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */ #define 
T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
#define T4_ERR_MSN 0x18 /* MSN error */
#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
 /* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
 /* software error */
#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
 /* mismatch) */
/*
 * CQE defs
 */
struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 nada1;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */
#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 26
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) (x)->u.scqe.cidx

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)

/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))

struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	__be32 read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
	int flushed;
};

enum {
	T4_SQ_ONCHIP = (1<<0),
};

struct t4_sq {
	union t4_wr *queue;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	volatile u32 *udb;
	size_t memsize;
	u32 qid;
	u32 bar2_qid;
	void *ma_sync;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 flags;
	short flush_cidx;
	int wc_reg_available;
};

struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	struct t4_swrqe *sw_rq;
	volatile u32 *udb;
	size_t memsize;
	u32 qid;
	u32 bar2_qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	int wc_reg_available;
};

struct t4_wq {
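	/*
	 * A t4_wq pairs the send and receive rings that back one QP.
	 * Each ring reserves the slot just past its end (queue[size])
	 * for a t4_status_page, which is why the produce/consume
	 * helpers below write queue[size].status.host_pidx/host_cidx
	 * rather than a separate field.
	 */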
struct t4_sq sq; struct t4_rq rq; struct c4iw_rdev *rdev; u32 qid_mask; int error; int flushed; u8 *db_offp; }; static inline void t4_ma_sync(struct t4_wq *wq, int page_size) { wc_wmb(); *((volatile u32 *)wq->sq.ma_sync) = 1; } static inline int t4_rqes_posted(struct t4_wq *wq) { return wq->rq.in_use; } static inline int t4_rq_empty(struct t4_wq *wq) { return wq->rq.in_use == 0; } static inline int t4_rq_full(struct t4_wq *wq) { return wq->rq.in_use == (wq->rq.size - 1); } static inline u32 t4_rq_avail(struct t4_wq *wq) { return wq->rq.size - 1 - wq->rq.in_use; } static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) { wq->rq.in_use++; if (++wq->rq.pidx == wq->rq.size) wq->rq.pidx = 0; wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; if (!wq->error) wq->rq.queue[wq->rq.size].status.host_pidx = wq->rq.pidx; } static inline void t4_rq_consume(struct t4_wq *wq) { wq->rq.in_use--; wq->rq.msn++; if (++wq->rq.cidx == wq->rq.size) wq->rq.cidx = 0; assert((wq->rq.cidx != wq->rq.pidx) || wq->rq.in_use == 0); if (!wq->error) wq->rq.queue[wq->rq.size].status.host_cidx = wq->rq.cidx; } static inline int t4_sq_empty(struct t4_wq *wq) { return wq->sq.in_use == 0; } static inline int t4_sq_full(struct t4_wq *wq) { return wq->sq.in_use == (wq->sq.size - 1); } static inline u32 t4_sq_avail(struct t4_wq *wq) { return wq->sq.size - 1 - wq->sq.in_use; } static inline int t4_sq_onchip(struct t4_wq *wq) { return wq->sq.flags & T4_SQ_ONCHIP; } static inline void t4_sq_produce(struct t4_wq *wq, u8 len16) { wq->sq.in_use++; if (++wq->sq.pidx == wq->sq.size) wq->sq.pidx = 0; wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; if (!wq->error) wq->sq.queue[wq->sq.size].status.host_pidx = (wq->sq.pidx); } static inline void t4_sq_consume(struct t4_wq *wq) { assert(wq->sq.in_use >= 1); if (wq->sq.cidx == wq->sq.flush_cidx) wq->sq.flush_cidx = -1; wq->sq.in_use--; if (++wq->sq.cidx == wq->sq.size) wq->sq.cidx = 0; assert((wq->sq.cidx != wq->sq.pidx) || wq->sq.in_use == 0); if (!wq->error) wq->sq.queue[wq->sq.size].status.host_cidx = wq->sq.cidx; } static void copy_wqe_to_udb(volatile u32 *udb_offset, void *wqe) { u64 *src, *dst; int len16 = 4; src = (u64 *)wqe; dst = (u64 *)udb_offset; while (len16) { *dst++ = *src++; *dst++ = *src++; len16--; } } extern int ma_wr; extern int t5_en_wc; static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16, union t4_wr *wqe) { wc_wmb(); if (t5) { if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) { PDBG("%s: WC wq->sq.pidx = %d; len16=%d\n", __func__, wq->sq.pidx, len16); copy_wqe_to_udb(wq->sq.udb + 14, wqe); } else { PDBG("%s: DB wq->sq.pidx = %d; len16=%d\n", __func__, wq->sq.pidx, len16); writel(V_QID(wq->sq.bar2_qid) | V_PIDX_T5(inc), wq->sq.udb); } wc_wmb(); return; } if (ma_wr) { if (t4_sq_onchip(wq)) { int i; for (i = 0; i < 16; i++) *(volatile u32 *)&wq->sq.queue[wq->sq.size].flits[2+i] = i; } } else { if (t4_sq_onchip(wq)) { int i; for (i = 0; i < 16; i++) *(u32 *)&wq->sq.queue[wq->sq.size].flits[2] = i; } } writel(V_QID(wq->sq.qid & wq->qid_mask) | V_PIDX(inc), wq->sq.udb); } static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16, union t4_recv_wr *wqe) { wc_wmb(); if (t5) { if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) { PDBG("%s: WC wq->rq.pidx = %d; len16=%d\n", __func__, wq->rq.pidx, len16); 
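			/*
			 * For a single WQE (inc == 1) the entire 64-byte
			 * request (len16 == 4) is pushed through the
			 * write-combined doorbell window as eight 64-bit
			 * stores (copy_wqe_to_udb() above), presumably so the
			 * chip receives the WQE together with the doorbell
			 * instead of DMA-reading it from host memory;
			 * otherwise only the QID/PIDX doorbell write below is
			 * issued.
			 */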
copy_wqe_to_udb(wq->rq.udb + 14, wqe); } else { PDBG("%s: DB wq->rq.pidx = %d; len16=%d\n", __func__, wq->rq.pidx, len16); writel(V_QID(wq->rq.bar2_qid) | V_PIDX_T5(inc), wq->rq.udb); } wc_wmb(); return; } writel(V_QID(wq->rq.qid & wq->qid_mask) | V_PIDX(inc), wq->rq.udb); } static inline int t4_wq_in_error(struct t4_wq *wq) { return wq->error || wq->rq.queue[wq->rq.size].status.qp_err; } static inline void t4_set_wq_in_error(struct t4_wq *wq) { wq->rq.queue[wq->rq.size].status.qp_err = 1; } extern int c4iw_abi_version; static inline int t4_wq_db_enabled(struct t4_wq *wq) { /* * If iw_cxgb4 driver supports door bell drop recovery then its * c4iw_abi_version would be greater than or equal to 2. In such * case return the status of db_off flag to ring the kernel mode * DB from user mode library. */ if ( c4iw_abi_version >= 2 ) return ! *wq->db_offp; else return 1; } struct t4_cq { struct t4_cqe *queue; struct t4_cqe *sw_queue; struct c4iw_rdev *rdev; volatile u32 *ugts; size_t memsize; u64 bits_type_ts; u32 cqid; u32 qid_mask; u16 size; /* including status page */ u16 cidx; u16 sw_pidx; u16 sw_cidx; u16 sw_in_use; u16 cidx_inc; u8 gen; u8 error; }; static inline int t4_arm_cq(struct t4_cq *cq, int se) { u32 val; while (cq->cidx_inc > M_CIDXINC) { val = V_SEINTARM(0) | V_CIDXINC(M_CIDXINC) | V_TIMERREG(7) | V_INGRESSQID(cq->cqid & cq->qid_mask); writel(val, cq->ugts); cq->cidx_inc -= M_CIDXINC; } val = V_SEINTARM(se) | V_CIDXINC(cq->cidx_inc) | V_TIMERREG(6) | V_INGRESSQID(cq->cqid & cq->qid_mask); writel(val, cq->ugts); cq->cidx_inc = 0; return 0; } static inline void t4_swcq_produce(struct t4_cq *cq) { cq->sw_in_use++; if (cq->sw_in_use == cq->size) { syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid); cq->error = 1; assert(0); } if (++cq->sw_pidx == cq->size) cq->sw_pidx = 0; } static inline void t4_swcq_consume(struct t4_cq *cq) { assert(cq->sw_in_use >= 1); cq->sw_in_use--; if (++cq->sw_cidx == cq->size) cq->sw_cidx = 0; } static inline void t4_hwcq_consume(struct t4_cq *cq) { cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) { uint32_t val; val = V_SEINTARM(0) | V_CIDXINC(cq->cidx_inc) | V_TIMERREG(7) | V_INGRESSQID(cq->cqid & cq->qid_mask); writel(val, cq->ugts); cq->cidx_inc = 0; } if (++cq->cidx == cq->size) { cq->cidx = 0; cq->gen ^= 1; } ((struct t4_status_page *)&cq->queue[cq->size])->host_cidx = cq->cidx; } static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) { return (CQE_GENBIT(cqe) == cq->gen); } static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { int ret; u16 prev_cidx; if (cq->cidx == 0) prev_cidx = cq->size - 1; else prev_cidx = cq->cidx - 1; if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { ret = -EOVERFLOW; syslog(LOG_NOTICE, "cxgb4 cq overflow cqid %u\n", cq->cqid); cq->error = 1; assert(0); } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { rmb(); *cqe = &cq->queue[cq->cidx]; ret = 0; } else ret = -ENODATA; return ret; } static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) { if (cq->sw_in_use == cq->size) { syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid); cq->error = 1; assert(0); return NULL; } if (cq->sw_in_use) return &cq->sw_queue[cq->sw_cidx]; return NULL; } static inline int t4_cq_notempty(struct t4_cq *cq) { return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); } static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { int ret = 0; if (cq->error) ret = -ENODATA; else if (cq->sw_in_use) 
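		/* software CQEs (e.g. entries inserted while flushing a
		 * QP) are drained before any new hardware CQE is reported */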
*cqe = &cq->sw_queue[cq->sw_cidx]; else ret = t4_next_hw_cqe(cq, cqe); return ret; } static inline int t4_cq_in_error(struct t4_cq *cq) { return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err; } static inline void t4_set_cq_in_error(struct t4_cq *cq) { ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; } static inline void t4_reset_cq_in_error(struct t4_cq *cq) { ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 0; } struct t4_dev_status_page { u8 db_off; }; #endif Index: stable/10/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c =================================================================== --- stable/10/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c (revision 318798) +++ stable/10/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c (revision 318799) @@ -1,1159 +1,1159 @@ /************************************************************************** Copyright (c) 2007, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int iwch_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) { return (-ENOSYS); } static struct ib_ah * iwch_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { return ERR_PTR(-ENOSYS); } static int iwch_ah_destroy(struct ib_ah *ah) { return (-ENOSYS); } static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { return (-ENOSYS); } static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { return (-ENOSYS); } static int iwch_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { return (-ENOSYS); } static int iwch_dealloc_ucontext(struct ib_ucontext *context) { struct iwch_dev *rhp = to_iwch_dev(context->device); struct iwch_ucontext *ucontext = to_iwch_ucontext(context); struct iwch_mm_entry *mm, *tmp; CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context); TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) { TAILQ_REMOVE(&ucontext->mmaps, mm, entry); cxfree(mm); } cxio_release_ucontext(&rhp->rdev, &ucontext->uctx); cxfree(ucontext); return 0; } static struct ib_ucontext * iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct iwch_ucontext *context; struct iwch_dev *rhp = to_iwch_dev(ibdev); CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev); context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT); if (!context) return ERR_PTR(-ENOMEM); cxio_init_ucontext(&rhp->rdev, &context->uctx); TAILQ_INIT(&context->mmaps); mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF); return &context->ibucontext; } static int iwch_destroy_cq(struct ib_cq *ib_cq) { struct iwch_cq *chp; CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq); chp = to_iwch_cq(ib_cq); remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); mtx_lock(&chp->lock); if (--chp->refcnt) msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0); mtx_unlock(&chp->lock); cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); cxfree(chp); return 0; } static struct ib_cq * iwch_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata) { struct iwch_dev *rhp; struct iwch_cq *chp; struct iwch_create_cq_resp uresp; struct iwch_create_cq_req ureq; struct iwch_ucontext *ucontext = NULL; static int warned; size_t resplen; CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries); rhp = to_iwch_dev(ibdev); chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO); if (!chp) { return ERR_PTR(-ENOMEM); } if (ib_context) { ucontext = to_iwch_ucontext(ib_context); if (!t3a_device(rhp)) { if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) { cxfree(chp); return ERR_PTR(-EFAULT); } chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr; } } if (t3a_device(rhp)) { /* * T3A: Add some fluff to handle extra CQEs inserted * for various errors. 
* Additional CQE possibilities: * TERMINATE, * incoming RDMA WRITE Failures * incoming RDMA READ REQUEST FAILUREs * NOTE: We cannot ensure the CQ won't overflow. */ entries += 16; } entries = roundup_pow_of_two(entries); chp->cq.size_log2 = ilog2(entries); if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { cxfree(chp); return ERR_PTR(-ENOMEM); } chp->rhp = rhp; chp->ibcq.cqe = 1 << chp->cq.size_log2; mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK); chp->refcnt = 1; if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); cxfree(chp); return ERR_PTR(-ENOMEM); } if (ucontext) { struct iwch_mm_entry *mm; mm = kmalloc(sizeof *mm, M_NOWAIT); if (!mm) { iwch_destroy_cq(&chp->ibcq); return ERR_PTR(-ENOMEM); } uresp.cqid = chp->cq.cqid; uresp.size_log2 = chp->cq.size_log2; mtx_lock(&ucontext->mmap_lock); uresp.key = ucontext->key; ucontext->key += PAGE_SIZE; mtx_unlock(&ucontext->mmap_lock); mm->key = uresp.key; mm->addr = vtophys(chp->cq.queue); if (udata->outlen < sizeof uresp) { if (!warned++) CTR1(KTR_IW_CXGB, "%s Warning - " "downlevel libcxgb3 (non-fatal).\n", __func__); mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * sizeof(struct t3_cqe)); resplen = sizeof(struct iwch_create_cq_resp_v0); } else { mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * sizeof(struct t3_cqe)); uresp.memsize = mm->len; resplen = sizeof uresp; } if (ib_copy_to_udata(udata, &uresp, resplen)) { cxfree(mm); iwch_destroy_cq(&chp->ibcq); return ERR_PTR(-EFAULT); } insert_mmap(ucontext, mm); } CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx", chp->cq.cqid, chp, (1 << chp->cq.size_log2), (unsigned long long) chp->cq.dma_addr); return &chp->ibcq; } static int iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused, struct ib_udata *udata __unused) { return (-ENOSYS); } static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct iwch_dev *rhp; struct iwch_cq *chp; enum t3_cq_opcode cq_op; int err; u32 rptr; chp = to_iwch_cq(ibcq); rhp = chp->rhp; if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) cq_op = CQ_ARM_SE; else cq_op = CQ_ARM_AN; if (chp->user_rptr_addr) { if (copyin(chp->user_rptr_addr, &rptr, sizeof(rptr))) return (-EFAULT); mtx_lock(&chp->lock); chp->cq.rptr = rptr; } else mtx_lock(&chp->lock); CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr); err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); mtx_unlock(&chp->lock); if (err < 0) log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err, chp->cq.cqid); if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) err = 0; return err; } static int iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused) { return (-ENOSYS); } static int iwch_deallocate_pd(struct ib_pd *pd) { struct iwch_dev *rhp; struct iwch_pd *php; php = to_iwch_pd(pd); rhp = php->rhp; CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid); cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); cxfree(php); return 0; } static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct iwch_pd *php; u32 pdid; struct iwch_dev *rhp; CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev); rhp = (struct iwch_dev *) ibdev; pdid = cxio_hal_get_pdid(rhp->rdev.rscp); if (!pdid) return ERR_PTR(-EINVAL); php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT); if (!php) { cxio_hal_put_pdid(rhp->rdev.rscp, pdid); return ERR_PTR(-ENOMEM); } php->pdid = pdid; php->rhp = rhp; if (context) { if 
(ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) { iwch_deallocate_pd(&php->ibpd); return ERR_PTR(-EFAULT); } } CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php); return &php->ibpd; } static int iwch_dereg_mr(struct ib_mr *ib_mr) { struct iwch_dev *rhp; struct iwch_mr *mhp; u32 mmid; CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr); /* There can be no memory windows */ if (atomic_load_acq_int(&ib_mr->usecnt.counter)) return (-EINVAL); mhp = to_iwch_mr(ib_mr); rhp = mhp->rhp; mmid = mhp->attr.stag >> 8; cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); iwch_free_pbl(mhp); remove_handle(rhp, &rhp->mmidr, mmid); if (mhp->kva) cxfree((void *) (unsigned long) mhp->kva); if (mhp->umem) ib_umem_release(mhp->umem); CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp); cxfree(mhp); return 0; } static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { __be64 *page_list; int shift; u64 total_size; int npages; struct iwch_dev *rhp; struct iwch_pd *php; struct iwch_mr *mhp; int ret; CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd); php = to_iwch_pd(pd); rhp = php->rhp; mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; /* First check that we have enough alignment */ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { ret = -EINVAL; goto err; } if (num_phys_buf > 1 && ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) { ret = -EINVAL; goto err; } ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) goto err; ret = iwch_alloc_pbl(mhp, npages); if (ret) { cxfree(page_list); goto err_pbl; } ret = iwch_write_pbl(mhp, page_list, npages, 0); cxfree(page_list); if (ret) goto err; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = iwch_ib_to_tpt_access(acc); mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) total_size; mhp->attr.pbl_size = npages; ret = iwch_register_mem(rhp, php, mhp, shift); if (ret) goto err_pbl; return &mhp->ibmr; err_pbl: iwch_free_pbl(mhp); err: cxfree(mhp); return ERR_PTR(ret); } static int iwch_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 * iova_start) { struct iwch_mr mh, *mhp; struct iwch_pd *php; struct iwch_dev *rhp; __be64 *page_list = NULL; int shift = 0; u64 total_size; int npages = 0; int ret; CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd); /* There can be no memory windows */ if (atomic_load_acq_int(&mr->usecnt.counter)) return (-EINVAL); mhp = to_iwch_mr(mr); rhp = mhp->rhp; php = to_iwch_pd(mr->pd); /* make sure we are on the same adapter */ if (rhp != php->rhp) return (-EINVAL); memcpy(&mh, mhp, sizeof *mhp); if (mr_rereg_mask & IB_MR_REREG_PD) php = to_iwch_pd(pd); if (mr_rereg_mask & IB_MR_REREG_ACCESS) mh.attr.perms = iwch_ib_to_tpt_access(acc); if (mr_rereg_mask & IB_MR_REREG_TRANS) { ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) return ret; } ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); cxfree(page_list); if (ret) { return ret; } if (mr_rereg_mask & IB_MR_REREG_PD) mhp->attr.pdid = php->pdid; if (mr_rereg_mask & IB_MR_REREG_ACCESS) mhp->attr.perms = iwch_ib_to_tpt_access(acc); if (mr_rereg_mask & IB_MR_REREG_TRANS) { 
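		/* PD and access rights were already updated on mhp->attr
		 * above; only the translation computed by
		 * build_phys_page_list() remains to be committed */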
mhp->attr.zbva = 0; mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) total_size; mhp->attr.pbl_size = npages; } return 0; } static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata, int mr_id) { __be64 *pages; int shift, i, n; int err = 0; struct ib_umem_chunk *chunk; struct iwch_dev *rhp; struct iwch_pd *php; struct iwch_mr *mhp; struct iwch_reg_user_mr_resp uresp; #ifdef notyet int j, k, len; #endif CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd); php = to_iwch_pd(pd); rhp = php->rhp; mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); cxfree(mhp); return ERR_PTR(-err); } shift = ffs(mhp->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) n += chunk->nents; err = iwch_alloc_pbl(mhp, n); if (err) goto err; pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT); if (!pages) { err = -ENOMEM; goto err_pbl; } i = n = 0; #ifdef notyet TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = htobe64(sg_dma_address( &chunk->page_list[j]) + mhp->umem->page_size * k); if (i == PAGE_SIZE / sizeof *pages) { err = iwch_write_pbl(mhp, pages, i, n); if (err) goto pbl_done; n += i; i = 0; } } } #endif if (i) err = iwch_write_pbl(mhp, pages, i, n); #ifdef notyet pbl_done: #endif cxfree(pages); if (err) goto err_pbl; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = iwch_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) length; err = iwch_register_mem(rhp, php, mhp, shift); if (err) goto err_pbl; if (udata && !t3a_device(rhp)) { uresp.pbl_addr = (mhp->attr.pbl_addr - rhp->rdev.rnic_info.pbl_base) >> 3; CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__, uresp.pbl_addr); if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { iwch_dereg_mr(&mhp->ibmr); err = EFAULT; goto err; } } return &mhp->ibmr; err_pbl: iwch_free_pbl(mhp); err: ib_umem_release(mhp->umem); cxfree(mhp); return ERR_PTR(-err); } static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc) { struct ib_phys_buf bl; u64 kva; struct ib_mr *ibmr; CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd); /* * T3 only supports 32 bits of size. 
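	 * The DMA MR is therefore registered as a single 4GB window at
	 * address 0 (bl.size = 0xffffffff, bl.addr = 0 below).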
*/ bl.size = 0xffffffff; bl.addr = 0; kva = 0; ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva); return ibmr; } static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd) { struct iwch_dev *rhp; struct iwch_pd *php; struct iwch_mw *mhp; u32 mmid; u32 stag = 0; int ret; php = to_iwch_pd(pd); rhp = php->rhp; mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT); if (!mhp) return ERR_PTR(-ENOMEM); ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid); if (ret) { cxfree(mhp); return ERR_PTR(-ret); } mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.type = TPT_MW; mhp->attr.stag = stag; mmid = (stag) >> 8; mhp->ibmw.rkey = stag; if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { cxio_deallocate_window(&rhp->rdev, mhp->attr.stag); cxfree(mhp); return ERR_PTR(-ENOMEM); } CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag); return &(mhp->ibmw); } static int iwch_dealloc_mw(struct ib_mw *mw) { struct iwch_dev *rhp; struct iwch_mw *mhp; u32 mmid; mhp = to_iwch_mw(mw); rhp = mhp->rhp; mmid = (mw->rkey) >> 8; cxio_deallocate_window(&rhp->rdev, mhp->attr.stag); remove_handle(rhp, &rhp->mmidr, mmid); cxfree(mhp); CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp); return 0; } static int iwch_destroy_qp(struct ib_qp *ib_qp) { struct iwch_dev *rhp; struct iwch_qp *qhp; struct iwch_qp_attributes attrs; struct iwch_ucontext *ucontext; qhp = to_iwch_qp(ib_qp); rhp = qhp->rhp; attrs.next_state = IWCH_QP_STATE_ERROR; iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0); mtx_lock(&qhp->lock); if (qhp->ep) msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0); mtx_unlock(&qhp->lock); remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); mtx_lock(&qhp->lock); if (--qhp->refcnt) msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0); mtx_unlock(&qhp->lock); ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context) : NULL; cxio_destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx); CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__, ib_qp, qhp->wq.qpid, qhp); cxfree(qhp); return 0; } static struct ib_qp *iwch_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct iwch_dev *rhp; struct iwch_qp *qhp; struct iwch_pd *php; struct iwch_cq *schp; struct iwch_cq *rchp; struct iwch_create_qp_resp uresp; int wqsize, sqsize, rqsize; struct iwch_ucontext *ucontext; CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd); if (attrs->qp_type != IB_QPT_RC) return ERR_PTR(-EINVAL); php = to_iwch_pd(pd); rhp = php->rhp; schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid); rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid); if (!schp || !rchp) return ERR_PTR(-EINVAL); /* The RQT size must be # of entries + 1 rounded up to a power of two */ rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr); if (rqsize == attrs->cap.max_recv_wr) rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1); /* T3 doesn't support RQT depth < 16 */ if (rqsize < 16) rqsize = 16; if (rqsize > T3_MAX_RQ_SIZE) return ERR_PTR(-EINVAL); if (attrs->cap.max_inline_data > T3_MAX_INLINE) return ERR_PTR(-EINVAL); /* * NOTE: The SQ and total WQ sizes don't need to be * a power of two. However, all the code assumes * they are. EG: Q_FREECNT() and friends. 
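	 * Worked example with hypothetical caps: max_recv_wr = 64 rounds
	 * up to itself, trips the "entries + 1" rule above, and becomes
	 * rqsize = roundup_pow_of_two(65) = 128; max_send_wr = 100 gives
	 * sqsize = 128 below, so wqsize = roundup_pow_of_two(256) = 256.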
*/ sqsize = roundup_pow_of_two(attrs->cap.max_send_wr); wqsize = roundup_pow_of_two(rqsize + sqsize); CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__, wqsize, sqsize, rqsize); qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT); if (!qhp) return ERR_PTR(-ENOMEM); qhp->wq.size_log2 = ilog2(wqsize); qhp->wq.rq_size_log2 = ilog2(rqsize); qhp->wq.sq_size_log2 = ilog2(sqsize); ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL; if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) { cxfree(qhp); return ERR_PTR(-ENOMEM); } attrs->cap.max_recv_wr = rqsize - 1; attrs->cap.max_send_wr = sqsize; attrs->cap.max_inline_data = T3_MAX_INLINE; qhp->rhp = rhp; qhp->attr.pd = php->pdid; qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid; qhp->attr.sq_num_entries = attrs->cap.max_send_wr; qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; qhp->attr.sq_max_sges = attrs->cap.max_send_sge; qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; qhp->attr.state = IWCH_QP_STATE_IDLE; qhp->attr.next_state = IWCH_QP_STATE_IDLE; /* * XXX - These don't get passed in from the openib user * at create time. The CM sets them via a QP modify. * Need to fix... I think the CM should */ qhp->attr.enable_rdma_read = 1; qhp->attr.enable_rdma_write = 1; qhp->attr.enable_bind = 1; qhp->attr.max_ord = 1; qhp->attr.max_ird = 1; mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK); qhp->refcnt = 1; if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { cxio_destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx); cxfree(qhp); return ERR_PTR(-ENOMEM); } if (udata) { struct iwch_mm_entry *mm1, *mm2; mm1 = kmalloc(sizeof *mm1, M_NOWAIT); if (!mm1) { iwch_destroy_qp(&qhp->ibqp); return ERR_PTR(-ENOMEM); } mm2 = kmalloc(sizeof *mm2, M_NOWAIT); if (!mm2) { cxfree(mm1); iwch_destroy_qp(&qhp->ibqp); return ERR_PTR(-ENOMEM); } uresp.qpid = qhp->wq.qpid; uresp.size_log2 = qhp->wq.size_log2; uresp.sq_size_log2 = qhp->wq.sq_size_log2; uresp.rq_size_log2 = qhp->wq.rq_size_log2; mtx_lock(&ucontext->mmap_lock); uresp.key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.db_key = ucontext->key; ucontext->key += PAGE_SIZE; mtx_unlock(&ucontext->mmap_lock); if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { cxfree(mm1); cxfree(mm2); iwch_destroy_qp(&qhp->ibqp); return ERR_PTR(-EFAULT); } mm1->key = uresp.key; mm1->addr = vtophys(qhp->wq.queue); mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr)); insert_mmap(ucontext, mm1); mm2->key = uresp.db_key; mm2->addr = qhp->wq.udb & PAGE_MASK; mm2->len = PAGE_SIZE; insert_mmap(ucontext, mm2); } qhp->ibqp.qp_num = qhp->wq.qpid; callout_init(&(qhp->timer), TRUE); CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d " "qpid 0x%0x qhp %p dma_addr 0x%llx size %d", qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr, 1 << qhp->wq.size_log2); return &qhp->ibqp; } static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct iwch_dev *rhp; struct iwch_qp *qhp; enum iwch_qp_attr_mask mask = 0; struct iwch_qp_attributes attrs; CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp); /* iwarp does not support the RTR state */ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) attr_mask &= ~IB_QP_STATE; /* Make sure we still have something left to do 
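	 * after the unsupported RTR transition was masked off above.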
*/ if (!attr_mask) return 0; memset(&attrs, 0, sizeof attrs); qhp = to_iwch_qp(ibqp); rhp = qhp->rhp; attrs.next_state = iwch_convert_state(attr->qp_state); attrs.enable_rdma_read = (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) ? 1 : 0; attrs.enable_rdma_write = (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0; mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? (IWCH_QP_ATTR_ENABLE_RDMA_READ | IWCH_QP_ATTR_ENABLE_RDMA_WRITE | IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0; return iwch_modify_qp(rhp, qhp, mask, &attrs, 0); } void iwch_qp_add_ref(struct ib_qp *qp) { CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp); mtx_lock(&to_iwch_qp(qp)->lock); to_iwch_qp(qp)->refcnt++; mtx_unlock(&to_iwch_qp(qp)->lock); } void iwch_qp_rem_ref(struct ib_qp *qp) { CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp); mtx_lock(&to_iwch_qp(qp)->lock); if (--to_iwch_qp(qp)->refcnt == 0) wakeup(to_iwch_qp(qp)); mtx_unlock(&to_iwch_qp(qp)->lock); } static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn) { CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn); return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn); } static int iwch_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey) { CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev); *pkey = 0; return 0; } static int iwch_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct iwch_dev *dev; struct port_info *pi; struct adapter *sc; CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p", __FUNCTION__, ibdev, port, index, gid); dev = to_iwch_dev(ibdev); sc = dev->rdev.adap; PANIC_IF(port == 0 || port > 2); pi = &sc->port[port - 1]; memset(&(gid->raw[0]), 0, sizeof(gid->raw)); memcpy(&(gid->raw[0]), pi->hw_addr, 6); return 0; } static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct iwch_dev *dev; struct adapter *sc; CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev); dev = to_iwch_dev(ibdev); sc = dev->rdev.adap; memset(props, 0, sizeof *props); memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6); props->device_cap_flags = dev->device_cap_flags; props->page_size_cap = dev->attr.mem_pgsizes_bitmask; props->vendor_id = pci_get_vendor(sc->dev); props->vendor_part_id = pci_get_device(sc->dev); props->max_mr_size = dev->attr.max_mr_size; props->max_qp = dev->attr.max_qps; props->max_qp_wr = dev->attr.max_wrs; props->max_sge = dev->attr.max_sge_per_wr; props->max_sge_rd = 1; props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp; props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp; props->max_cq = dev->attr.max_cqs; props->max_cqe = dev->attr.max_cqes_per_cq; props->max_mr = dev->attr.max_mem_regs; props->max_pd = dev->attr.max_pds; props->local_ca_ack_delay = 0; return 0; } static int iwch_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev); memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; props->active_mtu = IB_MTU_2048; props->state = IB_PORT_ACTIVE; props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_REINIT_SUP | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_width = 2; props->active_speed = 2; props->max_msg_sz = -1; return 0; } int iwch_register_device(struct iwch_dev *dev) { int ret; struct 
adapter *sc = dev->rdev.adap; CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev); strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX); memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6); dev->device_cap_flags = (IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW); dev->ibdev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV); dev->ibdev.node_type = RDMA_NODE_RNIC; memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); dev->ibdev.phys_port_cnt = sc->params.nports; dev->ibdev.num_comp_vectors = 1; - dev->ibdev.dma_device = dev->rdev.adap->dev; + dev->ibdev.dma_device = NULL; dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; dev->ibdev.modify_port = iwch_modify_port; dev->ibdev.query_pkey = iwch_query_pkey; dev->ibdev.query_gid = iwch_query_gid; dev->ibdev.alloc_ucontext = iwch_alloc_ucontext; dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext; dev->ibdev.mmap = iwch_mmap; dev->ibdev.alloc_pd = iwch_allocate_pd; dev->ibdev.dealloc_pd = iwch_deallocate_pd; dev->ibdev.create_ah = iwch_ah_create; dev->ibdev.destroy_ah = iwch_ah_destroy; dev->ibdev.create_qp = iwch_create_qp; dev->ibdev.modify_qp = iwch_ib_modify_qp; dev->ibdev.destroy_qp = iwch_destroy_qp; dev->ibdev.create_cq = iwch_create_cq; dev->ibdev.destroy_cq = iwch_destroy_cq; dev->ibdev.resize_cq = iwch_resize_cq; dev->ibdev.poll_cq = iwch_poll_cq; dev->ibdev.get_dma_mr = iwch_get_dma_mr; dev->ibdev.reg_phys_mr = iwch_register_phys_mem; dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem; dev->ibdev.reg_user_mr = iwch_reg_user_mr; dev->ibdev.dereg_mr = iwch_dereg_mr; dev->ibdev.alloc_mw = iwch_alloc_mw; dev->ibdev.bind_mw = iwch_bind_mw; dev->ibdev.dealloc_mw = iwch_dealloc_mw; dev->ibdev.attach_mcast = iwch_multicast_attach; dev->ibdev.detach_mcast = iwch_multicast_detach; dev->ibdev.process_mad = iwch_process_mad; dev->ibdev.req_notify_cq = iwch_arm_cq; dev->ibdev.post_send = iwch_post_send; dev->ibdev.post_recv = iwch_post_receive; dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT); if (!dev->ibdev.iwcm) return (ENOMEM); dev->ibdev.iwcm->connect = iwch_connect; dev->ibdev.iwcm->accept = iwch_accept_cr; dev->ibdev.iwcm->reject = iwch_reject_cr; dev->ibdev.iwcm->create_listen_ep = iwch_create_listen_ep; dev->ibdev.iwcm->destroy_listen_ep = iwch_destroy_listen_ep; dev->ibdev.iwcm->newconn = process_newconn; dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; dev->ibdev.iwcm->get_qp = iwch_get_qp; ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto bail1; return (0); bail1: cxfree(dev->ibdev.iwcm); return (ret); } void iwch_unregister_device(struct iwch_dev *dev) { ib_unregister_device(&dev->ibdev); cxfree(dev->ibdev.iwcm); return; } #endif Index: 
stable/10/sys/dev/cxgbe/iw_cxgbe/device.c =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/device.c (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/device.c (revision 318799) @@ -1,362 +1,362 @@ /* * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include #include #include #include #include #ifdef TCP_OFFLOAD #include "iw_cxgbe.h" void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct list_head *pos, *nxt; struct c4iw_qid_list *entry; mutex_lock(&uctx->lock); list_for_each_safe(pos, nxt, &uctx->qpids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); if (!(entry->qid & rdev->qpmask)) { c4iw_put_resource(&rdev->resource.qid_table, entry->qid); mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur -= rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); } kfree(entry); } list_for_each_safe(pos, nxt, &uctx->qpids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); } mutex_unlock(&uctx->lock); } void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { INIT_LIST_HEAD(&uctx->qpids); INIT_LIST_HEAD(&uctx->cqids); mutex_init(&uctx->lock); } static int c4iw_rdev_open(struct c4iw_rdev *rdev) { struct adapter *sc = rdev->adap; struct sge_params *sp = &sc->params.sge; int rc; c4iw_init_dev_ucontext(rdev, &rdev->uctx); /* XXX: we can probably make this work */ if (sp->eq_s_qpp > PAGE_SHIFT || sp->iq_s_qpp > PAGE_SHIFT) { device_printf(sc->dev, "doorbell density too high (eq %d, iq %d, pg %d).\n", sp->eq_s_qpp, sp->eq_s_qpp, PAGE_SHIFT); rc = -EINVAL; goto err1; } rdev->qpshift = PAGE_SHIFT - sp->eq_s_qpp; rdev->qpmask = (1 << sp->eq_s_qpp) - 1; rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp; rdev->cqmask = (1 << sp->iq_s_qpp) - 1; if (c4iw_num_stags(rdev) == 0) { rc = -EINVAL; goto err1; } rdev->stats.pd.total = T4_MAX_NUM_PD; rdev->stats.stag.total = sc->vres.stag.size; rdev->stats.pbl.total = sc->vres.pbl.size; rdev->stats.rqt.total = sc->vres.rq.size; rdev->stats.qid.total = sc->vres.qp.size; rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); if (rc) { 
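		/* c4iw_init_resource() failed; nothing to unwind yet, so
		 * the err1 path below simply returns */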
device_printf(sc->dev, "error %d initializing resources\n", rc); goto err1; } rc = c4iw_pblpool_create(rdev); if (rc) { device_printf(sc->dev, "error %d initializing pbl pool\n", rc); goto err2; } rc = c4iw_rqtpool_create(rdev); if (rc) { device_printf(sc->dev, "error %d initializing rqt pool\n", rc); goto err3; } return (0); err3: c4iw_pblpool_destroy(rdev); err2: c4iw_destroy_resource(&rdev->resource); err1: return (rc); } static void c4iw_rdev_close(struct c4iw_rdev *rdev) { c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } static void c4iw_dealloc(struct c4iw_dev *iwsc) { c4iw_rdev_close(&iwsc->rdev); idr_destroy(&iwsc->cqidr); idr_destroy(&iwsc->qpidr); idr_destroy(&iwsc->mmidr); ib_dealloc_device(&iwsc->ibdev); } static struct c4iw_dev * c4iw_alloc(struct adapter *sc) { struct c4iw_dev *iwsc; int rc; iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc)); if (iwsc == NULL) { device_printf(sc->dev, "Cannot allocate ib device.\n"); return (ERR_PTR(-ENOMEM)); } iwsc->rdev.adap = sc; rc = c4iw_rdev_open(&iwsc->rdev); if (rc != 0) { device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc); ib_dealloc_device(&iwsc->ibdev); return (ERR_PTR(rc)); } idr_init(&iwsc->cqidr); idr_init(&iwsc->qpidr); idr_init(&iwsc->mmidr); spin_lock_init(&iwsc->lock); mutex_init(&iwsc->rdev.stats.lock); return (iwsc); } static int c4iw_mod_load(void); static int c4iw_mod_unload(void); static int c4iw_activate(struct adapter *); static int c4iw_deactivate(struct adapter *); static struct uld_info c4iw_uld_info = { .uld_id = ULD_IWARP, .activate = c4iw_activate, .deactivate = c4iw_deactivate, }; static int c4iw_activate(struct adapter *sc) { struct c4iw_dev *iwsc; int rc; ASSERT_SYNCHRONIZED_OP(sc); if (uld_active(sc, ULD_IWARP)) { KASSERT(0, ("%s: RDMA already eanbled on sc %p", __func__, sc)); return (0); } if (sc->rdmacaps == 0) { device_printf(sc->dev, "RDMA not supported or RDMA cap is not enabled.\n"); return (ENOSYS); } iwsc = c4iw_alloc(sc); if (IS_ERR(iwsc)) { rc = -PTR_ERR(iwsc); device_printf(sc->dev, "initialization failed: %d\n", rc); return (rc); } sc->iwarp_softc = iwsc; rc = -c4iw_register_device(iwsc); if (rc) { device_printf(sc->dev, "RDMA registration failed: %d\n", rc); c4iw_dealloc(iwsc); sc->iwarp_softc = NULL; } return (rc); } static int c4iw_deactivate(struct adapter *sc) { struct c4iw_dev *iwsc = sc->iwarp_softc; ASSERT_SYNCHRONIZED_OP(sc); c4iw_unregister_device(iwsc); c4iw_dealloc(iwsc); sc->iwarp_softc = NULL; return (0); } static void c4iw_activate_all(struct adapter *sc, void *arg __unused) { if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0) return; /* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. 
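	 * sc->offload_map has a bit set for each port with TOE switched
	 * on, so a non-zero map means at least one port can carry
	 * offloaded connections.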
*/ if (sc->offload_map && !uld_active(sc, ULD_IWARP)) (void) t4_activate_uld(sc, ULD_IWARP); end_synchronized_op(sc, 0); } static void c4iw_deactivate_all(struct adapter *sc, void *arg __unused) { if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0) return; if (uld_active(sc, ULD_IWARP)) (void) t4_deactivate_uld(sc, ULD_IWARP); end_synchronized_op(sc, 0); } static int c4iw_mod_load(void) { int rc; rc = -c4iw_cm_init(); if (rc != 0) return (rc); rc = t4_register_uld(&c4iw_uld_info); if (rc != 0) { c4iw_cm_term(); return (rc); } t4_iterate(c4iw_activate_all, NULL); return (rc); } static int c4iw_mod_unload(void) { t4_iterate(c4iw_deactivate_all, NULL); c4iw_cm_term(); if (t4_unregister_uld(&c4iw_uld_info) == EBUSY) return (EBUSY); return (0); } #endif #undef MODULE_VERSION #include /* * t4_tom won't load on kernels without TCP_OFFLOAD and this module's dependency * on t4_tom ensures that it won't either. So we don't directly check for * TCP_OFFLOAD here. */ static int c4iw_modevent(module_t mod, int cmd, void *arg) { int rc = 0; #ifdef TCP_OFFLOAD switch (cmd) { case MOD_LOAD: rc = c4iw_mod_load(); if (rc == 0) - printf("iw_cxgbe: Chelsio T4/T5 RDMA driver loaded.\n"); + printf("iw_cxgbe: Chelsio T4/T5/T6 RDMA driver loaded.\n"); break; case MOD_UNLOAD: rc = c4iw_mod_unload(); break; default: rc = EINVAL; } #else printf("t4_tom: compiled without TCP_OFFLOAD support.\n"); rc = EOPNOTSUPP; #endif return (rc); } static moduledata_t c4iw_mod_data = { "iw_cxgbe", c4iw_modevent, 0 }; MODULE_VERSION(iw_cxgbe, 1); MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1); MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1); MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1); DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY); Index: stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h (revision 318799) @@ -1,979 +1,1000 @@ /* * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * $FreeBSD$ */ #ifndef __IW_CXGB4_H__ #define __IW_CXGB4_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #undef prefetch #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_tcb.h" #include "t4_l2t.h" #define DRV_NAME "iw_cxgbe" #define MOD DRV_NAME ":" #define KTR_IW_CXGBE KTR_SPARE3 extern int c4iw_debug; #define PDBG(fmt, args...) \ do { \ if (c4iw_debug) \ printf(MOD fmt, ## args); \ } while (0) #include "t4.h" static inline void *cplhdr(struct mbuf *m) { return mtod(m, void*); } #define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start) #define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start) #define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */ #define C4IW_ID_TABLE_F_EMPTY 2 /* Table is initially empty */ struct c4iw_id_table { u32 flags; u32 start; /* logical minimal id */ u32 last; /* hint for find */ u32 max; spinlock_t lock; unsigned long *table; }; struct c4iw_resource { struct c4iw_id_table tpt_table; struct c4iw_id_table qid_table; struct c4iw_id_table pdid_table; }; struct c4iw_qid_list { struct list_head entry; u32 qid; }; struct c4iw_dev_ucontext { struct list_head qpids; struct list_head cqids; struct mutex lock; }; enum c4iw_rdev_flags { T4_FATAL_ERROR = (1<<0), }; struct c4iw_stat { u64 total; u64 cur; u64 max; u64 fail; }; struct c4iw_stats { struct mutex lock; struct c4iw_stat qid; struct c4iw_stat pd; struct c4iw_stat stag; struct c4iw_stat pbl; struct c4iw_stat rqt; }; struct c4iw_rdev { struct adapter *adap; struct c4iw_resource resource; unsigned long qpshift; u32 qpmask; unsigned long cqshift; u32 cqmask; struct c4iw_dev_ucontext uctx; vmem_t *rqt_arena; vmem_t *pbl_arena; u32 flags; struct c4iw_stats stats; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) { return rdev->flags & T4_FATAL_ERROR; } static inline int c4iw_num_stags(struct c4iw_rdev *rdev) { return (int)(rdev->adap->vres.stag.size >> 5); } -#define C4IW_WR_TO (10*HZ) +#define C4IW_WR_TO (60*HZ) struct c4iw_wr_wait { int ret; - atomic_t completion; + struct completion completion; }; static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) { wr_waitp->ret = 0; - atomic_set(&wr_waitp->completion, 0); + init_completion(&wr_waitp->completion); } static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) { wr_waitp->ret = ret; - atomic_set(&wr_waitp->completion, 1); - wakeup(wr_waitp); + complete(&wr_waitp->completion); } static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, - u32 hwtid, u32 qpid, const char *func) + u32 hwtid, u32 qpid, const char *func) { struct adapter *sc = rdev->adap; unsigned to = C4IW_WR_TO; + int ret; + int timedout = 0; + struct timeval t1, t2; - while (!atomic_read(&wr_waitp->completion)) { - tsleep(wr_waitp, 0, "c4iw_wait", to); - if (SIGPENDING(curthread)) { - printf("%s - Device %s not responding - " - "tid %u qpid %u\n", func, - device_get_nameunit(sc->dev), hwtid, qpid); - if (c4iw_fatal_error(rdev)) { - wr_waitp->ret = -EIO; - break; - } - to = to << 2; - } - } + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + goto out; + } + + getmicrotime(&t1); + do { + ret = wait_for_completion_timeout(&wr_waitp->completion, to); + if (!ret) { + getmicrotime(&t2); + timevalsub(&t2, &t1); + printf("%s - Device %s not responding after %ld.%06ld " + "seconds - tid %u qpid %u\n", func, + 
device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec, + hwtid, qpid); + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + break; + } + to = to << 2; + timedout = 1; + } + } while (!ret); + +out: + if (timedout) { + getmicrotime(&t2); + timevalsub(&t2, &t1); + printf("%s - Device %s reply after %ld.%06ld seconds - " + "tid %u qpid %u\n", func, device_get_nameunit(sc->dev), + t2.tv_sec, t2.tv_usec, hwtid, qpid); + } if (wr_waitp->ret) - CTR4(KTR_IW_CXGBE, "%s: FW reply %d tid %u qpid %u", - device_get_nameunit(sc->dev), wr_waitp->ret, hwtid, qpid); + CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc, + wr_waitp->ret, hwtid, qpid); return (wr_waitp->ret); } struct c4iw_dev { struct ib_device ibdev; struct c4iw_rdev rdev; u32 device_cap_flags; struct idr cqidr; struct idr qpidr; struct idr mmidr; spinlock_t lock; struct dentry *debugfs_root; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) { return container_of(ibdev, struct c4iw_dev, ibdev); } static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev) { return container_of(rdev, struct c4iw_dev, rdev); } static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid) { return idr_find(&rhp->cqidr, cqid); } static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid) { return idr_find(&rhp->qpidr, qpid); } static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid) { return idr_find(&rhp->mmidr, mmid); } static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id, int lock) { int ret; int newid; do { if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC)) return -ENOMEM; if (lock) spin_lock_irq(&rhp->lock); ret = idr_get_new_above(idr, handle, id, &newid); BUG_ON(!ret && newid != id); if (lock) spin_unlock_irq(&rhp->lock); } while (ret == -EAGAIN); return ret; } static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id) { return _insert_handle(rhp, idr, handle, id, 1); } static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id) { return _insert_handle(rhp, idr, handle, id, 0); } static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id, int lock) { if (lock) spin_lock_irq(&rhp->lock); idr_remove(idr, id); if (lock) spin_unlock_irq(&rhp->lock); } static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id) { _remove_handle(rhp, idr, id, 1); } static inline void remove_handle_nolock(struct c4iw_dev *rhp, struct idr *idr, u32 id) { _remove_handle(rhp, idr, id, 0); } struct c4iw_pd { struct ib_pd ibpd; u32 pdid; struct c4iw_dev *rhp; }; static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd) { return container_of(ibpd, struct c4iw_pd, ibpd); } struct tpt_attributes { u64 len; u64 va_fbo; enum fw_ri_mem_perms perms; u32 stag; u32 pdid; u32 qpid; u32 pbl_addr; u32 pbl_size; u32 state:1; u32 type:2; u32 rsvd:1; u32 remote_invaliate_disable:1; u32 zbva:1; u32 mw_bind_enable:1; u32 page_size:5; }; struct c4iw_mr { struct ib_mr ibmr; struct ib_umem *umem; struct c4iw_dev *rhp; u64 kva; struct tpt_attributes attr; }; static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr) { return container_of(ibmr, struct c4iw_mr, ibmr); } struct c4iw_mw { struct ib_mw ibmw; struct c4iw_dev *rhp; u64 kva; struct tpt_attributes attr; }; static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw) { return container_of(ibmw, struct c4iw_mw, ibmw); } struct c4iw_fr_page_list { struct ib_fast_reg_page_list ibpl; DECLARE_PCI_UNMAP_ADDR(mapping); 
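/*
 * Note on the fields below (inferred from c4iw_alloc_fastreg_pbl() in
 * mem.c, later in this change): dma_addr holds the physical address of
 * this physically contiguous allocation (contigmalloc + vtophys) and
 * size is the total allocation length, including the page-list array
 * that immediately follows the structure.
 */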
dma_addr_t dma_addr; struct c4iw_dev *dev; int size; }; static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( struct ib_fast_reg_page_list *ibpl) { return container_of(ibpl, struct c4iw_fr_page_list, ibpl); } struct c4iw_cq { struct ib_cq ibcq; struct c4iw_dev *rhp; struct t4_cq cq; spinlock_t lock; spinlock_t comp_handler_lock; atomic_t refcnt; wait_queue_head_t wait; }; static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) { return container_of(ibcq, struct c4iw_cq, ibcq); } struct c4iw_mpa_attributes { u8 initiator; u8 recv_marker_enabled; u8 xmit_marker_enabled; u8 crc_enabled; u8 enhanced_rdma_conn; u8 version; u8 p2p_type; }; struct c4iw_qp_attributes { u32 scq; u32 rcq; u32 sq_num_entries; u32 rq_num_entries; u32 sq_max_sges; u32 sq_max_sges_rdma_write; u32 rq_max_sges; u32 state; u8 enable_rdma_read; u8 enable_rdma_write; u8 enable_bind; u8 enable_mmid0_fastreg; u32 max_ord; u32 max_ird; u32 pd; u32 next_state; char terminate_buffer[52]; u32 terminate_msg_len; u8 is_terminate_local; struct c4iw_mpa_attributes mpa_attr; struct c4iw_ep *llp_stream_handle; u8 layer_etype; u8 ecode; u16 sq_db_inc; u16 rq_db_inc; }; struct c4iw_qp { struct ib_qp ibqp; struct c4iw_dev *rhp; struct c4iw_ep *ep; struct c4iw_qp_attributes attr; struct t4_wq wq; spinlock_t lock; struct mutex mutex; atomic_t refcnt; wait_queue_head_t wait; struct timer_list timer; int sq_sig_all; }; static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) { return container_of(ibqp, struct c4iw_qp, ibqp); } struct c4iw_ucontext { struct ib_ucontext ibucontext; struct c4iw_dev_ucontext uctx; u32 key; spinlock_t mmap_lock; struct list_head mmaps; }; static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) { return container_of(c, struct c4iw_ucontext, ibucontext); } struct c4iw_mm_entry { struct list_head entry; u64 addr; u32 key; unsigned len; }; static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext, u32 key, unsigned len) { struct list_head *pos, *nxt; struct c4iw_mm_entry *mm; spin_lock(&ucontext->mmap_lock); list_for_each_safe(pos, nxt, &ucontext->mmaps) { mm = list_entry(pos, struct c4iw_mm_entry, entry); if (mm->key == key && mm->len == len) { list_del_init(&mm->entry); spin_unlock(&ucontext->mmap_lock); CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, key, (unsigned long long) mm->addr, mm->len); return mm; } } spin_unlock(&ucontext->mmap_lock); return NULL; } static inline void insert_mmap(struct c4iw_ucontext *ucontext, struct c4iw_mm_entry *mm) { spin_lock(&ucontext->mmap_lock); CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key, (unsigned long long) mm->addr, mm->len); list_add_tail(&mm->entry, &ucontext->mmaps); spin_unlock(&ucontext->mmap_lock); } enum c4iw_qp_attr_mask { C4IW_QP_ATTR_NEXT_STATE = 1 << 0, C4IW_QP_ATTR_SQ_DB = 1<<1, C4IW_QP_ATTR_RQ_DB = 1<<2, C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7, C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8, C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9, C4IW_QP_ATTR_MAX_ORD = 1 << 11, C4IW_QP_ATTR_MAX_IRD = 1 << 12, C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22, C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23, C4IW_QP_ATTR_MPA_ATTR = 1 << 24, C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25, C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | C4IW_QP_ATTR_MAX_ORD | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_STREAM_MSG_BUFFER | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE) }; int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, 
enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal); enum c4iw_qp_state { C4IW_QP_STATE_IDLE, C4IW_QP_STATE_RTS, C4IW_QP_STATE_ERROR, C4IW_QP_STATE_TERMINATE, C4IW_QP_STATE_CLOSING, C4IW_QP_STATE_TOT }; /* * IW_CXGBE event bits. * These bits are used for handling all events for a particular 'ep' serially. */ #define C4IW_EVENT_SOCKET 0x0001 #define C4IW_EVENT_TIMEOUT 0x0002 #define C4IW_EVENT_TERM 0x0004 static inline int c4iw_convert_state(enum ib_qp_state ib_state) { switch (ib_state) { case IB_QPS_RESET: case IB_QPS_INIT: return C4IW_QP_STATE_IDLE; case IB_QPS_RTS: return C4IW_QP_STATE_RTS; case IB_QPS_SQD: return C4IW_QP_STATE_CLOSING; case IB_QPS_SQE: return C4IW_QP_STATE_TERMINATE; case IB_QPS_ERR: return C4IW_QP_STATE_ERROR; default: return -1; } } static inline int to_ib_qp_state(int c4iw_qp_state) { switch (c4iw_qp_state) { case C4IW_QP_STATE_IDLE: return IB_QPS_INIT; case C4IW_QP_STATE_RTS: return IB_QPS_RTS; case C4IW_QP_STATE_CLOSING: return IB_QPS_SQD; case C4IW_QP_STATE_TERMINATE: return IB_QPS_SQE; case C4IW_QP_STATE_ERROR: return IB_QPS_ERR; } return IB_QPS_ERR; } #define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) | (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) | FW_RI_MEM_ACCESS_LOCAL_READ; } static inline u32 c4iw_ib_to_tpt_bind_access(int acc) { return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0); } enum c4iw_mmid_state { C4IW_STAG_STATE_VALID, C4IW_STAG_STATE_INVALID }; #define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications" #define MPA_KEY_REQ "MPA ID Req Frame" #define MPA_KEY_REP "MPA ID Rep Frame" #define MPA_MAX_PRIVATE_DATA 256 #define MPA_ENHANCED_RDMA_CONN 0x10 #define MPA_REJECT 0x20 #define MPA_CRC 0x40 #define MPA_MARKERS 0x80 #define MPA_FLAGS_MASK 0xE0 #define MPA_V2_PEER2PEER_MODEL 0x8000 #define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000 #define MPA_V2_RDMA_WRITE_RTR 0x8000 #define MPA_V2_RDMA_READ_RTR 0x4000 #define MPA_V2_IRD_ORD_MASK 0x3FFF #define c4iw_put_ep(ep) { \ CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \ __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \ WARN_ON(atomic_read(&(ep)->kref.refcount) < 1); \ kref_put(&((ep)->kref), _c4iw_free_ep); \ } #define c4iw_get_ep(ep) { \ CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \ __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \ kref_get(&((ep)->kref)); \ } void _c4iw_free_ep(struct kref *kref); struct mpa_message { u8 key[16]; u8 flags; u8 revision; __be16 private_data_size; u8 private_data[0]; }; struct mpa_v2_conn_params { __be16 ird; __be16 ord; }; struct terminate_message { u8 layer_etype; u8 ecode; __be16 hdrct_rsvd; u8 len_hdrs[0]; }; #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28) enum c4iw_layers_types { LAYER_RDMAP = 0x00, LAYER_DDP = 0x10, LAYER_MPA = 0x20, RDMAP_LOCAL_CATA = 0x00, RDMAP_REMOTE_PROT = 0x01, RDMAP_REMOTE_OP = 0x02, DDP_LOCAL_CATA = 0x00, DDP_TAGGED_ERR = 0x01, DDP_UNTAGGED_ERR = 0x02, DDP_LLP = 0x03 }; enum c4iw_rdma_ecodes { RDMAP_INV_STAG = 0x00, RDMAP_BASE_BOUNDS = 0x01, RDMAP_ACC_VIOL = 0x02, RDMAP_STAG_NOT_ASSOC = 0x03, RDMAP_TO_WRAP = 0x04, RDMAP_INV_VERS = 0x05, RDMAP_INV_OPCODE = 0x06, RDMAP_STREAM_CATA = 0x07, RDMAP_GLOBAL_CATA = 0x08, RDMAP_CANT_INV_STAG = 0x09, RDMAP_UNSPECIFIED = 0xff }; enum c4iw_ddp_ecodes { 
DDPT_INV_STAG = 0x00, DDPT_BASE_BOUNDS = 0x01, DDPT_STAG_NOT_ASSOC = 0x02, DDPT_TO_WRAP = 0x03, DDPT_INV_VERS = 0x04, DDPU_INV_QN = 0x01, DDPU_INV_MSN_NOBUF = 0x02, DDPU_INV_MSN_RANGE = 0x03, DDPU_INV_MO = 0x04, DDPU_MSG_TOOBIG = 0x05, DDPU_INV_VERS = 0x06 }; enum c4iw_mpa_ecodes { MPA_CRC_ERR = 0x02, MPA_MARKER_ERR = 0x03, MPA_LOCAL_CATA = 0x05, MPA_INSUFF_IRD = 0x06, MPA_NOMATCH_RTR = 0x07, }; enum c4iw_ep_state { IDLE = 0, LISTEN, CONNECTING, MPA_REQ_WAIT, MPA_REQ_SENT, MPA_REQ_RCVD, MPA_REP_SENT, FPDU_MODE, ABORTING, CLOSING, MORIBUND, DEAD, }; enum c4iw_ep_flags { PEER_ABORT_IN_PROGRESS = 0, ABORT_REQ_IN_PROGRESS = 1, RELEASE_RESOURCES = 2, CLOSE_SENT = 3, TIMEOUT = 4, QP_REFERENCED = 5 }; enum c4iw_ep_history { ACT_OPEN_REQ = 0, ACT_OFLD_CONN = 1, ACT_OPEN_RPL = 2, ACT_ESTAB = 3, PASS_ACCEPT_REQ = 4, PASS_ESTAB = 5, ABORT_UPCALL = 6, ESTAB_UPCALL = 7, CLOSE_UPCALL = 8, ULP_ACCEPT = 9, ULP_REJECT = 10, TIMEDOUT = 11, PEER_ABORT = 12, PEER_CLOSE = 13, CONNREQ_UPCALL = 14, ABORT_CONN = 15, DISCONN_UPCALL = 16, EP_DISC_CLOSE = 17, EP_DISC_ABORT = 18, CONN_RPL_UPCALL = 19, ACT_RETRY_NOMEM = 20, ACT_RETRY_INUSE = 21, CLOSE_CON_RPL = 22, EP_DISC_FAIL = 24, QP_REFED = 25, QP_DEREFED = 26, CM_ID_REFED = 27, CM_ID_DEREFED = 28 }; struct c4iw_ep_common { TAILQ_ENTRY(c4iw_ep_common) entry; /* Work queue attachment */ struct iw_cm_id *cm_id; struct c4iw_qp *qp; struct c4iw_dev *dev; enum c4iw_ep_state state; struct kref kref; struct mutex mutex; struct sockaddr_in local_addr; struct sockaddr_in remote_addr; struct c4iw_wr_wait wr_wait; unsigned long flags; unsigned long history; int rpl_err; int rpl_done; struct thread *thread; struct socket *so; int ep_events; }; struct c4iw_listen_ep { struct c4iw_ep_common com; unsigned int stid; int backlog; }; struct c4iw_ep { struct c4iw_ep_common com; struct c4iw_ep *parent_ep; struct timer_list timer; unsigned int atid; u32 hwtid; u32 snd_seq; u32 rcv_seq; struct l2t_entry *l2t; struct dst_entry *dst; struct c4iw_mpa_attributes mpa_attr; u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA]; unsigned int mpa_pkt_len; u32 ird; u32 ord; u32 smac_idx; u32 tx_chan; u32 mtu; u16 mss; u16 emss; u16 plen; u16 rss_qid; u16 txq_idx; u16 ctrlq_idx; u8 tos; u8 retry_with_mpa_v1; u8 tried_with_mpa_v1; }; static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) { return cm_id->provider_data; } static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) { return cm_id->provider_data; } static inline int compute_wscale(int win) { int wscale = 0; while (wscale < 14 && (65535 << wscale) < win) wscale++; return (wscale); } static inline int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data) { int n, id, max, error = 0; struct idr_layer *p; struct idr_layer *pa[MAX_LEVEL]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; p = idp->top; max = 1 << n; id = 0; while (id < max) { while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; p = p->ary[(id >> n) & IDR_MASK]; } if (p) { error = fn(id, (void *)p, data); if (error) break; } id += 1 << n; while (n < fls(id)) { n += IDR_BITS; p = *--paa; } } return error; } void your_reg_device(struct c4iw_dev *dev); #define SGE_CTRLQ_NUM 0 #endif Index: stable/10/sys/dev/cxgbe/iw_cxgbe/mem.c =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/mem.c (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/mem.c (revision 318799) @@ -1,847 +1,851 @@ /* * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses.
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include "iw_cxgbe.h" #define T4_ULPTX_MIN_IO 32 #define C4IW_MAX_INLINE_SIZE 96 -static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length) +static int +mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length) { - return (is_t4(dev->rdev.adap) || + + return ((is_t4(dev->rdev.adap) || is_t5(dev->rdev.adap)) && - length >= 8*1024*1024*1024ULL; + length >= 8*1024*1024*1024ULL); } + static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) { struct adapter *sc = rdev->adap; struct ulp_mem_io *ulpmc; struct ulptx_idata *ulpsc; u8 wr_len, *to_dp, *from_dp; int copy_len, num_wqe, i, ret = 0; struct c4iw_wr_wait wr_wait; struct wrqe *wr; u32 cmd; cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); if (is_t4(sc)) cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER); else cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM); addr &= 0x7FFFFFF; CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len); num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE); c4iw_init_wr_wait(&wr_wait); for (i = 0; i < num_wqe; i++) { copy_len = min(len, C4IW_MAX_INLINE_SIZE); wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc + roundup(copy_len, T4_ULPTX_MIN_IO), 16); wr = alloc_wrqe(wr_len, &sc->sge.mgmtq); if (wr == NULL) return (0); ulpmc = wrtod(wr); memset(ulpmc, 0, wr_len); INIT_ULPTX_WR(ulpmc, wr_len, 0, 0); if (i == (num_wqe-1)) { ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL); ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; } else ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR)); ulpmc->wr.wr_mid = cpu_to_be32( V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); ulpmc->cmd = cmd; ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN( DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16)); ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3)); ulpsc = (struct ulptx_idata *)(ulpmc + 1); ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); to_dp = (u8 *)(ulpsc + 1); from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE; if (data) memcpy(to_dp, from_dp, copy_len); else memset(to_dp, 0, copy_len); if (copy_len % T4_ULPTX_MIN_IO) memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO - 
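/*
 * Worked example of the chunking above, using the constants defined at
 * the top of this file (C4IW_MAX_INLINE_SIZE = 96, T4_ULPTX_MIN_IO = 32):
 * a 200-byte write becomes num_wqe = DIV_ROUND_UP(200, 96) = 3 work
 * requests carrying 96, 96 and 8 payload bytes, and the 8-byte tail is
 * then zero-padded here up to the next 32-byte boundary.
 */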
(copy_len % T4_ULPTX_MIN_IO)); t4_wrq_tx(sc, wr); len -= C4IW_MAX_INLINE_SIZE; } ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); return ret; } /* * Build and write a TPT entry. * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, * pbl_size and pbl_addr * OUT: stag index */ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, u32 *stag, u8 stag_state, u32 pdid, enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, int bind_enabled, u32 zbva, u64 to, u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr) { int err; struct fw_ri_tpte tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; stag_state = stag_state > 0; stag_idx = (*stag) >> 8; if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); if (!stag_idx) { mutex_lock(&rdev->stats.lock); rdev->stats.stag.fail++; mutex_unlock(&rdev->stats.lock); return -ENOMEM; } mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur += 32; if (rdev->stats.stag.cur > rdev->stats.stag.max) rdev->stats.stag.max = rdev->stats.stag.cur; mutex_unlock(&rdev->stats.lock); *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); } CTR5(KTR_IW_CXGBE, "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x", __func__, stag_state, type, pdid, stag_idx); /* write TPT entry */ if (reset_tpt_entry) memset(&tpt, 0, sizeof(tpt)); else { tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID | V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) | V_FW_RI_TPTE_STAGSTATE(stag_state) | V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid)); tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) | (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) | V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| V_FW_RI_TPTE_PS(page_size)); tpt.nosnoop_pbladdr = !pbl_size ? 
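/*
 * Sketch of the STag layout used above: the TPT index occupies bits
 * 31..8 and an 8-bit rolling key occupies bits 7..0, i.e.
 * stag = (stag_idx << 8) | (key & 0xff), and consumers recover the
 * index with stag >> 8.  For example, index 0x1234 with key 0x56
 * yields stag 0x00123456.
 */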
0 : cpu_to_be32( V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3)); tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); tpt.va_hi = cpu_to_be32((u32)(to >> 32)); tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); tpt.dca_mwbcnt_pstag = cpu_to_be32(0); tpt.len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->adap->vres.stag.start >> 5), sizeof(tpt), &tpt); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } return err; } static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl, u32 pbl_addr, u32 pbl_size) { int err; CTR4(KTR_IW_CXGBE, "%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d", __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size); err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl); return err; } static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size, u32 pbl_addr) { return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr); } static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid) { *stag = T4_STAG_UNSET; return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0, 0UL, 0, 0, 0, 0); } static int deallocate_window(struct c4iw_rdev *rdev, u32 stag) { return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0, 0); } static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr) { *stag = T4_STAG_UNSET; return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr); } static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) { u32 mmid; mhp->attr.state = 1; mhp->attr.stag = stag; mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp); return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); } static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift) { u32 stag = T4_STAG_UNSET; int ret; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0, mhp->attr.mw_bind_enable, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len ? 
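/*
 * Note on the "shift - 12" encoding passed to write_tpt_entry()
 * throughout this file: the TPT page-size field stores
 * log2(page_size) - 12, so 4KB pages encode as 0, 64KB as 4 and
 * 2MB as 9.
 */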
mhp->attr.len : -1, shift - 12, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) return ret; ret = finish_mem_reg(mhp, stag); if (ret) dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); return ret; } static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) { u32 stag; int ret; if (npages > mhp->attr.pbl_size) return -ENOMEM; stag = mhp->attr.stag; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len, shift - 12, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) return ret; ret = finish_mem_reg(mhp, stag); if (ret) dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); return ret; } static int alloc_pbl(struct c4iw_mr *mhp, int npages) { mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, npages << 3); if (!mhp->attr.pbl_addr) return -ENOMEM; mhp->attr.pbl_size = npages; return 0; } static int build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) { u64 mask; int i, j, n; mask = 0; *total_size = 0; for (i = 0; i < num_phys_buf; ++i) { if (i != 0 && buffer_list[i].addr & ~PAGE_MASK) return -EINVAL; if (i != 0 && i != num_phys_buf - 1 && (buffer_list[i].size & ~PAGE_MASK)) return -EINVAL; *total_size += buffer_list[i].size; if (i > 0) mask |= buffer_list[i].addr; else mask |= buffer_list[i].addr & PAGE_MASK; if (i != num_phys_buf - 1) mask |= buffer_list[i].addr + buffer_list[i].size; else mask |= (buffer_list[i].addr + buffer_list[i].size + PAGE_SIZE - 1) & PAGE_MASK; } /* Find largest page shift we can use to cover buffers */ - for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) + for (*shift = PAGE_SHIFT; *shift < PAGE_SHIFT + M_FW_RI_TPTE_PS; + ++(*shift)) if ((1ULL << *shift) & mask) break; buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); buffer_list[0].addr &= ~0ull << *shift; *npages = 0; for (i = 0; i < num_phys_buf; ++i) *npages += (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift; if (!*npages) return -EINVAL; *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); if (!*page_list) return -ENOMEM; n = 0; for (i = 0; i < num_phys_buf; ++i) for (j = 0; j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift; ++j) (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + ((u64) j << *shift)); CTR6(KTR_IW_CXGBE, "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__, (unsigned long long)*iova_start, (unsigned long long)mask, *shift, (unsigned long long)*total_size, *npages); return 0; } int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { struct c4iw_mr mh, *mhp; struct c4iw_pd *php; struct c4iw_dev *rhp; __be64 *page_list = NULL; int shift = 0; u64 total_size = 0; int npages = 0; int ret; CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd); /* There can be no memory windows */ if (atomic_read(&mr->usecnt)) return -EINVAL; mhp = to_c4iw_mr(mr); rhp = mhp->rhp; php = to_c4iw_pd(mr->pd); /* make sure we are on the same adapter */ if (rhp != php->rhp) return -EINVAL; memcpy(&mh, mhp, sizeof *mhp); if (mr_rereg_mask & IB_MR_REREG_PD) php = to_c4iw_pd(pd); if (mr_rereg_mask & IB_MR_REREG_ACCESS) { mh.attr.perms = c4iw_ib_to_tpt_access(acc); mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; } if 
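/*
 * Example for the page-shift search in build_phys_page_list() above:
 * mask accumulates the ORed buffer start and end addresses, so its
 * lowest set bit at or above PAGE_SHIFT bounds the common alignment.
 * A buffer boundary at 0x30000 sets bits 16 and 17, so the loop stops
 * at *shift = 16 and 64KB pages are used.
 */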
(mr_rereg_mask & IB_MR_REREG_TRANS) { ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) return ret; } if (mr_exceeds_hw_limits(rhp, total_size)) { kfree(page_list); return -EINVAL; } ret = reregister_mem(rhp, php, &mh, shift, npages); kfree(page_list); if (ret) return ret; if (mr_rereg_mask & IB_MR_REREG_PD) mhp->attr.pdid = php->pdid; if (mr_rereg_mask & IB_MR_REREG_ACCESS) mhp->attr.perms = c4iw_ib_to_tpt_access(acc); if (mr_rereg_mask & IB_MR_REREG_TRANS) { mhp->attr.zbva = 0; mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; - mhp->attr.len = (u32) total_size; + mhp->attr.len = total_size; mhp->attr.pbl_size = npages; } return 0; } struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { __be64 *page_list; int shift; u64 total_size; int npages; struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; int ret; CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; /* First check that we have enough alignment */ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { ret = -EINVAL; goto err; } if (num_phys_buf > 1 && ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) { ret = -EINVAL; goto err; } ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) goto err; if (mr_exceeds_hw_limits(rhp, total_size)) { kfree(page_list); ret = -EINVAL; goto err; } ret = alloc_pbl(mhp, npages); if (ret) { kfree(page_list); goto err; } ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr, npages); kfree(page_list); if (ret) goto err_pbl; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; - mhp->attr.len = (u32) total_size; + mhp->attr.len = total_size; mhp->attr.pbl_size = npages; ret = register_mem(rhp, php, mhp, shift); if (ret) goto err_pbl; return &mhp->ibmr; err_pbl: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err: kfree(mhp); return ERR_PTR(ret); } struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; int ret; u32 stag = T4_STAG_UNSET; CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; mhp->attr.zbva = 0; mhp->attr.va_fbo = 0; mhp->attr.page_size = 0; mhp->attr.len = ~0UL; mhp->attr.pbl_size = 0; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); if (ret) goto err1; ret = finish_mem_reg(mhp, stag); if (ret) goto err2; return &mhp->ibmr; err2: dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); err1: kfree(mhp); return ERR_PTR(ret); } struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata, int mr_id) { __be64 *pages; int shift, n, len; int i, j, k; int err = 0; struct ib_umem_chunk *chunk; struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; 
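/*
 * Descriptive overview of the registration path below: the umem chunks
 * are walked and each DMA address is appended to a one-page staging
 * buffer; whenever the buffer fills (PAGE_SIZE / sizeof(u64) = 512
 * entries with 4KB pages) it is flushed to adapter memory via
 * write_pbl() at offset pbl_addr + (n << 3), with a final partial
 * flush for the remainder.
 */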
CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd); if (length == ~0ULL) return ERR_PTR(-EINVAL); if ((length + start) < start) return ERR_PTR(-EINVAL); php = to_c4iw_pd(pd); rhp = php->rhp; if (mr_exceeds_hw_limits(rhp, length)) return ERR_PTR(-EINVAL); mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); kfree(mhp); return ERR_PTR(err); } shift = ffs(mhp->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) n += chunk->nents; err = alloc_pbl(mhp, n); if (err) goto err; pages = (__be64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_pbl; } i = n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = cpu_to_be64(sg_dma_address( &chunk->page_list[j]) + mhp->umem->page_size * k); if (i == PAGE_SIZE / sizeof *pages) { err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i); if (err) goto pbl_done; n += i; i = 0; } } } if (i) err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i); pbl_done: free_page((unsigned long) pages); if (err) goto err_pbl; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = length; err = register_mem(rhp, php, mhp, shift); if (err) goto err_pbl; return &mhp->ibmr; err_pbl: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err: ib_umem_release(mhp->umem); kfree(mhp); return ERR_PTR(err); } struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mw *mhp; u32 mmid; u32 stag = 0; int ret; php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); ret = allocate_window(&rhp->rdev, &stag, php->pdid); if (ret) { kfree(mhp); return ERR_PTR(ret); } mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.type = FW_RI_STAG_MW; mhp->attr.stag = stag; mmid = (stag) >> 8; mhp->ibmw.rkey = stag; if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { deallocate_window(&rhp->rdev, mhp->attr.stag); kfree(mhp); return ERR_PTR(-ENOMEM); } CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp, stag); return &(mhp->ibmw); } int c4iw_dealloc_mw(struct ib_mw *mw) { struct c4iw_dev *rhp; struct c4iw_mw *mhp; u32 mmid; mhp = to_c4iw_mw(mw); rhp = mhp->rhp; mmid = (mw->rkey) >> 8; remove_handle(rhp, &rhp->mmidr, mmid); deallocate_window(&rhp->rdev, mhp->attr.stag); kfree(mhp); CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid, mhp); return 0; } struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; u32 mmid; u32 stag = 0; int ret = 0; php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) { ret = -ENOMEM; goto err; } mhp->rhp = rhp; ret = alloc_pbl(mhp, pbl_depth); if (ret) goto err1; mhp->attr.pbl_size = pbl_depth; ret = allocate_stag(&rhp->rdev, &stag, php->pdid, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) goto err2; mhp->attr.pdid = php->pdid; mhp->attr.type = FW_RI_STAG_NSMR; mhp->attr.stag = stag; mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; if (insert_handle(rhp, &rhp->mmidr, mhp, 
mmid)) { ret = -ENOMEM; goto err3; } CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp, stag); return &(mhp->ibmr); err3: dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); err2: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err1: kfree(mhp); err: return ERR_PTR(ret); } struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device, int page_list_len) { struct c4iw_fr_page_list *c4pl; struct c4iw_dev *dev = to_c4iw_dev(device); bus_addr_t dma_addr; int size = sizeof *c4pl + page_list_len * sizeof(u64); c4pl = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0); if (c4pl) dma_addr = vtophys(c4pl); else return ERR_PTR(-ENOMEM);; pci_unmap_addr_set(c4pl, mapping, dma_addr); c4pl->dma_addr = dma_addr; c4pl->dev = dev; c4pl->size = size; c4pl->ibpl.page_list = (u64 *)(c4pl + 1); c4pl->ibpl.max_page_list_len = page_list_len; return &c4pl->ibpl; } void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl) { struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); contigfree(c4pl, c4pl->size, M_DEVBUF); } int c4iw_dereg_mr(struct ib_mr *ib_mr) { struct c4iw_dev *rhp; struct c4iw_mr *mhp; u32 mmid; CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr); /* There can be no memory windows */ if (atomic_read(&ib_mr->usecnt)) return -EINVAL; mhp = to_c4iw_mr(ib_mr); rhp = mhp->rhp; mmid = mhp->attr.stag >> 8; remove_handle(rhp, &rhp->mmidr, mmid); dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (mhp->attr.pbl_size) c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); if (mhp->kva) kfree((void *) (unsigned long) mhp->kva); if (mhp->umem) ib_umem_release(mhp->umem); CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp); kfree(mhp); return 0; } #endif Index: stable/10/sys/dev/cxgbe/iw_cxgbe/provider.c =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/provider.c (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/provider.c (revision 318799) @@ -1,502 +1,505 @@ /* * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #ifdef TCP_OFFLOAD #include #include #include #include #include "iw_cxgbe.h" #include "user.h" static int fastreg_support = 1; module_param(fastreg_support, int, 0644); MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)"); static int c4iw_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) { return -ENOSYS; } static struct ib_ah *c4iw_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { return ERR_PTR(-ENOSYS); } static int c4iw_ah_destroy(struct ib_ah *ah) { return -ENOSYS; } static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { return -ENOSYS; } static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { return -ENOSYS; } static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { return -ENOSYS; } static int c4iw_dealloc_ucontext(struct ib_ucontext *context) { struct c4iw_dev *rhp = to_c4iw_dev(context->device); struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); struct c4iw_mm_entry *mm, *tmp; CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context); list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) kfree(mm); c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); kfree(ucontext); return 0; } static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct c4iw_ucontext *context; struct c4iw_dev *rhp = to_c4iw_dev(ibdev); CTR2(KTR_IW_CXGBE, "%s ibdev %p", __func__, ibdev); context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); INIT_LIST_HEAD(&context->mmaps); spin_lock_init(&context->mmap_lock); return &context->ibucontext; } #ifdef DOT5 static inline pgprot_t t4_pgprot_wc(pgprot_t prot) { return pgprot_writecombine(prot); } #endif static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { int len = vma->vm_end - vma->vm_start; u32 key = vma->vm_pgoff << PAGE_SHIFT; struct c4iw_rdev *rdev; int ret = 0; struct c4iw_mm_entry *mm; struct c4iw_ucontext *ucontext; u64 addr, paddr; u64 va_regs_res = 0, va_udbs_res = 0; u64 len_regs_res = 0, len_udbs_res = 0; CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma); CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__, vma->vm_pgoff, key, len); if (vma->vm_start & (PAGE_SIZE-1)) { CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p", __func__, vma->vm_start, vma); return -EINVAL; } rdev = &(to_c4iw_dev(context->device)->rdev); ucontext = to_c4iw_ucontext(context); mm = remove_mmap(ucontext, key, len); if (!mm) { CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__, ucontext, key, len); return -EINVAL; } addr = mm->addr; kfree(mm); va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res); len_regs_res = (u64)rman_get_size(rdev->adap->regs_res); va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res); len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res); CTR6(KTR_IW_CXGBE, "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__, addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res, va_udbs_res+len_udbs_res); if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) { CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u", __func__, addr, va_regs_res, len_regs_res); /* * MA_SYNC register... 
*/ paddr = vtophys(addr); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = io_remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT, len, vma->vm_page_prot); } else { if (addr >= va_udbs_res && addr < va_udbs_res + len_udbs_res) { /* * Map user DB or OCQP memory... */ paddr = vtophys(addr); CTR4(KTR_IW_CXGBE, "%s:6 USER DB-GTS addr %p region %p, reglen %u", __func__, addr, va_udbs_res, len_udbs_res); #ifdef DOT5 - if (is_t5(rdev->lldi.adapter_type) && map_udb_as_wc) + if (!is_t4(rdev->lldi.adapter_type) && map_udb_as_wc) vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); else #endif vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = io_remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT, len, vma->vm_page_prot); } else { /* * Map WQ or CQ contig dma memory... */ CTR4(KTR_IW_CXGBE, "%s:7 WQ/CQ addr %p vm_start %u vma %p", __func__, addr, vma->vm_start, vma); ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot); } } CTR4(KTR_IW_CXGBE, "%s:8 ctx %p vma %p ret %u", __func__, context, vma, ret); return ret; } static int c4iw_deallocate_pd(struct ib_pd *pd) { struct c4iw_pd *php = to_c4iw_pd(pd); struct c4iw_dev *rhp = php->rhp; CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid); c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid); mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur--; mutex_unlock(&rhp->rdev.stats.lock); kfree(php); return (0); } static struct ib_pd * c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct c4iw_pd *php; u32 pdid; struct c4iw_dev *rhp; CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev, context, udata); rhp = (struct c4iw_dev *) ibdev; pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table); if (!pdid) return ERR_PTR(-EINVAL); php = kzalloc(sizeof(*php), GFP_KERNEL); if (!php) { c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid); return ERR_PTR(-ENOMEM); } php->pdid = pdid; php->rhp = rhp; if (context) { if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) { c4iw_deallocate_pd(&php->ibpd); return ERR_PTR(-EFAULT); } } mutex_lock(&rhp->rdev.stats.lock); rhp->rdev.stats.pd.cur++; if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max) rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur; mutex_unlock(&rhp->rdev.stats.lock); CTR6(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p, pddid 0x%x, pd %p", __func__, ibdev, context, udata, pdid, php); return (&php->ibpd); } static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__, ibdev, port, index, pkey); *pkey = 0; return (0); } static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct c4iw_dev *dev; struct port_info *pi; struct adapter *sc; CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__, ibdev, port, index, gid); memset(&gid->raw[0], 0, sizeof(gid->raw)); dev = to_c4iw_dev(ibdev); sc = dev->rdev.adap; if (port == 0 || port > sc->params.nports) return (-EINVAL); pi = sc->port[port - 1]; memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN); return (0); } static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct c4iw_dev *dev = to_c4iw_dev(ibdev); struct adapter *sc = dev->rdev.adap; + const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE; CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props); memset(props, 0, sizeof *props); 
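/*
 * Note on spg_ndesc above: the SGE status page at the end of each
 * hardware EQ consumes spg_len bytes, i.e. spg_len / EQ_ESIZE 64-byte
 * descriptors (one descriptor for a 64-byte status page, two for 128
 * bytes).  The queue depths reported below subtract those descriptors,
 * which is why T4_MAX_QP_DEPTH is now parameterized by spg_ndesc
 * instead of assuming a fixed status-page size.
 */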
memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN); props->hw_ver = sc->params.chipid; props->fw_ver = sc->params.fw_vers; props->device_cap_flags = dev->device_cap_flags; props->page_size_cap = T4_PAGESIZE_MASK; props->vendor_id = pci_get_vendor(sc->dev); props->vendor_part_id = pci_get_device(sc->dev); props->max_mr_size = T4_MAX_MR_SIZE; - props->max_qp = T4_MAX_NUM_QP; - props->max_qp_wr = T4_MAX_QP_DEPTH; + props->max_qp = sc->vres.qp.size / 2; + props->max_qp_wr = T4_MAX_QP_DEPTH(spg_ndesc); props->max_sge = T4_MAX_RECV_SGE; props->max_sge_rd = 1; - props->max_qp_rd_atom = c4iw_max_read_depth; - props->max_qp_init_rd_atom = c4iw_max_read_depth; - props->max_cq = T4_MAX_NUM_CQ; + props->max_res_rd_atom = sc->params.max_ird_adapter; + props->max_qp_rd_atom = min(sc->params.max_ordird_qp, + c4iw_max_read_depth); + props->max_qp_init_rd_atom = props->max_qp_rd_atom; + props->max_cq = sc->vres.qp.size; props->max_cqe = T4_MAX_CQ_DEPTH; props->max_mr = c4iw_num_stags(&dev->rdev); props->max_pd = T4_MAX_NUM_PD; props->local_ca_ack_delay = 0; props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH; return (0); } /* * Returns -errno on failure. */ static int c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct c4iw_dev *dev; struct adapter *sc; struct port_info *pi; struct ifnet *ifp; CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev, port, props); dev = to_c4iw_dev(ibdev); sc = dev->rdev.adap; if (port > sc->params.nports) return (-EINVAL); pi = sc->port[port - 1]; ifp = pi->vi[0].ifp; memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; if (ifp->if_mtu >= 4096) props->active_mtu = IB_MTU_4096; else if (ifp->if_mtu >= 2048) props->active_mtu = IB_MTU_2048; else if (ifp->if_mtu >= 1024) props->active_mtu = IB_MTU_1024; else if (ifp->if_mtu >= 512) props->active_mtu = IB_MTU_512; else props->active_mtu = IB_MTU_256; props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN; props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_REINIT_SUP | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_width = 2; props->active_speed = 2; props->max_msg_sz = -1; return 0; } /* * Returns -errno on error. 
*/ int c4iw_register_device(struct c4iw_dev *dev) { struct adapter *sc = dev->rdev.adap; struct ib_device *ibdev = &dev->ibdev; struct iw_cm_verbs *iwcm; int ret; CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc); BUG_ON(!sc->port[0]); strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name)); memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid)); memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN); ibdev->owner = THIS_MODULE; dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; if (fastreg_support) dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; ibdev->local_dma_lkey = 0; ibdev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV); ibdev->node_type = RDMA_NODE_RNIC; strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc)); ibdev->phys_port_cnt = sc->params.nports; ibdev->num_comp_vectors = 1; - ibdev->dma_device = sc->dev; + ibdev->dma_device = NULL; ibdev->query_device = c4iw_query_device; ibdev->query_port = c4iw_query_port; ibdev->modify_port = c4iw_modify_port; ibdev->query_pkey = c4iw_query_pkey; ibdev->query_gid = c4iw_query_gid; ibdev->alloc_ucontext = c4iw_alloc_ucontext; ibdev->dealloc_ucontext = c4iw_dealloc_ucontext; ibdev->mmap = c4iw_mmap; ibdev->alloc_pd = c4iw_allocate_pd; ibdev->dealloc_pd = c4iw_deallocate_pd; ibdev->create_ah = c4iw_ah_create; ibdev->destroy_ah = c4iw_ah_destroy; ibdev->create_qp = c4iw_create_qp; ibdev->modify_qp = c4iw_ib_modify_qp; ibdev->query_qp = c4iw_ib_query_qp; ibdev->destroy_qp = c4iw_destroy_qp; ibdev->create_cq = c4iw_create_cq; ibdev->destroy_cq = c4iw_destroy_cq; ibdev->resize_cq = c4iw_resize_cq; ibdev->poll_cq = c4iw_poll_cq; ibdev->get_dma_mr = c4iw_get_dma_mr; ibdev->reg_phys_mr = c4iw_register_phys_mem; ibdev->rereg_phys_mr = c4iw_reregister_phys_mem; ibdev->reg_user_mr = c4iw_reg_user_mr; ibdev->dereg_mr = c4iw_dereg_mr; ibdev->alloc_mw = c4iw_alloc_mw; ibdev->bind_mw = c4iw_bind_mw; ibdev->dealloc_mw = c4iw_dealloc_mw; ibdev->alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr; ibdev->alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl; ibdev->free_fast_reg_page_list = c4iw_free_fastreg_pbl; ibdev->attach_mcast = c4iw_multicast_attach; ibdev->detach_mcast = c4iw_multicast_detach; ibdev->process_mad = c4iw_process_mad; ibdev->req_notify_cq = c4iw_arm_cq; ibdev->post_send = c4iw_post_send; ibdev->post_recv = c4iw_post_receive; ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL); if (iwcm == NULL) return (-ENOMEM); iwcm->connect = c4iw_connect; iwcm->accept = c4iw_accept_cr; iwcm->reject = c4iw_reject_cr; iwcm->create_listen_ep = c4iw_create_listen_ep; iwcm->destroy_listen_ep = c4iw_destroy_listen_ep; iwcm->newconn = process_newconn; iwcm->add_ref = c4iw_qp_add_ref; iwcm->rem_ref = c4iw_qp_rem_ref; iwcm->get_qp = c4iw_get_qp; ibdev->iwcm 
= iwcm; ret = ib_register_device(&dev->ibdev, NULL); if (ret) kfree(iwcm); return (ret); } void c4iw_unregister_device(struct c4iw_dev *dev) { CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, dev->rdev.adap); ib_unregister_device(&dev->ibdev); kfree(dev->ibdev.iwcm); return; } #endif Index: stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c (revision 318799) @@ -1,1760 +1,1761 @@ /* * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #ifdef TCP_OFFLOAD #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct sge_iq; struct rss_header; #include #include "offload.h" #include "tom/t4_tom.h" #include "iw_cxgbe.h" #include "user.h" static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize); static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) { unsigned long flag; spin_lock_irqsave(&qhp->lock, flag); qhp->attr.state = state; spin_unlock_irqrestore(&qhp->lock, flag); } static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { contigfree(sq->queue, sq->memsize, M_DEVBUF); } static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { dealloc_host_sq(rdev, sq); } static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) { sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0); if (sq->queue) sq->dma_addr = vtophys(sq->queue); else return -ENOMEM; sq->phys_addr = vtophys(sq->queue); pci_unmap_addr_set(sq, mapping, sq->dma_addr); CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__, sq->queue, sq->dma_addr, sq->phys_addr); return 0; } static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx) { /* * uP clears EQ contexts when the connection exits rdma mode, * so no need to post a RESET WR for these EQs. 
*/ contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF); dealloc_sq(rdev, &wq->sq); c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); kfree(wq->rq.sw_rq); kfree(wq->sq.sw_sq); c4iw_put_qpid(rdev, wq->rq.qid, uctx); c4iw_put_qpid(rdev, wq->sq.qid, uctx); return 0; } static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx) { struct adapter *sc = rdev->adap; int user = (uctx != &rdev->uctx); struct fw_ri_res_wr *res_wr; struct fw_ri_res *res; int wr_len; struct c4iw_wr_wait wr_wait; int ret; int eqsize; struct wrqe *wr; + const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE; wq->sq.qid = c4iw_get_qpid(rdev, uctx); if (!wq->sq.qid) return -ENOMEM; wq->rq.qid = c4iw_get_qpid(rdev, uctx); if (!wq->rq.qid) goto err1; if (!user) { wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, GFP_KERNEL); if (!wq->sq.sw_sq) goto err2; wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, GFP_KERNEL); if (!wq->rq.sw_rq) goto err3; } /* RQT must be a power of 2. */ wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size); wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); if (!wq->rq.rqt_hwaddr) goto err4; if (alloc_host_sq(rdev, &wq->sq)) goto err5; memset(wq->sq.queue, 0, wq->sq.memsize); pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); wq->rq.queue = contigmalloc(wq->rq.memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0); if (wq->rq.queue) wq->rq.dma_addr = vtophys(wq->rq.queue); else goto err6; CTR5(KTR_IW_CXGBE, "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__, wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue), wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue)); memset(wq->rq.queue, 0, wq->rq.memsize); pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) + sc->sge_kdoorbell_reg); wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res) + sc->sge_gts_reg); if (user) { wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) + (wq->sq.qid << rdev->qpshift)); wq->sq.udb &= PAGE_MASK; wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) + (wq->rq.qid << rdev->qpshift)); wq->rq.udb &= PAGE_MASK; } wq->rdev = rdev; wq->rq.msn = 1; /* build fw_ri_res_wr */ wr_len = sizeof *res_wr + 2 * sizeof *res; wr = alloc_wrqe(wr_len, &sc->sge.mgmtq); if (wr == NULL) return (0); res_wr = wrtod(wr); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( V_FW_WR_OP(FW_RI_RES_WR) | V_FW_RI_RES_WR_NRES(2) | F_FW_WR_COMPL); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; res = res_wr->res; res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; res->u.sqrq.op = FW_RI_RES_OP_WRITE; /* eqsize is the number of 64B entries plus the status page size. 
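 * For instance, assuming wq->sq.size = 128, T4_SQ_NUM_SLOTS = 5 and a
 * 64-byte status page (spg_ndesc = 1), eqsize = 128 * 5 + 1 = 641
 * descriptors (illustrative numbers only).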
*/ - eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + - (sc->params.sge.spg_len / EQ_ESIZE); + eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_ndesc; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ V_FW_RI_RES_WR_IQID(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | V_FW_RI_RES_WR_DCACPU(0) | V_FW_RI_RES_WR_FBMIN(2) | V_FW_RI_RES_WR_FBMAX(2) | V_FW_RI_RES_WR_CIDXFTHRESHO(0) | V_FW_RI_RES_WR_CIDXFTHRESH(0) | V_FW_RI_RES_WR_EQSIZE(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); res++; res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; res->u.sqrq.op = FW_RI_RES_OP_WRITE; /* eqsize is the number of 64B entries plus the status page size. */ - eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + - (sc->params.sge.spg_len / EQ_ESIZE); + eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_ndesc; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ V_FW_RI_RES_WR_IQID(rcq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | V_FW_RI_RES_WR_DCACPU(0) | V_FW_RI_RES_WR_FBMIN(2) | V_FW_RI_RES_WR_FBMAX(2) | V_FW_RI_RES_WR_CIDXFTHRESHO(0) | V_FW_RI_RES_WR_CIDXFTHRESH(0) | V_FW_RI_RES_WR_EQSIZE(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); c4iw_init_wr_wait(&wr_wait); t4_wrq_tx(sc, wr); ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); if (ret) goto err7; CTR6(KTR_IW_CXGBE, "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx", __func__, wq->sq.qid, wq->rq.qid, wq->db, (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb); return 0; err7: contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF); err6: dealloc_sq(rdev, &wq->sq); err5: c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); err4: kfree(wq->rq.sw_rq); err3: kfree(wq->sq.sw_sq); err2: c4iw_put_qpid(rdev, wq->rq.qid, uctx); err1: c4iw_put_qpid(rdev, wq->sq.qid, uctx); return -ENOMEM; } static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, struct ib_send_wr *wr, int max, u32 *plenp) { u8 *dstp, *srcp; u32 plen = 0; int i; int rem, len; dstp = (u8 *)immdp->data; for (i = 0; i < wr->num_sge; i++) { if ((plen + wr->sg_list[i].length) > max) return -EMSGSIZE; srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; plen += wr->sg_list[i].length; rem = wr->sg_list[i].length; while (rem) { if (dstp == (u8 *)&sq->queue[sq->size]) dstp = (u8 *)sq->queue; if (rem <= (u8 *)&sq->queue[sq->size] - dstp) len = rem; else len = (u8 *)&sq->queue[sq->size] - dstp; memcpy(dstp, srcp, len); dstp += len; srcp += len; rem -= len; } } len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp); if (len) memset(dstp, 0, len); immdp->op = FW_RI_DATA_IMMD; immdp->r1 = 0; immdp->r2 = 0; immdp->immdlen = cpu_to_be32(plen); *plenp = plen; return 0; } static int build_isgl(__be64 *queue_start, __be64 *queue_end, struct fw_ri_isgl *isglp, struct ib_sge *sg_list, int num_sge, u32 *plenp) { int i; u32 plen = 0; __be64 *flitp = (__be64 *)isglp->sge; for (i = 0; i < num_sge; i++) { if ((plen + sg_list[i].length) < plen) return -EMSGSIZE; plen += sg_list[i].length; *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | sg_list[i].length); if (++flitp == queue_end) flitp = 
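/* wrap: the ISGL flits live in a circular work queue, so when the
 * cursor reaches queue_end it restarts at queue_start, mirroring the
 * byte-wise wraparound that build_immd() performs for inline data
 * above */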
queue_start; *flitp = cpu_to_be64(sg_list[i].addr); if (++flitp == queue_end) flitp = queue_start; } *flitp = (__force __be64)0; isglp->op = FW_RI_DATA_ISGL; isglp->r1 = 0; isglp->nsge = cpu_to_be16(num_sge); isglp->r2 = 0; if (plenp) *plenp = plen; return 0; } static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { u32 plen; int size; int ret; if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; switch (wr->opcode) { case IB_WR_SEND: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE)); else wqe->send.sendop_pkd = cpu_to_be32( V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND)); wqe->send.stag_inv = 0; break; case IB_WR_SEND_WITH_INV: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV)); else wqe->send.sendop_pkd = cpu_to_be32( V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV)); wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); break; default: return -EINVAL; } plen = 0; if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { ret = build_immd(sq, wqe->send.u.immd_src, wr, T4_MAX_SEND_INLINE, &plen); if (ret) return ret; size = sizeof wqe->send + sizeof(struct fw_ri_immd) + plen; } else { ret = build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], wqe->send.u.isgl_src, wr->sg_list, wr->num_sge, &plen); if (ret) return ret; size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } } else { wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; wqe->send.u.immd_src[0].r1 = 0; wqe->send.u.immd_src[0].r2 = 0; wqe->send.u.immd_src[0].immdlen = 0; size = sizeof wqe->send + sizeof(struct fw_ri_immd); plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->send.plen = cpu_to_be32(plen); return 0; } static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { u32 plen; int size; int ret; if (wr->num_sge > T4_MAX_SEND_SGE) return -EINVAL; wqe->write.r2 = 0; wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); if (wr->num_sge) { if (wr->send_flags & IB_SEND_INLINE) { ret = build_immd(sq, wqe->write.u.immd_src, wr, T4_MAX_WRITE_INLINE, &plen); if (ret) return ret; size = sizeof wqe->write + sizeof(struct fw_ri_immd) + plen; } else { ret = build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], wqe->write.u.isgl_src, wr->sg_list, wr->num_sge, &plen); if (ret) return ret; size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + wr->num_sge * sizeof(struct fw_ri_sge); } } else { wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; wqe->write.u.immd_src[0].r1 = 0; wqe->write.u.immd_src[0].r2 = 0; wqe->write.u.immd_src[0].immdlen = 0; size = sizeof wqe->write + sizeof(struct fw_ri_immd); plen = 0; } *len16 = DIV_ROUND_UP(size, 16); wqe->write.plen = cpu_to_be32(plen); return 0; } static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { if (wr->num_sge > 1) return -EINVAL; if (wr->num_sge) { wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey); wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr >> 32)); wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr); wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr >> 32)); wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); } else { wqe->read.stag_src = cpu_to_be32(2); wqe->read.to_src_hi = 0; 
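/*
 * build_immd() and build_isgl() above treat the send queue as a ring:
 * anything written past &sq->queue[sq->size] wraps back to sq->queue.
 * A stripped-down sketch of that copy loop (dst, ring_start, ring_end
 * are illustrative names, not part of the driver):
 *
 *	while (rem) {
 *		if (dst == ring_end)
 *			dst = ring_start;
 *		len = imin(rem, (int)(ring_end - dst));
 *		memcpy(dst, src, len);
 *		dst += len; src += len; rem -= len;
 *	}
 */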
wqe->read.to_src_lo = 0; wqe->read.stag_sink = cpu_to_be32(2); wqe->read.plen = 0; wqe->read.to_sink_hi = 0; wqe->read.to_sink_lo = 0; } wqe->read.r2 = 0; wqe->read.r5 = 0; *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); return 0; } static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) { int ret; ret = build_isgl((__be64 *)qhp->wq.rq.queue, (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); if (ret) return ret; *len16 = DIV_ROUND_UP(sizeof wqe->recv + wr->num_sge * sizeof(struct fw_ri_sge), 16); return 0; } static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { struct fw_ri_immd *imdp; __be64 *p; int i; int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); int rem; if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) return -EINVAL; wqe->fr.qpbinde_to_dcacpu = 0; wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12; wqe->fr.addr_type = FW_RI_VA_BASED_TO; wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags); wqe->fr.len_hi = 0; wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length); wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey); wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff); WARN_ON(pbllen > T4_MAX_FR_IMMD); imdp = (struct fw_ri_immd *)(&wqe->fr + 1); imdp->op = FW_RI_DATA_IMMD; imdp->r1 = 0; imdp->r2 = 0; imdp->immdlen = cpu_to_be32(pbllen); p = (__be64 *)(imdp + 1); rem = pbllen; for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); rem -= sizeof *p; if (++p == (__be64 *)&sq->queue[sq->size]) p = (__be64 *)sq->queue; } BUG_ON(rem < 0); while (rem) { *p = 0; rem -= sizeof *p; if (++p == (__be64 *)&sq->queue[sq->size]) p = (__be64 *)sq->queue; } *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16); return 0; } static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); wqe->inv.r2 = 0; *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); return 0; } void c4iw_qp_add_ref(struct ib_qp *qp) { CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp); atomic_inc(&(to_c4iw_qp(qp)->refcnt)); } void c4iw_qp_rem_ref(struct ib_qp *qp) { CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp); if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) wake_up(&(to_c4iw_qp(qp)->wait)); } static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid); cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | V_CQE_OPCODE(C4IW_DRAIN_OPCODE) | V_CQE_TYPE(1) | V_CQE_SWCQE(1) | V_CQE_QPID(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); spin_unlock_irqrestore(&schp->lock, flag); spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *rchp; unsigned long flag; struct t4_cq *cq; rchp = to_c4iw_cq(qhp->ibqp.recv_cq); cq = &rchp->cq; PDBG("%s drain rq 
id %u\n", __func__, qhp->wq.sq.qid); cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | V_CQE_OPCODE(C4IW_DRAIN_OPCODE) | V_CQE_TYPE(0) | V_CQE_SWCQE(1) | V_CQE_QPID(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); spin_unlock_irqrestore(&rchp->lock, flag); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { int err = 0; u8 len16 = 0; enum fw_wr_opcodes fw_opcode = 0; enum fw_ri_wr_flags fw_flags; struct c4iw_qp *qhp; union t4_wr *wqe; u32 num_wrs; struct t4_swsqe *swsqe; unsigned long flag; u16 idx = 0; qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); complete_sq_drain_wr(qhp, wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); return -ENOMEM; } while (wr) { if (num_wrs == 0) { err = -ENOMEM; *bad_wr = wr; break; } wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); fw_flags = 0; if (wr->send_flags & IB_SEND_SOLICITED) fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) fw_flags |= FW_RI_COMPLETION_FLAG; swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; switch (wr->opcode) { case IB_WR_SEND_WITH_INV: case IB_WR_SEND: if (wr->send_flags & IB_SEND_FENCE) fw_flags |= FW_RI_READ_FENCE_FLAG; fw_opcode = FW_RI_SEND_WR; if (wr->opcode == IB_WR_SEND) swsqe->opcode = FW_RI_SEND; else swsqe->opcode = FW_RI_SEND_WITH_INV; err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: fw_opcode = FW_RI_RDMA_READ_WR; swsqe->opcode = FW_RI_READ_REQ; if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) fw_flags = FW_RI_RDMA_READ_INVALIDATE; else fw_flags = 0; err = build_rdma_read(wqe, wr, &len16); if (err) break; swsqe->read_len = wr->sg_list[0].length; if (!qhp->wq.sq.oldest_read) qhp->wq.sq.oldest_read = swsqe; break; case IB_WR_FAST_REG_MR: fw_opcode = FW_RI_FR_NSMR_WR; swsqe->opcode = FW_RI_FAST_REGISTER; err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16); break; case IB_WR_LOCAL_INV: if (wr->send_flags & IB_SEND_FENCE) fw_flags |= FW_RI_LOCAL_FENCE_FLAG; fw_opcode = FW_RI_INV_LSTAG_WR; swsqe->opcode = FW_RI_LOCAL_INV; err = build_inv_stag(wqe, wr, &len16); break; default: CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__, wr->opcode); err = -EINVAL; } if (err) { *bad_wr = wr; break; } swsqe->idx = qhp->wq.sq.pidx; swsqe->complete = 0; swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || qhp->sq_sig_all; swsqe->wr_id = wr->wr_id; init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); CTR5(KTR_IW_CXGBE, "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u", __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, swsqe->opcode, swsqe->read_len); wr = wr->next; num_wrs--; t4_sq_produce(&qhp->wq, len16); idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); } t4_ring_sq_db(&qhp->wq, idx); spin_unlock_irqrestore(&qhp->lock, flag); return err; } int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct 
ib_recv_wr **bad_wr) { int err = 0; struct c4iw_qp *qhp; union t4_recv_wr *wqe; u32 num_wrs; u8 len16 = 0; unsigned long flag; u16 idx = 0; qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); complete_rq_drain_wr(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); return -ENOMEM; } while (wr) { if (wr->num_sge > T4_MAX_RECV_SGE) { err = -EINVAL; *bad_wr = wr; break; } wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + qhp->wq.rq.wq_pidx * T4_EQ_ENTRY_SIZE); if (num_wrs) err = build_rdma_recv(qhp, wqe, wr, &len16); else err = -ENOMEM; if (err) { *bad_wr = wr; break; } qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; wqe->recv.opcode = FW_RI_RECV_WR; wqe->recv.r1 = 0; wqe->recv.wrid = qhp->wq.rq.pidx; wqe->recv.r2[0] = 0; wqe->recv.r2[1] = 0; wqe->recv.r2[2] = 0; wqe->recv.len16 = len16; CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__, (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); t4_rq_produce(&qhp->wq, len16); idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); wr = wr->next; num_wrs--; } t4_ring_rq_db(&qhp->wq, idx); spin_unlock_irqrestore(&qhp->lock, flag); return err; } int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) { return -ENOSYS; } static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, u8 *ecode) { int status; int tagged; int opcode; int rqtype; int send_inv; if (!err_cqe) { *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; *ecode = 0; return; } status = CQE_STATUS(err_cqe); opcode = CQE_OPCODE(err_cqe); rqtype = RQ_TYPE(err_cqe); send_inv = (opcode == FW_RI_SEND_WITH_INV) || (opcode == FW_RI_SEND_WITH_SE_INV); tagged = (opcode == FW_RI_RDMA_WRITE) || (rqtype && (opcode == FW_RI_READ_RESP)); switch (status) { case T4_ERR_STAG: if (send_inv) { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_CANT_INV_STAG; } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_INV_STAG; } break; case T4_ERR_PDID: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; if ((opcode == FW_RI_SEND_WITH_INV) || (opcode == FW_RI_SEND_WITH_SE_INV)) *ecode = RDMAP_CANT_INV_STAG; else *ecode = RDMAP_STAG_NOT_ASSOC; break; case T4_ERR_QPID: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_STAG_NOT_ASSOC; break; case T4_ERR_ACCESS: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_ACC_VIOL; break; case T4_ERR_WRAP: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_TO_WRAP; break; case T4_ERR_BOUND: if (tagged) { *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_BASE_BOUNDS; } break; case T4_ERR_INVALIDATE_SHARED_MR: case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_CANT_INV_STAG; break; case T4_ERR_ECC: case T4_ERR_ECC_PSTAG: case T4_ERR_INTERNAL_ERR: *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; *ecode = 0; break; case T4_ERR_OUT_OF_RQE: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MSN_NOBUF; break; case T4_ERR_PBL_ADDR_BOUND: *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; break; case T4_ERR_CRC: *layer_type = LAYER_MPA|DDP_LLP; *ecode = MPA_CRC_ERR; break; case T4_ERR_MARKER: *layer_type = LAYER_MPA|DDP_LLP; *ecode = MPA_MARKER_ERR; break; case T4_ERR_PDU_LEN_ERR: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_MSG_TOOBIG; break; case T4_ERR_DDP_VERSION: if (tagged) { *layer_type = 
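/*
 * The switch surrounding this point maps a hardware CQE status onto the
 * iWARP TERMINATE layer/etype and error code, following the
 * MPA/DDP/RDMAP layering: e.g. a T4_ERR_BOUND on a tagged message is
 * reported as LAYER_DDP|DDP_TAGGED_ERR with DDPT_BASE_BOUNDS, while the
 * untagged case is raised at the RDMAP layer instead.
 */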
LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_INV_VERS; } else { *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_VERS; } break; case T4_ERR_RDMA_VERSION: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_INV_VERS; break; case T4_ERR_OPCODE: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; *ecode = RDMAP_INV_OPCODE; break; case T4_ERR_DDP_QUEUE_NUM: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_QN; break; case T4_ERR_MSN: case T4_ERR_MSN_GAP: case T4_ERR_MSN_RANGE: case T4_ERR_IRD_OVERFLOW: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MSN_RANGE; break; case T4_ERR_TBIT: *layer_type = LAYER_DDP|DDP_LOCAL_CATA; *ecode = 0; break; case T4_ERR_MO: *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; *ecode = DDPU_INV_MO; break; default: *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; *ecode = 0; break; } } static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, gfp_t gfp) { int ret; struct fw_ri_wr *wqe; struct terminate_message *term; struct wrqe *wr; struct socket *so = qhp->ep->com.so; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct toepcb *toep = tp->t_toe; CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp, qhp->wq.sq.qid, qhp->ep->hwtid); wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); if (wr == NULL) return; wqe = wrtod(wr); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR)); wqe->flowid_len16 = cpu_to_be32( V_FW_WR_FLOWID(qhp->ep->hwtid) | V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); term = (struct terminate_message *)wqe->u.terminate.termmsg; if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { term->layer_etype = qhp->attr.layer_etype; term->ecode = qhp->attr.ecode; } else build_term_codes(err_cqe, &term->layer_etype, &term->ecode); ret = creds(toep, inp, sizeof(*wqe)); if (ret) { free_wrqe(wr); return; } t4_wrq_tx(qhp->rhp->rdev.adap, wr); } /* Assumes qhp lock is held. */ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, struct c4iw_cq *schp) { int count; int flushed; unsigned long flag; CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp, schp); /* locking hierarchy: cq lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); spin_lock(&qhp->lock); c4iw_flush_hw_cq(&rchp->cq); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&rchp->lock, flag); if (flushed && rchp->ibcq.comp_handler) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } /* locking hierarchy: cq lock first, then qp lock. 
*/ spin_lock_irqsave(&schp->lock, flag); spin_lock(&qhp->lock); c4iw_flush_hw_cq(&schp->cq); c4iw_count_scqes(&schp->cq, &qhp->wq, &count); flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&schp->lock, flag); if (flushed && schp->ibcq.comp_handler) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } } static void flush_qp(struct c4iw_qp *qhp) { struct c4iw_cq *rchp, *schp; unsigned long flag; rchp = get_chp(qhp->rhp, qhp->attr.rcq); schp = get_chp(qhp->rhp, qhp->attr.scq); if (qhp->ibqp.uobject) { t4_set_wq_in_error(&qhp->wq); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); if (schp != rchp) { t4_set_cq_in_error(&schp->cq); spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } return; } __flush_qp(qhp, rchp, schp); } static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep) { struct c4iw_rdev *rdev = &rhp->rdev; struct adapter *sc = rdev->adap; struct fw_ri_wr *wqe; int ret; struct wrqe *wr; struct socket *so = ep->com.so; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct toepcb *toep = tp->t_toe; KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__)); CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp, qhp->wq.sq.qid, ep->hwtid); wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); if (wr == NULL) return (0); wqe = wrtod(wr); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL); wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) | V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); wqe->cookie = (unsigned long) &ep->com.wr_wait; wqe->u.fini.type = FW_RI_TYPE_FINI; c4iw_init_wr_wait(&ep->com.wr_wait); ret = creds(toep, inp, sizeof(*wqe)); if (ret) { free_wrqe(wr); return ret; } t4_wrq_tx(sc, wr); ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid, qhp->wq.sq.qid, __func__); return ret; } static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) { CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type); memset(&init->u, 0, sizeof init->u); switch (p2p_type) { case FW_RI_INIT_P2PTYPE_RDMA_WRITE: init->u.write.opcode = FW_RI_RDMA_WRITE_WR; init->u.write.stag_sink = cpu_to_be32(1); init->u.write.to_sink = cpu_to_be64(1); init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write + sizeof(struct fw_ri_immd), 16); break; case FW_RI_INIT_P2PTYPE_READ_REQ: init->u.write.opcode = FW_RI_RDMA_READ_WR; init->u.read.stag_src = cpu_to_be32(1); init->u.read.to_src_lo = cpu_to_be32(1); init->u.read.stag_sink = cpu_to_be32(1); init->u.read.to_sink_lo = cpu_to_be32(1); init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16); break; } } static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize) { struct ofld_tx_sdesc *txsd; CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep , wrsize); INP_WLOCK(inp); if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) { INP_WUNLOCK(inp); return (EINVAL); } txsd = &toep->txsd[toep->txsd_pidx]; txsd->tx_credits = howmany(wrsize, 16); txsd->plen = 0; KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, ("%s: not enough 
credits (%d)", __func__, toep->tx_credits)); toep->tx_credits -= txsd->tx_credits; if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) toep->txsd_pidx = 0; toep->txsd_avail--; INP_WUNLOCK(inp); CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep , txsd->tx_credits, toep->tx_credits, toep->txsd_pidx); return (0); } static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) { struct fw_ri_wr *wqe; int ret; struct wrqe *wr; struct c4iw_ep *ep = qhp->ep; struct c4iw_rdev *rdev = &qhp->rhp->rdev; struct adapter *sc = rdev->adap; struct socket *so = ep->com.so; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); struct toepcb *toep = tp->t_toe; CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp, qhp->wq.sq.qid, ep->hwtid); wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); if (wr == NULL) return (0); wqe = wrtod(wr); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL); wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) | V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); wqe->cookie = (unsigned long) &ep->com.wr_wait; wqe->u.init.type = FW_RI_TYPE_INIT; wqe->u.init.mpareqbit_p2ptype = V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) | V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type); wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; if (qhp->attr.mpa_attr.recv_marker_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; if (qhp->attr.mpa_attr.xmit_marker_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; if (qhp->attr.mpa_attr.crc_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | FW_RI_QP_RDMA_WRITE_ENABLE | FW_RI_QP_BIND_ENABLE; if (!qhp->ibqp.uobject) wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | FW_RI_QP_STAG0_ENABLE; wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); wqe->u.init.iss = cpu_to_be32(ep->snd_seq); wqe->u.init.irs = cpu_to_be32(ep->rcv_seq); wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - sc->vres.rq.start); if (qhp->attr.mpa_attr.initiator) build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); c4iw_init_wr_wait(&ep->com.wr_wait); ret = creds(toep, inp, sizeof(*wqe)); if (ret) { free_wrqe(wr); return ret; } t4_wrq_tx(sc, wr); ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid, qhp->wq.sq.qid, __func__); toep->ulp_mode = ULP_MODE_RDMA; return ret; } int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal) { int ret = 0; struct c4iw_qp_attributes newattr = qhp->attr; int disconnect = 0; int terminate = 0; int abort = 0; int free = 0; struct c4iw_ep *ep = NULL; CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep); CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state, (mask & C4IW_QP_ATTR_NEXT_STATE) ? 
attrs->next_state : -1); mutex_lock(&qhp->mutex); /* Process attr changes if in IDLE */ if (mask & C4IW_QP_ATTR_VALID_MODIFY) { if (qhp->attr.state != C4IW_QP_STATE_IDLE) { ret = -EIO; goto out; } if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) newattr.enable_rdma_read = attrs->enable_rdma_read; if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) newattr.enable_rdma_write = attrs->enable_rdma_write; if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) newattr.enable_bind = attrs->enable_bind; if (mask & C4IW_QP_ATTR_MAX_ORD) { if (attrs->max_ord > c4iw_max_read_depth) { ret = -EINVAL; goto out; } newattr.max_ord = attrs->max_ord; } if (mask & C4IW_QP_ATTR_MAX_IRD) { if (attrs->max_ird > c4iw_max_read_depth) { ret = -EINVAL; goto out; } newattr.max_ird = attrs->max_ird; } qhp->attr = newattr; } if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) goto out; if (qhp->attr.state == attrs->next_state) goto out; switch (qhp->attr.state) { case C4IW_QP_STATE_IDLE: switch (attrs->next_state) { case C4IW_QP_STATE_RTS: if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { ret = -EINVAL; goto out; } if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { ret = -EINVAL; goto out; } qhp->attr.mpa_attr = attrs->mpa_attr; qhp->attr.llp_stream_handle = attrs->llp_stream_handle; qhp->ep = qhp->attr.llp_stream_handle; set_state(qhp, C4IW_QP_STATE_RTS); /* * Ref the endpoint here and deref when we * disassociate the endpoint from the QP. This * happens in CLOSING->IDLE transition or *->ERROR * transition. */ c4iw_get_ep(&qhp->ep->com); ret = rdma_init(rhp, qhp); if (ret) goto err; break; case C4IW_QP_STATE_ERROR: set_state(qhp, C4IW_QP_STATE_ERROR); flush_qp(qhp); break; default: ret = -EINVAL; goto out; } break; case C4IW_QP_STATE_RTS: switch (attrs->next_state) { case C4IW_QP_STATE_CLOSING: BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); set_state(qhp, C4IW_QP_STATE_CLOSING); ep = qhp->ep; if (!internal) { abort = 0; disconnect = 1; c4iw_get_ep(&qhp->ep->com); } if (qhp->ibqp.uobject) t4_set_wq_in_error(&qhp->wq); ret = rdma_fini(rhp, qhp, ep); if (ret) goto err; break; case C4IW_QP_STATE_TERMINATE: set_state(qhp, C4IW_QP_STATE_TERMINATE); qhp->attr.layer_etype = attrs->layer_etype; qhp->attr.ecode = attrs->ecode; if (qhp->ibqp.uobject) t4_set_wq_in_error(&qhp->wq); ep = qhp->ep; if (!internal) terminate = 1; disconnect = 1; c4iw_get_ep(&qhp->ep->com); break; case C4IW_QP_STATE_ERROR: set_state(qhp, C4IW_QP_STATE_ERROR); if (qhp->ibqp.uobject) t4_set_wq_in_error(&qhp->wq); if (!internal) { abort = 1; disconnect = 1; ep = qhp->ep; c4iw_get_ep(&qhp->ep->com); } goto err; break; default: ret = -EINVAL; goto out; } break; case C4IW_QP_STATE_CLOSING: /* * Allow kernel users to move to ERROR for qp draining. 
*/ if (!internal && (qhp->ibqp.uobject || attrs->next_state != C4IW_QP_STATE_ERROR)) { ret = -EINVAL; goto out; } switch (attrs->next_state) { case C4IW_QP_STATE_IDLE: flush_qp(qhp); set_state(qhp, C4IW_QP_STATE_IDLE); qhp->attr.llp_stream_handle = NULL; c4iw_put_ep(&qhp->ep->com); qhp->ep = NULL; wake_up(&qhp->wait); break; case C4IW_QP_STATE_ERROR: goto err; default: ret = -EINVAL; goto err; } break; case C4IW_QP_STATE_ERROR: if (attrs->next_state != C4IW_QP_STATE_IDLE) { ret = -EINVAL; goto out; } if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { ret = -EINVAL; goto out; } set_state(qhp, C4IW_QP_STATE_IDLE); break; case C4IW_QP_STATE_TERMINATE: if (!internal) { ret = -EINVAL; goto out; } goto err; break; default: printf("%s in a bad state %d\n", __func__, qhp->attr.state); ret = -EINVAL; goto err; break; } goto out; err: CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__, qhp->ep, qhp->wq.sq.qid); /* disassociate the LLP connection */ qhp->attr.llp_stream_handle = NULL; if (!ep) ep = qhp->ep; qhp->ep = NULL; set_state(qhp, C4IW_QP_STATE_ERROR); free = 1; abort = 1; BUG_ON(!ep); flush_qp(qhp); wake_up(&qhp->wait); out: mutex_unlock(&qhp->mutex); if (terminate) post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); /* * If disconnect is 1, then we need to initiate a disconnect * on the EP. This can be a normal close (RTS->CLOSING) or * an abnormal close (RTS/CLOSING->ERROR). */ if (disconnect) { c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : GFP_KERNEL); c4iw_put_ep(&ep->com); } /* * If free is 1, then we've disassociated the EP from the QP * and we need to dereference the EP. */ if (free) c4iw_put_ep(&ep->com); CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state); return ret; } int c4iw_destroy_qp(struct ib_qp *ib_qp) { struct c4iw_dev *rhp; struct c4iw_qp *qhp; struct c4iw_qp_attributes attrs; struct c4iw_ucontext *ucontext; CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp); qhp = to_c4iw_qp(ib_qp); rhp = qhp->rhp; attrs.next_state = C4IW_QP_STATE_ERROR; if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); else c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); wait_event(qhp->wait, !qhp->ep); spin_lock_irq(&rhp->lock); remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid); spin_unlock_irq(&rhp->lock); atomic_dec(&qhp->refcnt); wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); ucontext = ib_qp->uobject ? to_c4iw_ucontext(ib_qp->uobject->context) : NULL; destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? 
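/*
 * Teardown ordering in c4iw_destroy_qp() here: the QP is first driven to
 * ERROR (flushing outstanding work), the code waits for the ep to be
 * disassociated, drops the creator's reference (refcnt was seeded to 1
 * at create time) and waits for the count to drain, and only then frees
 * the hardware queues.
 */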
&ucontext->uctx : &rhp->rdev.uctx); CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp, qhp->wq.sq.qid); kfree(qhp); return 0; } struct ib_qp * c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_qp *qhp; struct c4iw_pd *php; struct c4iw_cq *schp; struct c4iw_cq *rchp; struct c4iw_create_qp_resp uresp; int sqsize, rqsize; struct c4iw_ucontext *ucontext; - int ret; + int ret, spg_ndesc; struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4; CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd); if (attrs->qp_type != IB_QPT_RC) return ERR_PTR(-EINVAL); php = to_c4iw_pd(pd); rhp = php->rhp; schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); if (!schp || !rchp) return ERR_PTR(-EINVAL); if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) return ERR_PTR(-EINVAL); + spg_ndesc = rhp->rdev.adap->params.sge.spg_len / EQ_ESIZE; rqsize = roundup(attrs->cap.max_recv_wr + 1, 16); - if (rqsize > T4_MAX_RQ_SIZE) + if (rqsize > T4_MAX_RQ_SIZE(spg_ndesc)) return ERR_PTR(-E2BIG); sqsize = roundup(attrs->cap.max_send_wr + 1, 16); - if (sqsize > T4_MAX_SQ_SIZE) + if (sqsize > T4_MAX_SQ_SIZE(spg_ndesc)) return ERR_PTR(-E2BIG); ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); if (!qhp) return ERR_PTR(-ENOMEM); qhp->wq.sq.size = sqsize; - qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue; + qhp->wq.sq.memsize = (sqsize + spg_ndesc) * sizeof *qhp->wq.sq.queue + + 16 * sizeof(__be64); qhp->wq.rq.size = rqsize; - qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue; + qhp->wq.rq.memsize = (rqsize + spg_ndesc) * sizeof *qhp->wq.rq.queue; if (ucontext) { qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); } CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu", __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize); ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, ucontext ? 
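/*
 * A hedged sketch of the new size math above: the software queue sizes
 * are in WR slots of sizeof(union t4_wr) bytes, and the extra spg_ndesc
 * slots leave (generous) room for the hardware status page behind the
 * last slot, plus a 16-flit (128-byte) pad on the SQ.  For a user QP
 * both sizes are then rounded up to PAGE_SIZE so they can be mmap'd:
 *
 *	memsize = (sqsize + spg_ndesc) * sizeof(union t4_wr)
 *	    + 16 * sizeof(__be64);
 *	if (ucontext)
 *		memsize = roundup(memsize, PAGE_SIZE);
 */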
&ucontext->uctx : &rhp->rdev.uctx); if (ret) goto err1; attrs->cap.max_recv_wr = rqsize - 1; attrs->cap.max_send_wr = sqsize - 1; attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; qhp->rhp = rhp; qhp->attr.pd = php->pdid; qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; qhp->attr.sq_num_entries = attrs->cap.max_send_wr; qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; qhp->attr.sq_max_sges = attrs->cap.max_send_sge; qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; qhp->attr.state = C4IW_QP_STATE_IDLE; qhp->attr.next_state = C4IW_QP_STATE_IDLE; qhp->attr.enable_rdma_read = 1; qhp->attr.enable_rdma_write = 1; qhp->attr.enable_bind = 1; qhp->attr.max_ord = 1; qhp->attr.max_ird = 1; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); atomic_set(&qhp->refcnt, 1); spin_lock_irq(&rhp->lock); ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); spin_unlock_irq(&rhp->lock); if (ret) goto err2; if (udata) { mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); if (!mm1) { ret = -ENOMEM; goto err3; } mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); if (!mm2) { ret = -ENOMEM; goto err4; } mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); if (!mm3) { ret = -ENOMEM; goto err5; } mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); if (!mm4) { ret = -ENOMEM; goto err6; } uresp.flags = 0; uresp.qid_mask = rhp->rdev.qpmask; uresp.sqid = qhp->wq.sq.qid; uresp.sq_size = qhp->wq.sq.size; uresp.sq_memsize = qhp->wq.sq.memsize; uresp.rqid = qhp->wq.rq.qid; uresp.rq_size = qhp->wq.rq.size; uresp.rq_memsize = qhp->wq.rq.memsize; spin_lock(&ucontext->mmap_lock); uresp.sq_key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.rq_key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.sq_db_gts_key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.rq_db_gts_key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); if (ret) goto err7; mm1->key = uresp.sq_key; mm1->addr = qhp->wq.sq.phys_addr; mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); CTR4(KTR_IW_CXGBE, "%s mm1 %x, %x, %d", __func__, mm1->key, mm1->addr, mm1->len); insert_mmap(ucontext, mm1); mm2->key = uresp.rq_key; mm2->addr = vtophys(qhp->wq.rq.queue); mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); CTR4(KTR_IW_CXGBE, "%s mm2 %x, %x, %d", __func__, mm2->key, mm2->addr, mm2->len); insert_mmap(ucontext, mm2); mm3->key = uresp.sq_db_gts_key; mm3->addr = qhp->wq.sq.udb; mm3->len = PAGE_SIZE; CTR4(KTR_IW_CXGBE, "%s mm3 %x, %x, %d", __func__, mm3->key, mm3->addr, mm3->len); insert_mmap(ucontext, mm3); mm4->key = uresp.rq_db_gts_key; mm4->addr = qhp->wq.rq.udb; mm4->len = PAGE_SIZE; CTR4(KTR_IW_CXGBE, "%s mm4 %x, %x, %d", __func__, mm4->key, mm4->addr, mm4->len); insert_mmap(ucontext, mm4); } qhp->ibqp.qp_num = qhp->wq.sq.qid; init_timer(&(qhp->timer)); CTR5(KTR_IW_CXGBE, "%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x", __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, qhp->wq.sq.qid); return &qhp->ibqp; err7: kfree(mm4); err6: kfree(mm3); err5: kfree(mm2); err4: kfree(mm1); err3: remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); err2: destroy_qp(&rhp->rdev, &qhp->wq, ucontext ? 
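/*
 * The udata path above hands userspace four mmap cookies: the sq and rq
 * queue memory plus the two doorbell/GTS pages.  Each key is a unique
 * page-aligned offset minted under ucontext->mmap_lock, and the later
 * mmap() against the device matches the offset back to the
 * insert_mmap() entry.  Sketch of the key allocation:
 *
 *	spin_lock(&ucontext->mmap_lock);
 *	key = ucontext->key;
 *	ucontext->key += PAGE_SIZE;
 *	spin_unlock(&ucontext->mmap_lock);
 */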
&ucontext->uctx : &rhp->rdev.uctx); err1: kfree(qhp); return ERR_PTR(ret); } int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_qp *qhp; enum c4iw_qp_attr_mask mask = 0; struct c4iw_qp_attributes attrs; CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp); /* iwarp does not support the RTR state */ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) attr_mask &= ~IB_QP_STATE; /* Make sure we still have something left to do */ if (!attr_mask) return 0; memset(&attrs, 0, sizeof attrs); qhp = to_c4iw_qp(ibqp); rhp = qhp->rhp; attrs.next_state = c4iw_convert_state(attr->qp_state); attrs.enable_rdma_read = (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) ? 1 : 0; attrs.enable_rdma_write = (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0; mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? (C4IW_QP_ATTR_ENABLE_RDMA_READ | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); } struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) { CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn); return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); } int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct c4iw_qp *qhp = to_c4iw_qp(ibqp); memset(attr, 0, sizeof *attr); memset(init_attr, 0, sizeof *init_attr); attr->qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; } #endif Index: stable/10/sys/dev/cxgbe/iw_cxgbe/t4.h =================================================================== --- stable/10/sys/dev/cxgbe/iw_cxgbe/t4.h (revision 318798) +++ stable/10/sys/dev/cxgbe/iw_cxgbe/t4.h (revision 318799) @@ -1,583 +1,579 @@ /* * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * $FreeBSD$ */ #ifndef __T4_H__ #define __T4_H__ /* * Fixme: Adding missing defines */ #define SGE_PF_KDOORBELL 0x0 #define QID_MASK 0xffff8000U #define QID_SHIFT 15 #define QID(x) ((x) << QID_SHIFT) #define DBPRIO 0x00004000U #define PIDX_MASK 0x00003fffU #define PIDX_SHIFT 0 #define PIDX(x) ((x) << PIDX_SHIFT) #define SGE_PF_GTS 0x4 #define INGRESSQID_MASK 0xffff0000U #define INGRESSQID_SHIFT 16 #define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) #define TIMERREG_MASK 0x0000e000U #define TIMERREG_SHIFT 13 #define TIMERREG(x) ((x) << TIMERREG_SHIFT) #define SEINTARM_MASK 0x00001000U #define SEINTARM_SHIFT 12 #define SEINTARM(x) ((x) << SEINTARM_SHIFT) #define CIDXINC_MASK 0x00000fffU #define CIDXINC_SHIFT 0 #define CIDXINC(x) ((x) << CIDXINC_SHIFT) -#define T4_MAX_NUM_QP (1<<16) -#define T4_MAX_NUM_CQ (1<<15) -#define T4_MAX_NUM_PD (1<<15) -#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1) -#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES) -#define T4_MAX_IQ_SIZE (65520 - 1) -#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES) -#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1) -#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1) -#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1) +#define T4_MAX_NUM_PD 65536 +#define T4_MAX_EQ_SIZE 65520 +#define T4_MAX_IQ_SIZE 65520 +#define T4_MAX_RQ_SIZE(n) (8192 - (n) - 1) +#define T4_MAX_SQ_SIZE(n) (T4_MAX_EQ_SIZE - (n) - 1) +#define T4_MAX_QP_DEPTH(n) (T4_MAX_RQ_SIZE(n)) +#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 2) #define T4_MAX_MR_SIZE (~0ULL - 1) -#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ +#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */ #define T4_STAG_UNSET 0xffffffff #define T4_FW_MAJ 0 -#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 
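/*
 * With the hunk above, the queue-size ceilings take the status-page
 * descriptor count as a parameter instead of baking in the old
 * T4_EQ_STATUS_ENTRIES guess.  Illustrative use, with spg_ndesc
 * computed as in c4iw_create_qp():
 *
 *	if (rqsize > T4_MAX_RQ_SIZE(spg_ndesc))		(8192 - n - 1)
 *		return ERR_PTR(-E2BIG);
 *	if (sqsize > T4_MAX_SQ_SIZE(spg_ndesc))		(65520 - n - 1)
 *		return ERR_PTR(-E2BIG);
 */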
2 : 1) #define A_PCIE_MA_SYNC 0x30b4 struct t4_status_page { __be32 rsvd1; /* flit 0 - hw owns */ __be16 rsvd2; __be16 qid; __be16 cidx; __be16 pidx; u8 qp_err; /* flit 1 - sw owns */ u8 db_off; u8 pad; u16 host_wq_pidx; u16 host_cidx; u16 host_pidx; }; #define T4_EQ_ENTRY_SIZE 64 #define T4_SQ_NUM_SLOTS 5 #define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS) #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ sizeof(struct fw_ri_immd))) #define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \ sizeof(struct fw_ri_rdma_write_wr) - \ sizeof(struct fw_ri_immd))) #define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \ sizeof(struct fw_ri_rdma_write_wr) - \ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ sizeof(struct fw_ri_immd)) & ~31UL) #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) #define T4_RQ_NUM_SLOTS 2 #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) #define T4_MAX_RECV_SGE 4 union t4_wr { struct fw_ri_res_wr res; struct fw_ri_wr ri; struct fw_ri_rdma_write_wr write; struct fw_ri_send_wr send; struct fw_ri_rdma_read_wr read; struct fw_ri_bind_mw_wr bind; struct fw_ri_fr_nsmr_wr fr; struct fw_ri_inv_lstag_wr inv; struct t4_status_page status; __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS]; }; union t4_recv_wr { struct fw_ri_recv_wr recv; struct t4_status_page status; __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS]; }; static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, enum fw_wr_opcodes opcode, u8 flags, u8 len16) { wqe->send.opcode = (u8)opcode; wqe->send.flags = flags; wqe->send.wrid = wrid; wqe->send.r1[0] = 0; wqe->send.r1[1] = 0; wqe->send.r1[2] = 0; wqe->send.len16 = len16; } /* CQE/AE status codes */ #define T4_ERR_SUCCESS 0x0 #define T4_ERR_STAG 0x1 /* STAG invalid: either the */ /* STAG is off limit, being 0, */ /* or STAG_key mismatch */ #define T4_ERR_PDID 0x2 /* PDID mismatch */ #define T4_ERR_QPID 0x3 /* QPID mismatch */ #define T4_ERR_ACCESS 0x4 /* Invalid access right */ #define T4_ERR_WRAP 0x5 /* Wrap error */ #define T4_ERR_BOUND 0x6 /* base and bounds violation */ #define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */ /* shared memory region */ #define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */ /* shared memory region */ #define T4_ERR_ECC 0x9 /* ECC error detected */ #define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */ /* reading PSTAG for a MW */ /* Invalidate */ #define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */ /* software error */ #define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */ #define T4_ERR_CRC 0x10 /* CRC error */ #define T4_ERR_MARKER 0x11 /* Marker error */ #define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */ #define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */ #define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */ #define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */ #define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */ #define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */ #define T4_ERR_MSN 0x18 /* MSN error */ #define T4_ERR_TBIT 0x19 /* tag bit not set correctly */ #define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */ /* or READ_REQ */ #define T4_ERR_MSN_GAP 0x1B #define T4_ERR_MSN_RANGE 0x1C #define T4_ERR_IRD_OVERFLOW 0x1D #define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */ /*
software error */ #define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */ /* mismatch) */ /* * CQE defs */ struct t4_cqe { __be32 header; __be32 len; union { struct { __be32 stag; __be32 msn; } rcqe; struct { u32 nada1; u16 nada2; u16 cidx; } scqe; struct { __be32 wrid_hi; __be32 wrid_low; } gen; u64 drain_cookie; } u; __be64 reserved; __be64 bits_type_ts; }; /* macros for flit 0 of the cqe */ #define S_CQE_QPID 12 #define M_CQE_QPID 0xFFFFF #define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID) #define V_CQE_QPID(x) ((x)<<S_CQE_QPID) #define S_CQE_SWCQE 11 #define M_CQE_SWCQE 0x1 #define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE) #define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE) #define S_CQE_STATUS 5 #define M_CQE_STATUS 0x1F #define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS) #define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS) #define S_CQE_TYPE 4 #define M_CQE_TYPE 0x1 #define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE) #define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE) #define S_CQE_OPCODE 0 #define M_CQE_OPCODE 0xF #define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE) #define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE) #define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header))) #define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header))) #define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) #define RQ_TYPE(x) (!CQE_TYPE((x))) #define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header))) #define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header))) #define CQE_SEND_OPCODE(x)(\ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV)) #define CQE_LEN(x) (be32_to_cpu((x)->len)) /* used for RQ completion processing */ #define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag)) #define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn)) /* used for SQ completion processing */ #define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx) /* generic accessor macros */ #define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi) #define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low) #define CQE_DRAIN_COOKIE(x) (x)->u.drain_cookie; /* macros for flit 3 of the cqe */ #define S_CQE_GENBIT 63 #define M_CQE_GENBIT 0x1 #define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT) #define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT) #define S_CQE_OVFBIT 62 #define M_CQE_OVFBIT 0x1 #define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT) #define S_CQE_IQTYPE 60 #define M_CQE_IQTYPE 0x3 #define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE) #define M_CQE_TS 0x0fffffffffffffffULL #define G_CQE_TS(x) ((x) & M_CQE_TS) #define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts))) #define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts))) #define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts))) struct t4_swsqe { u64 wr_id; struct t4_cqe cqe; int read_len; int opcode; int complete; int signaled; u16 idx; }; struct t4_sq { union t4_wr *queue; bus_addr_t dma_addr; DECLARE_PCI_UNMAP_ADDR(mapping); unsigned long phys_addr; struct t4_swsqe *sw_sq; struct t4_swsqe *oldest_read; u64 udb; size_t memsize; u32 qid; u16 in_use; u16 size; u16 cidx; u16 pidx; u16 wq_pidx; u16 flags; }; struct t4_swrqe { u64 wr_id; }; struct t4_rq { union t4_recv_wr *queue; bus_addr_t dma_addr; DECLARE_PCI_UNMAP_ADDR(mapping); struct t4_swrqe *sw_rq; u64 udb; size_t memsize; u32 qid; u32 msn; u32 rqt_hwaddr; u16 rqt_size; u16 in_use; u16 size; u16 cidx; u16 pidx; u16 wq_pidx; }; struct t4_wq { struct t4_sq sq; struct t4_rq rq; void __iomem *db; void __iomem *gts; struct c4iw_rdev *rdev; }; static inline int t4_rqes_posted(struct t4_wq *wq) { return wq->rq.in_use; } static inline int t4_rq_empty(struct t4_wq *wq) { return wq->rq.in_use == 0; } static inline int t4_rq_full(struct t4_wq *wq) { return wq->rq.in_use == (wq->rq.size - 1); } static inline u32 t4_rq_avail(struct t4_wq *wq) { return
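/*
 * Worked example for the flit-0 accessors above, which pack five fields
 * into the 32-bit CQE header.  For QPID 0x42, a software CQE, status
 * T4_ERR_SUCCESS, type 1 (SQ) and opcode FW_RI_SEND:
 *
 *	hdr = V_CQE_QPID(0x42) | V_CQE_SWCQE(1) | V_CQE_STATUS(0) |
 *	    V_CQE_TYPE(1) | V_CQE_OPCODE(FW_RI_SEND);
 *
 * G_CQE_QPID() then recovers 0x42, after the be32_to_cpu() byte swap
 * baked into CQE_QPID().
 */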
wq->rq.size - 1 - wq->rq.in_use; } static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) { wq->rq.in_use++; if (++wq->rq.pidx == wq->rq.size) wq->rq.pidx = 0; wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; } static inline void t4_rq_consume(struct t4_wq *wq) { wq->rq.in_use--; wq->rq.msn++; if (++wq->rq.cidx == wq->rq.size) wq->rq.cidx = 0; } static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq) { return wq->rq.queue[wq->rq.size].status.host_wq_pidx; } static inline u16 t4_rq_wq_size(struct t4_wq *wq) { return wq->rq.size * T4_RQ_NUM_SLOTS; } static inline int t4_sq_empty(struct t4_wq *wq) { return wq->sq.in_use == 0; } static inline int t4_sq_full(struct t4_wq *wq) { return wq->sq.in_use == (wq->sq.size - 1); } static inline u32 t4_sq_avail(struct t4_wq *wq) { return wq->sq.size - 1 - wq->sq.in_use; } static inline void t4_sq_produce(struct t4_wq *wq, u8 len16) { wq->sq.in_use++; if (++wq->sq.pidx == wq->sq.size) wq->sq.pidx = 0; wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; } static inline void t4_sq_consume(struct t4_wq *wq) { wq->sq.in_use--; if (++wq->sq.cidx == wq->sq.size) wq->sq.cidx = 0; } static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq) { return wq->sq.queue[wq->sq.size].status.host_wq_pidx; } static inline u16 t4_sq_wq_size(struct t4_wq *wq) { return wq->sq.size * T4_SQ_NUM_SLOTS; } static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) { wmb(); writel(QID(wq->sq.qid) | PIDX(inc), wq->db); } static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) { wmb(); writel(QID(wq->rq.qid) | PIDX(inc), wq->db); } static inline int t4_wq_in_error(struct t4_wq *wq) { return wq->rq.queue[wq->rq.size].status.qp_err; } static inline void t4_set_wq_in_error(struct t4_wq *wq) { wq->rq.queue[wq->rq.size].status.qp_err = 1; } struct t4_cq { struct t4_cqe *queue; bus_addr_t dma_addr; DECLARE_PCI_UNMAP_ADDR(mapping); struct t4_cqe *sw_queue; void __iomem *gts; struct c4iw_rdev *rdev; u64 ugts; size_t memsize; __be64 bits_type_ts; u32 cqid; u16 size; /* including status page */ u16 cidx; u16 sw_pidx; u16 sw_cidx; u16 sw_in_use; u16 cidx_inc; u8 gen; u8 error; }; static inline int t4_arm_cq(struct t4_cq *cq, int se) { u32 val; while (cq->cidx_inc > CIDXINC_MASK) { val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | INGRESSQID(cq->cqid); writel(val, cq->gts); cq->cidx_inc -= CIDXINC_MASK; } val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | INGRESSQID(cq->cqid); writel(val, cq->gts); cq->cidx_inc = 0; return 0; } static inline void t4_swcq_produce(struct t4_cq *cq) { cq->sw_in_use++; if (++cq->sw_pidx == cq->size) cq->sw_pidx = 0; } static inline void t4_swcq_consume(struct t4_cq *cq) { cq->sw_in_use--; if (++cq->sw_cidx == cq->size) cq->sw_cidx = 0; } static inline void t4_hwcq_consume(struct t4_cq *cq) { cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) { u32 val; val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) | INGRESSQID(cq->cqid); writel(val, cq->gts); cq->cidx_inc = 0; } if (++cq->cidx == cq->size) { cq->cidx = 0; cq->gen ^= 1; } } static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) { return (CQE_GENBIT(cqe) == cq->gen); } static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { int ret; u16 prev_cidx; if (cq->cidx == 0) 
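/*
 * Two bookkeeping details above: the produce helpers keep two cursors,
 * pidx in WR slots and wq_pidx in 64-byte descriptors (a recv WR with
 * len16 = 2, i.e. 32 bytes, advances wq_pidx by DIV_ROUND_UP(2 * 16,
 * 64) = 1), and t4_hwcq_consume() batches CIDXINC writes to the GTS
 * register until cidx_inc reaches a sixteenth of the CQ (cq->size >> 4).
 */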
prev_cidx = cq->size - 1; else prev_cidx = cq->cidx - 1; if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { ret = -EOVERFLOW; cq->error = 1; printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { *cqe = &cq->queue[cq->cidx]; ret = 0; } else ret = -ENODATA; return ret; } static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) { if (cq->sw_in_use) return &cq->sw_queue[cq->sw_cidx]; return NULL; } static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { int ret = 0; if (cq->error) ret = -ENODATA; else if (cq->sw_in_use) *cqe = &cq->sw_queue[cq->sw_cidx]; else ret = t4_next_hw_cqe(cq, cqe); return ret; } static inline int t4_cq_in_error(struct t4_cq *cq) { return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err; } static inline void t4_set_cq_in_error(struct t4_cq *cq) { ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; } #endif Index: stable/10 =================================================================== --- stable/10 (revision 318798) +++ stable/10 (revision 318799) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r311880,314167,316118,316571,316573,316580,316936-316937,316940,317410