Changeset View
Standalone View
sys/ofed/drivers/infiniband/core/ib_mad.c
- This file was moved from sys/ofed/drivers/infiniband/core/mad.c.
 /*
  * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2009 HNR Consulting. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
  * OpenIB.org BSD license below:
  *
  * Redistribution and use in source and binary forms, with or
[16 lines not shown]
  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
  */
 #define LINUXKPI_PARAM_PREFIX ibcore_
-#define KBUILD_MODNAME "ibcore"
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/string.h>
 #include <rdma/ib_cache.h>

 #include "mad_priv.h"
 #include "mad_rmpp.h"
 #include "smi.h"
+#include "opa_smi.h"
 #include "agent.h"
+#include "core_priv.h"

-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB MAD API");
-MODULE_AUTHOR("Hal Rosenstock");
-MODULE_AUTHOR("Sean Hefty");

 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

-static struct kmem_cache *ib_mad_cache;

 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
-/*
- * Timeout FIFO (tf) params
- */
-enum {
-	/* min time between 2 consecutive activations of tf workqueue */
-	MIN_BETWEEN_ACTIVATIONS_MS = 5
-};
-
-/*
- * SA congestion control params
- */
-enum {
-	MAX_OUTSTANDING_SA_MADS = 10,
-	MIN_TIME_FOR_SA_MAD_SEND_MS = 20,
-	MAX_SA_MADS = 10000
-};
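/*
 * Illustrative sketch, not from either side of this diff: how the constants
 * above bound an SA MAD's stay in the congestion-control queue. A request
 * with a per-send timeout and N retries has a total response window of
 * timeout_ms * (N + 1); MIN_TIME_FOR_SA_MAD_SEND_MS is held back so there is
 * still time to actually send after waiting in the queue.
 */
#include <stdio.h>

#define MIN_TIME_FOR_SA_MAD_SEND_MS	20

static int sa_cc_queue_budget_ms(int timeout_ms, int retries_left)
{
	int qtime = timeout_ms * (retries_left + 1) -
	    MIN_TIME_FOR_SA_MAD_SEND_MS;

	return qtime < 0 ? 0 : qtime;	/* same clamp as sa_cc_mad_send() */
}

int main(void)
{
	printf("%d\n", sa_cc_queue_budget_ms(100, 3));	/* 380 ms in queue */
	printf("%d\n", sa_cc_queue_budget_ms(10, 0));	/* 0: send at once */
	return 0;
}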
 /* Port list lock */
 static DEFINE_SPINLOCK(ib_mad_port_list_lock);

 /* Forward declarations */
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
 static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
-					struct ib_mad *mad);
+					const struct ib_mad_hdr *mad);
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
 static void timeout_sends(struct work_struct *work);
 static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
-static int send_sa_cc_mad(struct ib_mad_send_wr_private *mad_send_wr,
-			  u32 timeout_ms, u32 retries_left);
+static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
+			      struct ib_wc *wc);
+static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

 /*
- * Timeout FIFO functions - implements a FIFO with a timeout mechanism
- */
-
-static void activate_timeout_handler_task(unsigned long data)
-{
-	struct to_fifo *tf;
-
-	tf = (struct to_fifo *)data;
-	del_timer(&tf->timer);
-	queue_work(tf->workq, &tf->work);
-}
-
-static unsigned long adjusted_time(unsigned long last, unsigned long next)
-{
-	unsigned long min_next;
-
-	min_next = last + msecs_to_jiffies(MIN_BETWEEN_ACTIVATIONS_MS);
-	if (time_after(min_next, next))
-		return min_next;
-
-	return next;
-}
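/*
 * Standalone sketch of the adjusted_time() clamp above, assuming the usual
 * wrap-safe jiffies comparison: the timeout workqueue fires at most once per
 * MIN_BETWEEN_ACTIVATIONS_MS, so an expiry that is closer than that gets
 * pushed out to last + 5 ms.
 */
#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

static unsigned long adjusted(unsigned long last, unsigned long next,
			      unsigned long min_gap)
{
	unsigned long min_next = last + min_gap;

	return time_after(min_next, next) ? min_next : next;
}

int main(void)
{
	printf("%lu\n", adjusted(1000, 1002, 5));	/* 1005: rate-limited */
	printf("%lu\n", adjusted(1000, 1100, 5));	/* 1100: far enough out */
	return 0;
}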
-static void notify_failure(struct ib_mad_send_wr_private *mad_send_wr,
-			   enum ib_wc_status status)
-{
-	struct ib_mad_send_wc mad_send_wc;
-	struct ib_mad_agent_private *mad_agent_priv;
-
-	mad_send_wc.status = status;
-	mad_send_wc.vendor_err = 0;
-	mad_send_wc.send_buf = &mad_send_wr->send_buf;
-	mad_agent_priv = mad_send_wr->mad_agent_priv;
-	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc);
-}
-
-static inline struct sa_cc_data *
-get_cc_obj(struct ib_mad_send_wr_private *mad_send_wr)
-{
-	return &mad_send_wr->mad_agent_priv->qp_info->port_priv->sa_cc;
-}
-
-static inline struct ib_mad_send_wr_private *tfe_to_mad(struct tf_entry *tfe)
-{
-	return container_of(tfe, struct ib_mad_send_wr_private, tf_list);
-}
-
-static void timeout_handler_task(struct work_struct *work)
-{
-	struct tf_entry *tmp1, *tmp2;
-	struct list_head *list_item, exp_lst;
-	unsigned long flags, curr_time;
-	int lst_empty;
-	struct to_fifo *tf;
-
-	tf = container_of(work, struct to_fifo, work);
-	do {
-		INIT_LIST_HEAD(&exp_lst);
-
-		spin_lock_irqsave(&tf->lists_lock, flags);
-		curr_time = jiffies;
-		list_for_each(list_item, &tf->to_head) {
-			tmp1 = list_entry(list_item, struct tf_entry, to_list);
-			if (time_before(curr_time, tmp1->exp_time))
-				break;
-			list_del(&tmp1->fifo_list);
-			tf->num_items--;
-		}
-
-		/* cut list up to and including list_item->prev */
-		list_cut_position(&exp_lst, &tf->to_head, list_item->prev);
-		spin_unlock_irqrestore(&tf->lists_lock, flags);
-
-		lst_empty = list_empty(&exp_lst);
-		list_for_each_entry_safe(tmp1, tmp2, &exp_lst, to_list) {
-			list_del(&tmp1->to_list);
-			if (tmp1->canceled) {
-				tmp1->canceled = 0;
-				notify_failure(tfe_to_mad(tmp1), IB_WC_WR_FLUSH_ERR);
-			} else {
-				notify_failure(tfe_to_mad(tmp1), IB_WC_RESP_TIMEOUT_ERR);
-			}
-		}
-	} while (!lst_empty);
-
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	if (!list_empty(&tf->to_head)) {
-		tmp1 = list_entry(tf->to_head.next, struct tf_entry, to_list);
-		mod_timer(&tf->timer, adjusted_time(curr_time, tmp1->exp_time));
-	}
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-}
-
-/**
- * tf_create - creates a new timeout-fifo object
- * @fifo_size: maximum fifo size
- *
- * Allocate and initialize a new timeout-fifo object.
- */
-static struct to_fifo *tf_create(u32 fifo_size)
-{
-	struct to_fifo *tf;
-
-	tf = kzalloc(sizeof(*tf), GFP_KERNEL);
-	if (tf) {
-		tf->workq = create_singlethread_workqueue("to_fifo");
-		if (!tf->workq) {
-			kfree(tf);
-			return NULL;
-		}
-		spin_lock_init(&tf->lists_lock);
-		INIT_LIST_HEAD(&tf->to_head);
-		INIT_LIST_HEAD(&tf->fifo_head);
-		init_timer(&tf->timer);
-		INIT_WORK(&tf->work, timeout_handler_task);
-		tf->timer.data = (unsigned long) tf;
-		tf->timer.function = activate_timeout_handler_task;
-		tf->timer.expires = jiffies;
-		tf->fifo_size = fifo_size;
-		tf->stop_enqueue = 0;
-		tf->num_items = 0;
-	}
-
-	return tf;
-}
-
-/**
- * tf_enqueue - enqueue item to timeout-fifo object
- * @tf: timeout-fifo object
- * @item: item to enqueue.
- * @timeout_ms: item expiration time in ms.
- *
- * Enqueue item to fifo and modify expiration timer when required.
- *
- * Returns 0 on success and negative on failure.
- */
-static int tf_enqueue(struct to_fifo *tf, struct tf_entry *item, u32 timeout_ms)
-{
-	struct tf_entry *tmp;
-	struct list_head *list_item;
-	unsigned long flags;
-
-	item->exp_time = jiffies + msecs_to_jiffies(timeout_ms);
-
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	if (tf->num_items >= tf->fifo_size || tf->stop_enqueue) {
-		spin_unlock_irqrestore(&tf->lists_lock, flags);
-		return -EBUSY;
-	}
-
-	/* Insert item to timeout list */
-	list_for_each_prev(list_item, &tf->to_head) {
-		tmp = list_entry(list_item, struct tf_entry, to_list);
-		if (time_after(item->exp_time, tmp->exp_time))
-			break;
-	}
-	list_add(&item->to_list, list_item);
-
-	/* Insert item to fifo list */
-	list_add_tail(&item->fifo_list, &tf->fifo_head);
-
-	tf->num_items++;
-
-	/* modify expiration timer if required */
-	if (list_item == &tf->to_head)
-		mod_timer(&tf->timer, item->exp_time);
-
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-
-	return 0;
-}
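/*
 * Sketch of tf_enqueue()'s insertion rule on a plain array instead of a
 * kernel list (illustrative only): scan from the tail for the last entry
 * whose deadline is strictly earlier than the new one and insert right
 * after it. Entries with equal deadlines are passed over, which is fine
 * because arrival order is tracked separately on the fifo list.
 */
#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

static int insert_pos(const unsigned long *exp, int n, unsigned long new_exp)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		if (time_after(new_exp, exp[i]))
			break;
	return i + 1;	/* index the new deadline would occupy */
}

int main(void)
{
	unsigned long exp[] = { 10, 20, 20, 40 };

	printf("%d\n", insert_pos(exp, 4, 50));	/* 4: new tail */
	printf("%d\n", insert_pos(exp, 4, 5));	/* 0: new head, timer moves */
	return 0;
}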
-/**
- * tf_dequeue - dequeue item from timeout-fifo object
- * @tf: timeout-fifo object
- * @time_left_ms: returns the time left for expiration in ms.
- *
- * Dequeue item from fifo and modify expiration timer when required.
- *
- * Returns pointer to tf_entry on success and NULL on failure.
- */
-static struct tf_entry *tf_dequeue(struct to_fifo *tf, u32 *time_left_ms)
-{
-	unsigned long flags;
-	unsigned long time_left;
-	struct tf_entry *tmp, *tmp1;
-	bool found = false;
-
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	if (list_empty(&tf->fifo_head)) {
-		spin_unlock_irqrestore(&tf->lists_lock, flags);
-		return NULL;
-	}
-
-	list_for_each_entry(tmp, &tf->fifo_head, fifo_list) {
-		if (!tmp->canceled) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		spin_unlock_irqrestore(&tf->lists_lock, flags);
-		return NULL;
-	}
-
-	/* modify timer in case the dequeued item is the next to expire */
-	if (tf->to_head.next == &tmp->to_list) {
-		if (list_is_last(&tmp->to_list, &tf->to_head)) {
-			del_timer(&tf->timer);
-		} else {
-			tmp1 = list_entry(tmp->to_list.next, struct tf_entry, to_list);
-			mod_timer(&tf->timer, tmp1->exp_time);
-		}
-	}
-
-	list_del(&tmp->fifo_list);
-	list_del(&tmp->to_list);
-	tf->num_items--;
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-
-	time_left = tmp->exp_time - jiffies;
-	if ((long) time_left <= 0)
-		time_left = 0;
-	*time_left_ms = jiffies_to_msecs(time_left);
-
-	return tmp;
-}
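/*
 * Sketch of the remaining-time computation at the end of tf_dequeue():
 * jiffies subtraction happens in unsigned arithmetic, so the difference is
 * cast to signed to detect a deadline that has already passed and clamp it.
 */
#include <stdio.h>

static unsigned long ticks_left(unsigned long exp_time, unsigned long now)
{
	unsigned long time_left = exp_time - now;

	if ((long)time_left <= 0)
		time_left = 0;
	return time_left;
}

int main(void)
{
	printf("%lu\n", ticks_left(1500, 1000));	/* 500 ticks remain */
	printf("%lu\n", ticks_left(1000, 1500));	/* 0: already expired */
	return 0;
}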
-static void tf_stop_enqueue(struct to_fifo *tf)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	tf->stop_enqueue = 1;
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-}
-
-/**
- * tf_free - free an empty timeout-fifo object
- * @tf: timeout-fifo object
- */
-static void tf_free(struct to_fifo *tf)
-{
-	del_timer_sync(&tf->timer);
-	flush_workqueue(tf->workq);
-	destroy_workqueue(tf->workq);
-	kfree(tf);
-}
-
-/**
- * tf_free_agent - free MADs related to a specific MAD agent from timeout-fifo
- * @tf: timeout-fifo object
- * @mad_agent_priv: MAD agent.
- */
-static void tf_free_agent(struct to_fifo *tf, struct ib_mad_agent_private *mad_agent_priv)
-{
-	unsigned long flags;
-	struct tf_entry *tmp, *tmp1;
-	struct list_head tmp_head;
-
-	INIT_LIST_HEAD(&tmp_head);
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	list_for_each_entry_safe(tmp, tmp1, &tf->fifo_head, fifo_list) {
-		if (tfe_to_mad(tmp)->mad_agent_priv == mad_agent_priv) {
-			list_del(&tmp->to_list);
-			list_move(&tmp->fifo_list, &tmp_head);
-			tf->num_items--;
-		}
-	}
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-
-	list_for_each_entry_safe(tmp, tmp1, &tmp_head, fifo_list) {
-		list_del(&tmp->fifo_list);
-		notify_failure(tfe_to_mad(tmp), IB_WC_WR_FLUSH_ERR);
-	}
-}
-
-/**
- * tf_modify_item - modify the expiration time of a specific item
- * @tf: timeout-fifo object
- * @mad_agent_priv: MAD agent.
- * @send_buf: the MAD to modify in queue
- * @timeout_ms: new timeout to set.
- *
- * Returns 0 if the item was found on the list and -ENXIO if not.
- *
- * Note: send_buf may point to a MAD that was already released.
- * Therefore we can't dereference it before finding it in the list.
- */
-static int tf_modify_item(struct to_fifo *tf,
-			  struct ib_mad_agent_private *mad_agent_priv,
-			  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
-{
-	struct tf_entry *tmp, *item;
-	struct list_head *list_item;
-	unsigned long flags;
-	int found = 0;
-
-	spin_lock_irqsave(&tf->lists_lock, flags);
-	list_for_each_entry(item, &tf->fifo_head, fifo_list) {
-		if (tfe_to_mad(item)->mad_agent_priv == mad_agent_priv &&
-		    &tfe_to_mad(item)->send_buf == send_buf) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		spin_unlock_irqrestore(&tf->lists_lock, flags);
-		return -ENXIO;
-	}
-
-	item->exp_time = jiffies + msecs_to_jiffies(timeout_ms);
-
-	if (timeout_ms) {
-		list_del(&item->to_list);
-		list_for_each_prev(list_item, &tf->to_head) {
-			tmp = list_entry(list_item, struct tf_entry, to_list);
-			if (time_after(item->exp_time, tmp->exp_time))
-				break;
-		}
-		list_add(&item->to_list, list_item);
-
-		/* modify expiration timer if required */
-		if (list_item == &tf->to_head)
-			mod_timer(&tf->timer, item->exp_time);
-	} else {
-		/*
-		 * when an item is canceled (timeout_ms == 0), move it to the
-		 * head of the timeout list and to the tail of the fifo list
-		 */
-		item->canceled = 1;
-		list_move(&item->to_list, &tf->to_head);
-		list_move_tail(&item->fifo_list, &tf->fifo_head);
-		mod_timer(&tf->timer, item->exp_time);
-	}
-	spin_unlock_irqrestore(&tf->lists_lock, flags);
-
-	return 0;
-}
-
-/*
- * SA congestion control functions
- */
-
-/*
- * Defines which MADs are under congestion control.
- */
-static int is_sa_cc_mad(struct ib_mad_send_wr_private *mad_send_wr)
-{
-	struct ib_mad_hdr *mad;
-
-	mad = (struct ib_mad_hdr *)mad_send_wr->send_buf.mad;
-
-	return ((mad_send_wr->send_buf.timeout_ms) &&
-		(mad->mgmt_class == IB_MGMT_CLASS_SUBN_ADM) &&
-		((mad->method == IB_MGMT_METHOD_GET) ||
-		 (mad->method == IB_MGMT_METHOD_SET)));
-}
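/*
 * Sketch of the predicate above with the relevant IBA constants written
 * out: only timed SA (Subnet Administration) Get/Set requests take the
 * congestion-controlled path; everything else is posted directly.
 */
#include <stdio.h>

#define IB_MGMT_CLASS_SUBN_ADM	0x03
#define IB_MGMT_METHOD_GET	0x01
#define IB_MGMT_METHOD_SET	0x02

static int is_sa_cc(unsigned timeout_ms, unsigned char mgmt_class,
		    unsigned char method)
{
	return timeout_ms != 0 &&
	    mgmt_class == IB_MGMT_CLASS_SUBN_ADM &&
	    (method == IB_MGMT_METHOD_GET || method == IB_MGMT_METHOD_SET);
}

int main(void)
{
	printf("%d\n", is_sa_cc(100, 0x03, 0x01));	/* 1: timed SA Get */
	printf("%d\n", is_sa_cc(0, 0x03, 0x01));	/* 0: no timeout set */
	printf("%d\n", is_sa_cc(100, 0x81, 0x01));	/* 0: not the SA class */
	return 0;
}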
-/*
- * Notify that an SA congestion controlled MAD is done, allowing the next
- * SA MAD to be dequeued from the congestion control queue.
- */
-static void sa_cc_mad_done(struct sa_cc_data *cc_obj)
-{
-	unsigned long flags;
-	struct tf_entry *tfe;
-	struct ib_mad_send_wr_private *mad_send_wr;
-	u32 time_left_ms, timeout_ms, retries;
-	int ret;
-
-	do {
-		spin_lock_irqsave(&cc_obj->lock, flags);
-		tfe = tf_dequeue(cc_obj->tf, &time_left_ms);
-		if (!tfe) {
-			if (cc_obj->outstanding > 0)
-				cc_obj->outstanding--;
-			spin_unlock_irqrestore(&cc_obj->lock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(&cc_obj->lock, flags);
-		mad_send_wr = tfe_to_mad(tfe);
-		time_left_ms += MIN_TIME_FOR_SA_MAD_SEND_MS;
-		if (time_left_ms > mad_send_wr->send_buf.timeout_ms) {
-			retries = time_left_ms / mad_send_wr->send_buf.timeout_ms - 1;
-			timeout_ms = mad_send_wr->send_buf.timeout_ms;
-		} else {
-			retries = 0;
-			timeout_ms = time_left_ms;
-		}
-		ret = send_sa_cc_mad(mad_send_wr, timeout_ms, retries);
-		if (ret) {
-			if (ret == -ENOMEM)
-				notify_failure(mad_send_wr, IB_WC_GENERAL_ERR);
-			else
-				notify_failure(mad_send_wr, IB_WC_LOC_QP_OP_ERR);
-		}
-	} while (ret);
-}
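/*
 * Sketch of the arithmetic in sa_cc_mad_done() above: the time a queued MAD
 * has left (plus the minimum send slot) is folded back into a per-send
 * timeout and a retry count before the MAD is finally sent.
 */
#include <stdio.h>

#define MIN_TIME_FOR_SA_MAD_SEND_MS	20

static void split_budget(unsigned time_left_ms, unsigned send_timeout_ms,
			 unsigned *timeout_ms, unsigned *retries)
{
	time_left_ms += MIN_TIME_FOR_SA_MAD_SEND_MS;
	if (time_left_ms > send_timeout_ms) {
		*retries = time_left_ms / send_timeout_ms - 1;
		*timeout_ms = send_timeout_ms;
	} else {
		*retries = 0;
		*timeout_ms = time_left_ms;
	}
}

int main(void)
{
	unsigned t, r;

	split_budget(380, 100, &t, &r);	/* 400 ms left: 100 ms x (1 + 3) */
	printf("timeout=%u retries=%u\n", t, r);	/* timeout=100 retries=3 */
	return 0;
}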
-/*
- * Send SA MAD under congestion control.
- */
-static int sa_cc_mad_send(struct ib_mad_send_wr_private *mad_send_wr)
-{
-	unsigned long flags;
-	int ret;
-	struct sa_cc_data *cc_obj;
-
-	cc_obj = get_cc_obj(mad_send_wr);
-	spin_lock_irqsave(&cc_obj->lock, flags);
-	if (cc_obj->outstanding < MAX_OUTSTANDING_SA_MADS) {
-		cc_obj->outstanding++;
-		spin_unlock_irqrestore(&cc_obj->lock, flags);
-		ret = send_sa_cc_mad(mad_send_wr, mad_send_wr->send_buf.timeout_ms,
-				     mad_send_wr->retries_left);
-		if (ret)
-			sa_cc_mad_done(cc_obj);
-	} else {
-		int qtime = (mad_send_wr->send_buf.timeout_ms *
-			     (mad_send_wr->retries_left + 1))
-			    - MIN_TIME_FOR_SA_MAD_SEND_MS;
-
-		if (qtime < 0)
-			qtime = 0;
-		ret = tf_enqueue(cc_obj->tf, &mad_send_wr->tf_list, (u32)qtime);
-
-		spin_unlock_irqrestore(&cc_obj->lock, flags);
-	}
-
-	return ret;
-}
-
-/*
- * Initialize SA congestion control.
- */
-static int sa_cc_init(struct sa_cc_data *cc_obj)
-{
-	spin_lock_init(&cc_obj->lock);
-	cc_obj->outstanding = 0;
-	cc_obj->tf = tf_create(MAX_SA_MADS);
-	if (!cc_obj->tf)
-		return -ENOMEM;
-	return 0;
-}
-
-/*
- * Cancel SA MADs from congestion control queue.
- */
-static void cancel_sa_cc_mads(struct ib_mad_agent_private *mad_agent_priv)
-{
-	tf_free_agent(mad_agent_priv->qp_info->port_priv->sa_cc.tf,
-		      mad_agent_priv);
-}
-
-/*
- * Modify timeout of SA MAD on congestion control queue.
- */
-static int modify_sa_cc_mad(struct ib_mad_agent_private *mad_agent_priv,
-			    struct ib_mad_send_buf *send_buf, u32 timeout_ms)
-{
-	int ret;
-	int qtime = 0;
-
-	if (timeout_ms > MIN_TIME_FOR_SA_MAD_SEND_MS)
-		qtime = timeout_ms - MIN_TIME_FOR_SA_MAD_SEND_MS;
-
-	ret = tf_modify_item(mad_agent_priv->qp_info->port_priv->sa_cc.tf,
-			     mad_agent_priv, send_buf, (u32)qtime);
-	return ret;
-}
-
-static void sa_cc_destroy(struct sa_cc_data *cc_obj)
-{
-	struct ib_mad_send_wr_private *mad_send_wr;
-	struct tf_entry *tfe;
-	struct ib_mad_send_wc mad_send_wc;
-	struct ib_mad_agent_private *mad_agent_priv;
-	u32 time_left_ms;
-
-	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
-	mad_send_wc.vendor_err = 0;
-	tf_stop_enqueue(cc_obj->tf);
-	tfe = tf_dequeue(cc_obj->tf, &time_left_ms);
-	while (tfe) {
-		mad_send_wr = tfe_to_mad(tfe);
-		mad_send_wc.send_buf = &mad_send_wr->send_buf;
-		mad_agent_priv = mad_send_wr->mad_agent_priv;
-		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
-						   &mad_send_wc);
-		tfe = tf_dequeue(cc_obj->tf, &time_left_ms);
-	}
-	tf_free(cc_obj->tf);
-}
-
-/*
  * Returns a ib_mad_port_private structure or NULL for a device/port
  * Assumes ib_mad_port_list_lock is being held
  */
 static inline struct ib_mad_port_private *
 __ib_get_mad_port(struct ib_device *device, int port_num)
 {
 	struct ib_mad_port_private *entry;

[77 lines not shown, in: if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {]

 				else
 					break;
 			}
 		}
 	}
 	return 0;
 }

-int ib_response_mad(struct ib_mad *mad)
+int ib_response_mad(const struct ib_mad_hdr *hdr)
 {
-	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
-		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
-		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
+		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
+		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 }
 EXPORT_SYMBOL(ib_response_mad);
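/*
 * Standalone sketch of the new ib_response_mad() contract on a bare header,
 * with the constants spelled out (attr_mod byte order simplified relative
 * to the kernel's big-endian field): the R bit (0x80) in the method, a
 * TrapRepress, or the BM response bit all mark a MAD as a response.
 */
#include <stdio.h>
#include <stdint.h>

#define IB_MGMT_METHOD_RESP		0x80
#define IB_MGMT_METHOD_TRAP_REPRESS	0x07
#define IB_MGMT_CLASS_BM		0x04
#define IB_BM_ATTR_MOD_RESP		0x01

struct mad_hdr {
	uint8_t mgmt_class;
	uint8_t method;
	uint32_t attr_mod;
};

static int response_mad(const struct mad_hdr *hdr)
{
	return (hdr->method & IB_MGMT_METHOD_RESP) ||
	    hdr->method == IB_MGMT_METHOD_TRAP_REPRESS ||
	    (hdr->mgmt_class == IB_MGMT_CLASS_BM &&
	     (hdr->attr_mod & IB_BM_ATTR_MOD_RESP));
}

int main(void)
{
	struct mad_hdr get = { 0x03, 0x01, 0 };		/* SA Get request */
	struct mad_hdr getresp = { 0x03, 0x81, 0 };	/* SA GetResp */

	printf("%d %d\n", response_mad(&get), response_mad(&getresp)); /* 0 1 */
	return 0;
}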
 /*
  * ib_register_mad_agent - Register to send/receive MADs
  */
 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					   u8 port_num,
 					   enum ib_qp_type qp_type,
 					   struct ib_mad_reg_req *mad_reg_req,
 					   u8 rmpp_version,
 					   ib_mad_send_handler send_handler,
 					   ib_mad_recv_handler recv_handler,
-					   void *context)
+					   void *context,
+					   u32 registration_flags)
 {
 	struct ib_mad_port_private *port_priv;
 	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_reg_req *reg_req = NULL;
 	struct ib_mad_mgmt_class_table *class;
 	struct ib_mad_mgmt_vendor_class_table *vendor;
 	struct ib_mad_mgmt_vendor_class *vendor_class;
 	struct ib_mad_mgmt_method_table *method;
 	int ret2, qpn;
 	unsigned long flags;
 	u8 mgmt_class, vclass;

 	/* Validate parameters */
 	qpn = get_spl_qp_index(qp_type);
-	if (qpn == -1)
+	if (qpn == -1) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: invalid QP Type %d\n",
+			   qp_type);
 		goto error1;
+	}

-	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
+	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: invalid RMPP Version %u\n",
+			   rmpp_version);
 		goto error1;
+	}

 	/* Validate MAD registration request if supplied */
 	if (mad_reg_req) {
-		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
+		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: invalid Class Version %u\n",
+				   mad_reg_req->mgmt_class_version);
 			goto error1;
-		if (!recv_handler)
+		}
+		if (!recv_handler) {
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: no recv_handler\n");
 			goto error1;
+		}
 		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 			/*
 			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 			 * one in this range currently allowed
 			 */
 			if (mad_reg_req->mgmt_class !=
-			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		} else if (mad_reg_req->mgmt_class == 0) {
 			/*
 			 * Class 0 is reserved in IBA and is used for
 			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 			 */
+			dev_notice(&device->dev,
+				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
 			goto error1;
 		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 			/*
 			 * If class is in "new" vendor range,
 			 * ensure supplied OUI is not zero
 			 */
-			if (!is_vendor_oui(mad_reg_req->oui))
+			if (!is_vendor_oui(mad_reg_req->oui)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}
 		/* Make sure class supplied is consistent with RMPP */
 		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
-			if (rmpp_version)
+			if (rmpp_version) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}

 		/* Make sure class supplied is consistent with QP type */
 		if (qp_type == IB_QPT_SMI) {
 			if ((mad_reg_req->mgmt_class !=
 					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 			    (mad_reg_req->mgmt_class !=
-					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		} else {
 			if ((mad_reg_req->mgmt_class ==
 					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 			    (mad_reg_req->mgmt_class ==
-					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+				dev_notice(&device->dev,
+					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
+					   mad_reg_req->mgmt_class);
 				goto error1;
+			}
 		}
 	} else {
 		/* No registration request supplied */
 		if (!send_handler)
 			goto error1;
+		if (registration_flags & IB_MAD_USER_RMPP)
+			goto error1;
 	}
 	/* Validate device and port */
 	port_priv = ib_get_mad_port(device, port_num);
 	if (!port_priv) {
+		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
 		ret = ERR_PTR(-ENODEV);
 		goto error1;
 	}

 	/* Verify the QP requested is supported. For example, Ethernet devices
 	 * will not have QP0 */
 	if (!port_priv->qp_info[qpn].qp) {
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: QP %d not supported\n", qpn);
 		ret = ERR_PTR(-EPROTONOSUPPORT);
 		goto error1;
 	}

 	/* Allocate structures */
 	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 	if (!mad_agent_priv) {
 		ret = ERR_PTR(-ENOMEM);
 		goto error1;
 	}

-	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
-						 IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(mad_agent_priv->agent.mr)) {
-		ret = ERR_PTR(-ENOMEM);
-		goto error2;
-	}

 	if (mad_reg_req) {
 		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
 			goto error3;
 		}
 	}

 	/* Now, fill in the various structures */
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
 	mad_agent_priv->agent.rmpp_version = rmpp_version;
 	mad_agent_priv->agent.device = device;
 	mad_agent_priv->agent.recv_handler = recv_handler;
 	mad_agent_priv->agent.send_handler = send_handler;
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	mad_agent_priv->agent.flags = registration_flags;
 	spin_lock_init(&mad_agent_priv->lock);
 	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions);

[49 lines not shown, in: struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,]
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

 	return &mad_agent_priv->agent;

 error4:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	kfree(reg_req);
 error3:
-	ib_dereg_mr(mad_agent_priv->agent.mr);
-error2:
 	kfree(mad_agent_priv);
 error1:
 	return ret;
 }
 EXPORT_SYMBOL(ib_register_mad_agent);

 static inline int is_snooping_sends(int mad_snoop_flags)
 {

[129 lines not shown, in: static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)]
 	/* Note that we could still be handling received MADs */

 	/*
 	 * Canceling all sends results in dropping received response
 	 * MADs, preventing us from queuing additional work
 	 */
 	cancel_mads(mad_agent_priv);
 	port_priv = mad_agent_priv->qp_info->port_priv;
-	cancel_delayed_work_sync(&mad_agent_priv->timed_work);
+	cancel_delayed_work(&mad_agent_priv->timed_work);

 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	remove_mad_reg_req(mad_agent_priv);
 	list_del(&mad_agent_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

 	flush_workqueue(port_priv->wq);
 	ib_cancel_rmpp_recvs(mad_agent_priv);

 	deref_mad_agent(mad_agent_priv);
 	wait_for_completion(&mad_agent_priv->comp);

 	kfree(mad_agent_priv->reg_req);
-	ib_dereg_mr(mad_agent_priv->agent.mr);
 	kfree(mad_agent_priv);
 }

 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 {
 	struct ib_mad_qp_info *qp_info;
 	unsigned long flags;

[12 lines not shown]
 /*
  * ib_unregister_mad_agent - Unregisters a client from using MAD services
  */
 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_snoop_private *mad_snoop_priv;

-	if (!IS_ERR(mad_agent)) {
 	/* If the TID is zero, the agent can only snoop. */
 	if (mad_agent->hi_tid) {
 		mad_agent_priv = container_of(mad_agent,
 					      struct ib_mad_agent_private,
 					      agent);
 		unregister_mad_agent(mad_agent_priv);
 	} else {
 		mad_snoop_priv = container_of(mad_agent,
 					      struct ib_mad_snoop_private,
 					      agent);
 		unregister_mad_snoop(mad_snoop_priv);
 	}
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);

 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
 	struct ib_mad_queue *mad_queue;
 	unsigned long flags;

[44 lines not shown, in: static void snoop_recv(struct ib_mad_qp_info *qp_info,]
 	for (i = 0; i < qp_info->snoop_table_size; i++) {
 		mad_snoop_priv = qp_info->snoop_table[i];
 		if (!mad_snoop_priv ||
 		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 			continue;

 		atomic_inc(&mad_snoop_priv->refcount);
 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
+		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
 						   mad_recv_wc);
 		deref_snoop_agent(mad_snoop_priv);
 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 	}
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 }

-static void build_smp_wc(struct ib_qp *qp,
-			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
-			 struct ib_wc *wc)
+static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
+			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
 {
 	memset(wc, 0, sizeof *wc);
-	wc->wr_id = wr_id;
+	wc->wr_cqe = cqe;
 	wc->status = IB_WC_SUCCESS;
 	wc->opcode = IB_WC_RECV;
 	wc->pkey_index = pkey_index;
 	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
 	wc->src_qp = IB_QP0;
 	wc->qp = qp;
 	wc->slid = slid;
 	wc->sl = 0;
 	wc->dlid_path_bits = 0;
 	wc->port_num = port_num;
 }
+static size_t mad_priv_size(const struct ib_mad_private *mp)
+{
+	return sizeof(struct ib_mad_private) + mp->mad_size;
+}
+
+static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
+{
+	size_t size = sizeof(struct ib_mad_private) + mad_size;
+	struct ib_mad_private *ret = kzalloc(size, flags);
+
+	if (ret)
+		ret->mad_size = mad_size;
+
+	return ret;
+}
+
+static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
+{
+	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
+}
+
+static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
+{
+	return sizeof(struct ib_grh) + mp->mad_size;
+}
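/*
 * Sketch of the allocation pattern the helpers above introduce (simplified
 * userland model): the receive buffer is now sized per port, since IB MADs
 * are 256 bytes but OPA MADs can be 2048, and the variable-size MAD trails
 * the fixed private header as a flexible array member.
 */
#include <stdio.h>
#include <stdlib.h>

#define GRH_SIZE	40	/* sizeof(struct ib_grh) */

struct mad_private {
	size_t mad_size;
	char mad[];		/* variable-size MAD payload follows */
};

static struct mad_private *alloc_mad(size_t mad_size)
{
	struct mad_private *mp = calloc(1, sizeof(*mp) + mad_size);

	if (mp)
		mp->mad_size = mad_size;
	return mp;
}

int main(void)
{
	struct mad_private *mp = alloc_mad(256);	/* IB-sized MAD */

	if (!mp)
		return 1;
	printf("alloc=%zu dma=%zu\n", sizeof(*mp) + mp->mad_size,
	    GRH_SIZE + mp->mad_size);	/* mirrors mad_priv_dma_size() */
	free(mp);
	return 0;
}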
 /*
  * Return 0 if SMP is to be sent
  * Return 1 if SMP was consumed locally (whether or not solicited)
  * Return < 0 if error
  */
 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 				  struct ib_mad_send_wr_private *mad_send_wr)
 {
 	int ret = 0;
 	struct ib_smp *smp = mad_send_wr->send_buf.mad;
+	struct opa_smp *opa_smp = (struct opa_smp *)smp;
 	unsigned long flags;
 	struct ib_mad_local_private *local;
 	struct ib_mad_private *mad_priv;
 	struct ib_mad_port_private *port_priv;
 	struct ib_mad_agent_private *recv_mad_agent = NULL;
 	struct ib_device *device = mad_agent_priv->agent.device;
 	u8 port_num;
 	struct ib_wc mad_wc;
-	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
+	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
+	u16 out_mad_pkey_index = 0;
+	u16 drslid;
+	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
+				    mad_agent_priv->qp_info->port_priv->port_num);

-	if (device->node_type == RDMA_NODE_IB_SWITCH &&
+	if (rdma_cap_ib_switch(device) &&
 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-		port_num = send_wr->wr.ud.port_num;
+		port_num = send_wr->port_num;
 	else
 		port_num = mad_agent_priv->agent.port_num;

 	/*
 	 * Directed route handling starts if the initial LID routed part of
 	 * a request or the ending LID routed part of a response is empty.
 	 * If we are at the start of the LID routed part, don't update the
 	 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
 	 */
-	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) !=
-	    IB_LID_PERMISSIVE)
-		goto out;
-	if (smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
-	    IB_SMI_DISCARD) {
-		ret = -EINVAL;
-		printk(KERN_ERR PFX "Invalid directed route\n");
-		goto out;
-	}
-
-	/* Check to post send on QP or process locally */
-	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
-	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
-		goto out;
+	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+		u32 opa_drslid;
+
+		if ((opa_get_smp_direction(opa_smp)
+		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
+		     OPA_LID_PERMISSIVE &&
+		     opa_smi_handle_dr_smp_send(opa_smp,
+						rdma_cap_ib_switch(device),
+						port_num) == IB_SMI_DISCARD) {
+			ret = -EINVAL;
+			dev_err(&device->dev, "OPA Invalid directed route\n");
+			goto out;
+		}
+		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
+		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
+		    opa_drslid & 0xffff0000) {
+			ret = -EINVAL;
+			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
+				opa_drslid);
+			goto out;
+		}
+		drslid = (u16)(opa_drslid & 0x0000ffff);
+
+		/* Check to post send on QP or process locally */
+		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
+		    opa_smi_check_local_returning_smp(opa_smp, device) ==
+		    IB_SMI_DISCARD)
+			goto out;
+	} else {
+		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
+		    IB_LID_PERMISSIVE &&
+		    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device),
+					   port_num) == IB_SMI_DISCARD) {
+			ret = -EINVAL;
+			dev_err(&device->dev, "Invalid directed route\n");
+			goto out;
+		}
+		drslid = be16_to_cpu(smp->dr_slid);
+
+		/* Check to post send on QP or process locally */
+		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
+		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
+			goto out;
+	}
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
 	local->recv_mad_agent = NULL;
-	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
+	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for local response MAD\n");
+		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}

 	build_smp_wc(mad_agent_priv->agent.qp,
-		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
-		     send_wr->wr.ud.pkey_index,
-		     send_wr->wr.ud.port_num, &mad_wc);
+		     send_wr->wr.wr_cqe, drslid,
+		     send_wr->pkey_index,
+		     send_wr->port_num, &mad_wc);

+	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
+		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
+					+ mad_send_wr->send_buf.data_len
+					+ sizeof(struct ib_grh);
+	}

 	/* No GRH for DR SMP */
 	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-				  (struct ib_mad *)smp,
-				  (struct ib_mad *)&mad_priv->mad);
+				  (const struct ib_mad_hdr *)smp, mad_size,
+				  (struct ib_mad_hdr *)mad_priv->mad,
+				  &mad_size, &out_mad_pkey_index);
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		if (ib_response_mad(&mad_priv->mad.mad) &&
+		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;
 			/*
 			 * Reference MAD agent until receive
 			 * side of local completion handled
 			 */
 			atomic_inc(&mad_agent_priv->refcount);
 		} else
-			kmem_cache_free(ib_mad_cache, mad_priv);
+			kfree(mad_priv);
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
-		kmem_cache_free(ib_mad_cache, mad_priv);
+		kfree(mad_priv);
 		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 					    mad_agent_priv->agent.port_num);
 		if (port_priv) {
-			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
+			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
 			recv_mad_agent = find_mad_agent(port_priv,
-							&mad_priv->mad.mad);
+							(const struct ib_mad_hdr *)mad_priv->mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
 			/*
 			 * No receiving agent so drop packet and
 			 * generate send completion.
 			 */
-			kmem_cache_free(ib_mad_cache, mad_priv);
+			kfree(mad_priv);
 			break;
 		}
 		local->mad_priv = mad_priv;
 		local->recv_mad_agent = recv_mad_agent;
 		break;
 	default:
-		kmem_cache_free(ib_mad_cache, mad_priv);
+		kfree(mad_priv);
 		kfree(local);
 		ret = -EINVAL;
 		goto out;
 	}

 	local->mad_send_wr = mad_send_wr;
+	if (opa) {
+		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
+		local->return_wc_byte_len = mad_size;
+	}
 	/* Reference MAD agent until send side of local completion handled */
 	atomic_inc(&mad_agent_priv->refcount);
 	/* Queue local completion to local list */
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 	queue_work(mad_agent_priv->qp_info->port_priv->wq,
 		   &mad_agent_priv->local_work);
 	ret = 1;
 out:
 	return ret;
 }
-static int get_pad_size(int hdr_len, int data_len)
+static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
 {
 	int seg_size, pad;

-	seg_size = sizeof(struct ib_mad) - hdr_len;
+	seg_size = mad_size - hdr_len;
 	if (data_len && seg_size) {
 		pad = seg_size - data_len % seg_size;
 		return pad == seg_size ? 0 : pad;
 	} else
 		return seg_size;
 }
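/*
 * Worked example for get_pad_size() above: RMPP payloads are carved into
 * segments of seg_size = mad_size - hdr_len bytes, so the last segment is
 * padded up to a full multiple. With a 256-byte IB MAD and the 56-byte SA
 * header (MAD + RMPP + SA class header), seg_size is 200.
 */
#include <stdio.h>

static int pad_size(int hdr_len, int data_len, int mad_size)
{
	int seg_size = mad_size - hdr_len, pad;

	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	}
	return seg_size;
}

int main(void)
{
	printf("%d\n", pad_size(56, 150, 256));	/* 50: one 200-byte segment */
	printf("%d\n", pad_size(56, 400, 256));	/* 0: exactly two segments */
	return 0;
}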
 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_rmpp_segment *s, *t;

 	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
 		list_del(&s->list);
 		kfree(s);
 	}
 }

 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
-				gfp_t gfp_mask)
+				size_t mad_size, gfp_t gfp_mask)
 {
 	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
 	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
 	struct ib_rmpp_segment *seg = NULL;
 	int left, seg_size, pad;

-	send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
+	send_buf->seg_size = mad_size - send_buf->hdr_len;
+	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
 	seg_size = send_buf->seg_size;
 	pad = send_wr->pad;

 	/* Allocate data segments. */
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
-			       "alloc failed for len %zd, gfp %#x\n",
-			       sizeof (*seg) + seg_size, gfp_mask);
+			dev_err(&send_buf->mad_agent->device->dev,
+				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
+				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
 		seg->num = ++send_buf->seg_count;
 		list_add_tail(&seg->list, &send_wr->rmpp_list);
 	}

 	/* Zero any padding */
 	if (pad)
 		memset(seg->data + seg_size - pad, 0, pad);

 	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
 					  agent.rmpp_version;
 	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

 	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
 					struct ib_rmpp_segment, list);
 	send_wr->last_ack_seg = send_wr->cur_seg;
 	return 0;
 }
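/*
 * Sketch of the segment math in alloc_send_rmpp_list() above: data plus pad
 * is walked in seg_size steps, one allocation per segment, and the 1-based
 * seg->num counter ends up as send_buf->seg_count.
 */
#include <stdio.h>

int main(void)
{
	int mad_size = 256, hdr_len = 56;	/* IB MAD, SA header */
	int seg_size = mad_size - hdr_len;	/* 200-byte segments */
	int data_len = 450;
	int pad = seg_size - data_len % seg_size;	/* 150 */
	int left, seg_count = 0;

	for (left = data_len + pad; left > 0; left -= seg_size)
		seg_count++;	/* one ib_rmpp_segment per pass */

	printf("segs=%d pad=%d\n", seg_count, pad);	/* segs=3 pad=150 */
	return 0;
}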
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
+{
+	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
+}
+EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    u32 remote_qpn, u16 pkey_index,
 					    int rmpp_active,
 					    int hdr_len, int data_len,
-					    gfp_t gfp_mask)
+					    gfp_t gfp_mask,
+					    u8 base_version)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
 	int pad, message_size, ret, size;
 	void *buf;
+	size_t mad_size;
+	bool opa;

 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
 				      agent);

+	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
+
+	if (opa && base_version == OPA_MGMT_BASE_VERSION)
+		mad_size = sizeof(struct opa_mad);
+	else
+		mad_size = sizeof(struct ib_mad);
+
-	pad = get_pad_size(hdr_len, data_len);
+	pad = get_pad_size(hdr_len, data_len, mad_size);
 	message_size = hdr_len + data_len + pad;

-	if ((!mad_agent->rmpp_version &&
-	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
-		return ERR_PTR(-EINVAL);
+	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+		if (!rmpp_active && message_size > mad_size)
+			return ERR_PTR(-EINVAL);
+	} else
+		if (rmpp_active || message_size > mad_size)
+			return ERR_PTR(-EINVAL);

-	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
+	size = rmpp_active ? hdr_len : mad_size;
 	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);

-	mad_send_wr = buf + size;
+	mad_send_wr = (struct ib_mad_send_wr_private *)((char *)buf + size);
 	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
 	mad_send_wr->send_buf.mad = buf;
 	mad_send_wr->send_buf.hdr_len = hdr_len;
 	mad_send_wr->send_buf.data_len = data_len;
 	mad_send_wr->pad = pad;

 	mad_send_wr->mad_agent_priv = mad_agent_priv;
 	mad_send_wr->sg_list[0].length = hdr_len;
-	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
-	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
-	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
-
-	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
-	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
-	mad_send_wr->send_wr.num_sge = 2;
-	mad_send_wr->send_wr.opcode = IB_WR_SEND;
-	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
-	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
-	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
+	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
+
+	/* OPA MADs don't have to be the full 2048 bytes */
+	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
+	    data_len < mad_size - hdr_len)
+		mad_send_wr->sg_list[1].length = data_len;
+	else
+		mad_send_wr->sg_list[1].length = mad_size - hdr_len;
+
+	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
+
+	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
+
+	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
+	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
+	mad_send_wr->send_wr.wr.num_sge = 2;
+	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
+	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
+	mad_send_wr->send_wr.remote_qpn = remote_qpn;
+	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
mad_send_wr->send_wr.pkey_index = pkey_index; | |||||
if (rmpp_active) { | if (rmpp_active) { | ||||
ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask); | ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); | ||||
if (ret) { | if (ret) { | ||||
kfree(buf); | kfree(buf); | ||||
return ERR_PTR(ret); | return ERR_PTR(ret); | ||||
} | } | ||||
} | } | ||||
mad_send_wr->send_buf.mad_agent = mad_agent; | mad_send_wr->send_buf.mad_agent = mad_agent; | ||||
atomic_inc(&mad_agent_priv->refcount); | atomic_inc(&mad_agent_priv->refcount); | ||||
(53 lines elided) | |||||
EXPORT_SYMBOL(ib_get_rmpp_segment); | EXPORT_SYMBOL(ib_get_rmpp_segment); | ||||
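
In ib_create_send_mad() the MAD buffer and the private work-request state share one kzalloc() block, with the private struct placed immediately after the buffer; the added (char *) cast only turns GCC's void-pointer arithmetic extension into standard C. The layout in miniature (user-space sketch with a hypothetical struct):

    #include <stdlib.h>

    struct wr_priv { int seg_count; /* ...tracking state... */ };

    int main(void)
    {
        size_t size = 256;              /* MAD buffer size, illustrative */
        void *buf = calloc(1, size + sizeof(struct wr_priv));
        if (!buf)
            return 1;
        /* Bytes [0, size) hold the MAD; the private struct follows it. */
        struct wr_priv *wr = (struct wr_priv *)((char *)buf + size);
        wr->seg_count = 0;
        free(buf);                      /* a single free releases both */
        return 0;
    }
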
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) | static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) | ||||
{ | { | ||||
if (mad_send_wr->send_buf.seg_count) | if (mad_send_wr->send_buf.seg_count) | ||||
return ib_get_rmpp_segment(&mad_send_wr->send_buf, | return ib_get_rmpp_segment(&mad_send_wr->send_buf, | ||||
mad_send_wr->seg_num); | mad_send_wr->seg_num); | ||||
else | else | ||||
return mad_send_wr->send_buf.mad + | return (char *)mad_send_wr->send_buf.mad + | ||||
mad_send_wr->send_buf.hdr_len; | mad_send_wr->send_buf.hdr_len; | ||||
} | } | ||||
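
The (char *) cast added in ib_get_payload() exists for the same reason: arithmetic on void * relies on a GNU extension, while offsetting through char * is well-defined standard C. For example:

    #include <stdio.h>

    int main(void)
    {
        unsigned char mad[256] = { 0 };
        int hdr_len = 24;
        void *base = mad;
        /* "base + hdr_len" would rely on the GNU void * extension;
         * the cast makes the byte offset explicit and portable. */
        void *payload = (char *)base + hdr_len;
        printf("offset=%d\n", (int)((char *)payload - (char *)base));
        return 0;
    }
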
void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | ||||
{ | { | ||||
struct ib_mad_agent_private *mad_agent_priv; | struct ib_mad_agent_private *mad_agent_priv; | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
(15 lines elided) | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | ||||
struct ib_send_wr *bad_send_wr; | struct ib_send_wr *bad_send_wr; | ||||
struct ib_mad_agent *mad_agent; | struct ib_mad_agent *mad_agent; | ||||
struct ib_sge *sge; | struct ib_sge *sge; | ||||
unsigned long flags; | unsigned long flags; | ||||
int ret; | int ret; | ||||
/* Set WR ID to find mad_send_wr upon completion */ | /* Set WR ID to find mad_send_wr upon completion */ | ||||
qp_info = mad_send_wr->mad_agent_priv->qp_info; | qp_info = mad_send_wr->mad_agent_priv->qp_info; | ||||
mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; | |||||
mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | ||||
mad_send_wr->mad_list.cqe.done = ib_mad_send_done; | |||||
mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; | |||||
mad_agent = mad_send_wr->send_buf.mad_agent; | mad_agent = mad_send_wr->send_buf.mad_agent; | ||||
sge = mad_send_wr->sg_list; | sge = mad_send_wr->sg_list; | ||||
sge[0].addr = ib_dma_map_single(mad_agent->device, | sge[0].addr = ib_dma_map_single(mad_agent->device, | ||||
mad_send_wr->send_buf.mad, | mad_send_wr->send_buf.mad, | ||||
sge[0].length, | sge[0].length, | ||||
DMA_TO_DEVICE); | DMA_TO_DEVICE); | ||||
if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) | ||||
return -ENOMEM; | return -ENOMEM; | ||||
mad_send_wr->header_mapping = sge[0].addr; | |||||
sge[1].addr = ib_dma_map_single(mad_agent->device, | sge[1].addr = ib_dma_map_single(mad_agent->device, | ||||
ib_get_payload(mad_send_wr), | ib_get_payload(mad_send_wr), | ||||
sge[1].length, | sge[1].length, | ||||
DMA_TO_DEVICE); | DMA_TO_DEVICE); | ||||
if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { | if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { | ||||
ret = -ENOMEM; | ib_dma_unmap_single(mad_agent->device, | ||||
goto dma1_err; | mad_send_wr->header_mapping, | ||||
sge[0].length, DMA_TO_DEVICE); | |||||
return -ENOMEM; | |||||
} | } | ||||
mad_send_wr->header_mapping = sge[0].addr; | |||||
mad_send_wr->payload_mapping = sge[1].addr; | mad_send_wr->payload_mapping = sge[1].addr; | ||||
spin_lock_irqsave(&qp_info->send_queue.lock, flags); | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | ||||
if (qp_info->send_queue.count < qp_info->send_queue.max_active) { | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { | ||||
ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, | ||||
&bad_send_wr); | &bad_send_wr); | ||||
list = &qp_info->send_queue.list; | list = &qp_info->send_queue.list; | ||||
} else { | } else { | ||||
ret = 0; | ret = 0; | ||||
list = &qp_info->overflow_list; | list = &qp_info->overflow_list; | ||||
} | } | ||||
if (!ret) { | if (!ret) { | ||||
qp_info->send_queue.count++; | qp_info->send_queue.count++; | ||||
list_add_tail(&mad_send_wr->mad_list.list, list); | list_add_tail(&mad_send_wr->mad_list.list, list); | ||||
} | } | ||||
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | ||||
if (ret) { | |||||
if (!ret) | |||||
return 0; | |||||
ib_dma_unmap_single(mad_agent->device, | ib_dma_unmap_single(mad_agent->device, | ||||
mad_send_wr->header_mapping, | mad_send_wr->header_mapping, | ||||
sge[1].length, DMA_TO_DEVICE); | sge[0].length, DMA_TO_DEVICE); | ||||
dma1_err: | |||||
ib_dma_unmap_single(mad_agent->device, | ib_dma_unmap_single(mad_agent->device, | ||||
mad_send_wr->payload_mapping, | mad_send_wr->payload_mapping, | ||||
sge[0].length, DMA_TO_DEVICE); | sge[1].length, DMA_TO_DEVICE); | ||||
return ret; | |||||
} | } | ||||
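
ib_send_mad() now unwinds inline: if mapping the payload fails it unmaps only the header, and if the post fails it unmaps both — each with the length it was mapped with (the old error path paired header_mapping with sge[1].length and vice versa). The acquire-then-unwind shape, as a plain C sketch with stand-in map/unmap helpers:

    #include <stdio.h>

    static int map(const char *what)    { printf("map %s\n", what); return 0; }
    static void unmap(const char *what) { printf("unmap %s\n", what); }

    static int send_one(int post_ret)
    {
        if (map("header"))
            return -1;
        if (map("payload")) {
            unmap("header");            /* undo only what succeeded */
            return -1;
        }
        if (post_ret < 0) {             /* post failed: undo both */
            unmap("header");
            unmap("payload");
            return post_ret;
        }
        return 0;
    }

    int main(void)
    {
        send_one(-1);                   /* exercise the failure path */
        return 0;
    }
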
/* | |||||
* Send SA MAD that passed congestion control | |||||
*/ | |||||
static int send_sa_cc_mad(struct ib_mad_send_wr_private *mad_send_wr, | |||||
u32 timeout_ms, u32 retries_left) | |||||
{ | |||||
int ret; | |||||
unsigned long flags; | |||||
struct ib_mad_agent_private *mad_agent_priv; | |||||
mad_agent_priv = mad_send_wr->mad_agent_priv; | |||||
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | |||||
mad_send_wr->retries_left = retries_left; | |||||
mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); | |||||
/* Reference MAD agent until send completes */ | |||||
atomic_inc(&mad_agent_priv->refcount); | |||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | |||||
list_add_tail(&mad_send_wr->agent_list, | |||||
&mad_agent_priv->send_list); | |||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |||||
ret = ib_send_mad(mad_send_wr); | |||||
if (ret < 0) { | |||||
/* Fail send request */ | |||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | |||||
list_del(&mad_send_wr->agent_list); | |||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |||||
atomic_dec(&mad_agent_priv->refcount); | |||||
} | |||||
return ret; | return ret; | ||||
} | } | ||||
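
send_sa_cc_mad() — part of this tree's SA congestion-control handling rather than the upstream MAD core — follows the same discipline as the normal path below: take the agent reference and link onto send_list under the lock before posting, and undo both if the post fails so no stale entry can be completed later. The idiom reduced to plain C (single-threaded demo, hypothetical names):

    #include <stdio.h>

    static int refcount, queued;

    static int post(void) { return -1; }    /* pretend ib_send_mad() failed */

    static int submit(void)
    {
        int ret;

        refcount++;                     /* hold the agent until completion */
        queued++;                       /* list_add_tail(..., send_list) */
        ret = post();
        if (ret < 0) {                  /* roll back in reverse order */
            queued--;
            refcount--;
        }
        return ret;
    }

    int main(void)
    {
        submit();
        printf("refcount=%d queued=%d\n", refcount, queued);    /* 0 0 */
        return 0;
    }
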
/* | /* | ||||
* ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated | ||||
* with the registered client | * with the registered client | ||||
*/ | */ | ||||
int ib_post_send_mad(struct ib_mad_send_buf *send_buf, | int ib_post_send_mad(struct ib_mad_send_buf *send_buf, | ||||
(28 lines elided) | for (; send_buf; send_buf = next_send_buf) { | ||||
} | } | ||||
/* | /* | ||||
* Save pointer to next work request to post in case the | * Save pointer to next work request to post in case the | ||||
* current one completes, and the user modifies the work | * current one completes, and the user modifies the work | ||||
* request associated with the completion | * request associated with the completion | ||||
*/ | */ | ||||
next_send_buf = send_buf->next; | next_send_buf = send_buf->next; | ||||
mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; | mad_send_wr->send_wr.ah = send_buf->ah; | ||||
if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == | ||||
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | ||||
ret = handle_outgoing_dr_smp(mad_agent_priv, | ret = handle_outgoing_dr_smp(mad_agent_priv, | ||||
mad_send_wr); | mad_send_wr); | ||||
if (ret < 0) /* error */ | if (ret < 0) /* error */ | ||||
goto error; | goto error; | ||||
else if (ret == 1) /* locally consumed */ | else if (ret == 1) /* locally consumed */ | ||||
continue; | continue; | ||||
} | } | ||||
mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; | mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; | ||||
/* Timeout will be updated after send completes */ | /* Timeout will be updated after send completes */ | ||||
mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); | mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); | ||||
mad_send_wr->max_retries = send_buf->retries; | mad_send_wr->max_retries = send_buf->retries; | ||||
mad_send_wr->retries_left = send_buf->retries; | mad_send_wr->retries_left = send_buf->retries; | ||||
send_buf->retries = 0; | send_buf->retries = 0; | ||||
/* Reference for work request to QP + response */ | /* Reference for work request to QP + response */ | ||||
mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); | ||||
mad_send_wr->status = IB_WC_SUCCESS; | mad_send_wr->status = IB_WC_SUCCESS; | ||||
if (is_sa_cc_mad(mad_send_wr)) { | |||||
mad_send_wr->is_sa_cc_mad = 1; | |||||
ret = sa_cc_mad_send(mad_send_wr); | |||||
if (ret < 0) | |||||
goto error; | |||||
} else { | |||||
/* Reference MAD agent until send completes */ | /* Reference MAD agent until send completes */ | ||||
atomic_inc(&mad_agent_priv->refcount); | atomic_inc(&mad_agent_priv->refcount); | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
list_add_tail(&mad_send_wr->agent_list, | list_add_tail(&mad_send_wr->agent_list, | ||||
&mad_agent_priv->send_list); | &mad_agent_priv->send_list); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
if (mad_agent_priv->agent.rmpp_version) { | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { | ||||
ret = ib_send_rmpp_mad(mad_send_wr); | ret = ib_send_rmpp_mad(mad_send_wr); | ||||
if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) | if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) | ||||
ret = ib_send_mad(mad_send_wr); | ret = ib_send_mad(mad_send_wr); | ||||
} else | } else | ||||
ret = ib_send_mad(mad_send_wr); | ret = ib_send_mad(mad_send_wr); | ||||
if (ret < 0) { | if (ret < 0) { | ||||
/* Fail send request */ | /* Fail send request */ | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
list_del(&mad_send_wr->agent_list); | list_del(&mad_send_wr->agent_list); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
atomic_dec(&mad_agent_priv->refcount); | atomic_dec(&mad_agent_priv->refcount); | ||||
goto error; | goto error; | ||||
} | } | ||||
} | } | ||||
} | |||||
return 0; | return 0; | ||||
error: | error: | ||||
if (bad_send_buf) | if (bad_send_buf) | ||||
*bad_send_buf = send_buf; | *bad_send_buf = send_buf; | ||||
return ret; | return ret; | ||||
} | } | ||||
EXPORT_SYMBOL(ib_post_send_mad); | EXPORT_SYMBOL(ib_post_send_mad); | ||||
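
The loop in ib_post_send_mad() captures next_send_buf before touching the current buffer because, as the comment notes, the send may complete — and the user may recycle the request — before the loop advances. The same defensive traversal in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { struct buf *next; int id; };

    int main(void)
    {
        struct buf *b2 = malloc(sizeof(*b2));
        struct buf *b1 = malloc(sizeof(*b1));
        if (!b1 || !b2)
            return 1;
        *b1 = (struct buf){ .next = b2, .id = 1 };
        *b2 = (struct buf){ .next = NULL, .id = 2 };

        for (struct buf *b = b1, *next; b != NULL; b = next) {
            next = b->next;             /* capture before b can vanish */
            printf("posting %d\n", b->id);
            free(b);                    /* models completion consuming b */
        }
        return 0;
    }
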
(15 lines elided) | list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, | ||||
&free_list, list) { | &free_list, list) { | ||||
mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, | mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, | ||||
recv_buf); | recv_buf); | ||||
mad_priv_hdr = container_of(mad_recv_wc, | mad_priv_hdr = container_of(mad_recv_wc, | ||||
struct ib_mad_private_header, | struct ib_mad_private_header, | ||||
recv_wc); | recv_wc); | ||||
priv = container_of(mad_priv_hdr, struct ib_mad_private, | priv = container_of(mad_priv_hdr, struct ib_mad_private, | ||||
header); | header); | ||||
kmem_cache_free(ib_mad_cache, priv); | kfree(priv); | ||||
} | } | ||||
} | } | ||||
EXPORT_SYMBOL(ib_free_recv_mad); | EXPORT_SYMBOL(ib_free_recv_mad); | ||||
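
ib_free_recv_mad() climbs from each receive buffer back to the enclosing ib_mad_private with three container_of() steps, then releases it with kfree() now that receive MADs come from plain kmalloc() instead of the old kmem_cache. container_of() itself is nothing more than offsetof() arithmetic:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct outer {
        int tag;
        struct { int x; } inner;
    };

    int main(void)
    {
        struct outer o = { .tag = 42 };
        void *member = &o.inner;        /* all a callback might receive */
        struct outer *back = container_of(member, struct outer, inner);
        printf("tag=%d\n", back->tag);  /* 42: enclosing object recovered */
        return 0;
    }
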
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, | ||||
u8 rmpp_version, | u8 rmpp_version, | ||||
ib_mad_send_handler send_handler, | ib_mad_send_handler send_handler, | ||||
ib_mad_recv_handler recv_handler, | ib_mad_recv_handler recv_handler, | ||||
void *context) | void *context) | ||||
{ | { | ||||
return ERR_PTR(-EINVAL); /* XXX: for now */ | return ERR_PTR(-EINVAL); /* XXX: for now */ | ||||
} | } | ||||
EXPORT_SYMBOL(ib_redirect_mad_qp); | EXPORT_SYMBOL(ib_redirect_mad_qp); | ||||
int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | ||||
struct ib_wc *wc) | struct ib_wc *wc) | ||||
{ | { | ||||
printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n"); | dev_err(&mad_agent->device->dev, | ||||
"ib_process_mad_wc() not implemented yet\n"); | |||||
return 0; | return 0; | ||||
} | } | ||||
EXPORT_SYMBOL(ib_process_mad_wc); | EXPORT_SYMBOL(ib_process_mad_wc); | ||||
static int method_in_use(struct ib_mad_mgmt_method_table **method, | static int method_in_use(struct ib_mad_mgmt_method_table **method, | ||||
struct ib_mad_reg_req *mad_reg_req) | struct ib_mad_reg_req *mad_reg_req) | ||||
{ | { | ||||
int i; | int i; | ||||
for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { | ||||
if ((*method)->agent[i]) { | if ((*method)->agent[i]) { | ||||
printk(KERN_ERR PFX "Method %d already in use\n", i); | pr_err("Method %d already in use\n", i); | ||||
return -EINVAL; | return -EINVAL; | ||||
} | } | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
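
method_in_use() just walks the set bits of the registration request's method mask (IB_MGMT_MAX_METHODS is 128 upstream); only the logging call changes here. A user-space equivalent of that for_each_set_bit() walk:

    #include <stdio.h>

    #define MAX_METHODS   128
    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
        unsigned long mask[MAX_METHODS / BITS_PER_LONG + 1] = { 0 };

        mask[1 / BITS_PER_LONG]  |= 1UL << (1 % BITS_PER_LONG);  /* method 1 */
        mask[66 / BITS_PER_LONG] |= 1UL << (66 % BITS_PER_LONG); /* method 66 */

        for (int i = 0; i < MAX_METHODS; i++)   /* for_each_set_bit() analog */
            if (mask[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                printf("method %d registered\n", i);
        return 0;
    }
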
static int allocate_method_table(struct ib_mad_mgmt_method_table **method) | static int allocate_method_table(struct ib_mad_mgmt_method_table **method) | ||||
{ | { | ||||
/* Allocate management method table */ | /* Allocate management method table */ | ||||
*method = kzalloc(sizeof **method, GFP_ATOMIC); | *method = kzalloc(sizeof **method, GFP_ATOMIC); | ||||
if (!*method) { | if (!*method) { | ||||
printk(KERN_ERR PFX "No memory for " | pr_err("No memory for ib_mad_mgmt_method_table\n"); | ||||
"ib_mad_mgmt_method_table\n"); | |||||
return -ENOMEM; | return -ENOMEM; | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
/* | /* | ||||
* Check to see if there are any methods still in use | * Check to see if there are any methods still in use | ||||
(27 lines elided) | static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) | ||||
for (i = 0; i < MAX_MGMT_OUI; i++) | for (i = 0; i < MAX_MGMT_OUI; i++) | ||||
if (vendor_class->method_table[i]) | if (vendor_class->method_table[i]) | ||||
return 1; | return 1; | ||||
return 0; | return 0; | ||||
} | } | ||||
static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, | static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, | ||||
char *oui) | const char *oui) | ||||
{ | { | ||||
int i; | int i; | ||||
for (i = 0; i < MAX_MGMT_OUI; i++) | for (i = 0; i < MAX_MGMT_OUI; i++) | ||||
/* Is there matching OUI for this vendor class ? */ | /* Is there matching OUI for this vendor class ? */ | ||||
if (!memcmp(vendor_class->oui[i], oui, 3)) | if (!memcmp(vendor_class->oui[i], oui, 3)) | ||||
return i; | return i; | ||||
(34 lines elided) | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | ||||
int i, ret; | int i, ret; | ||||
port_priv = agent_priv->qp_info->port_priv; | port_priv = agent_priv->qp_info->port_priv; | ||||
class = &port_priv->version[mad_reg_req->mgmt_class_version].class; | class = &port_priv->version[mad_reg_req->mgmt_class_version].class; | ||||
if (!*class) { | if (!*class) { | ||||
/* Allocate management class table for "new" class version */ | /* Allocate management class table for "new" class version */ | ||||
*class = kzalloc(sizeof **class, GFP_ATOMIC); | *class = kzalloc(sizeof **class, GFP_ATOMIC); | ||||
if (!*class) { | if (!*class) { | ||||
printk(KERN_ERR PFX "No memory for " | dev_err(&agent_priv->agent.device->dev, | ||||
"ib_mad_mgmt_class_table\n"); | "No memory for ib_mad_mgmt_class_table\n"); | ||||
ret = -ENOMEM; | ret = -ENOMEM; | ||||
goto error1; | goto error1; | ||||
} | } | ||||
/* Allocate method table for this management class */ | /* Allocate method table for this management class */ | ||||
method = &(*class)->method_table[mgmt_class]; | method = &(*class)->method_table[mgmt_class]; | ||||
if ((ret = allocate_method_table(method))) | if ((ret = allocate_method_table(method))) | ||||
goto error2; | goto error2; | ||||
(49 lines elided) | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, | ||||
vclass = vendor_class_index(mad_reg_req->mgmt_class); | vclass = vendor_class_index(mad_reg_req->mgmt_class); | ||||
port_priv = agent_priv->qp_info->port_priv; | port_priv = agent_priv->qp_info->port_priv; | ||||
vendor_table = &port_priv->version[ | vendor_table = &port_priv->version[ | ||||
mad_reg_req->mgmt_class_version].vendor; | mad_reg_req->mgmt_class_version].vendor; | ||||
if (!*vendor_table) { | if (!*vendor_table) { | ||||
/* Allocate mgmt vendor class table for "new" class version */ | /* Allocate mgmt vendor class table for "new" class version */ | ||||
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); | vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); | ||||
if (!vendor) { | if (!vendor) { | ||||
printk(KERN_ERR PFX "No memory for " | dev_err(&agent_priv->agent.device->dev, | ||||
"ib_mad_mgmt_vendor_class_table\n"); | "No memory for ib_mad_mgmt_vendor_class_table\n"); | ||||
goto error1; | goto error1; | ||||
} | } | ||||
*vendor_table = vendor; | *vendor_table = vendor; | ||||
} | } | ||||
if (!(*vendor_table)->vendor_class[vclass]) { | if (!(*vendor_table)->vendor_class[vclass]) { | ||||
/* Allocate table for this management vendor class */ | /* Allocate table for this management vendor class */ | ||||
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); | vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); | ||||
if (!vendor_class) { | if (!vendor_class) { | ||||
printk(KERN_ERR PFX "No memory for " | dev_err(&agent_priv->agent.device->dev, | ||||
"ib_mad_mgmt_vendor_class\n"); | "No memory for ib_mad_mgmt_vendor_class\n"); | ||||
goto error2; | goto error2; | ||||
} | } | ||||
(*vendor_table)->vendor_class[vclass] = vendor_class; | (*vendor_table)->vendor_class[vclass] = vendor_class; | ||||
} | } | ||||
for (i = 0; i < MAX_MGMT_OUI; i++) { | for (i = 0; i < MAX_MGMT_OUI; i++) { | ||||
/* Is there matching OUI for this vendor class ? */ | /* Is there matching OUI for this vendor class ? */ | ||||
if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], | if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], | ||||
(14 lines elided) | if (!is_vendor_oui((*vendor_table)->vendor_class[ | ||||
/* Allocate method table for this OUI */ | /* Allocate method table for this OUI */ | ||||
if ((ret = allocate_method_table(method))) | if ((ret = allocate_method_table(method))) | ||||
goto error3; | goto error3; | ||||
memcpy((*vendor_table)->vendor_class[vclass]->oui[i], | memcpy((*vendor_table)->vendor_class[vclass]->oui[i], | ||||
mad_reg_req->oui, 3); | mad_reg_req->oui, 3); | ||||
goto check_in_use; | goto check_in_use; | ||||
} | } | ||||
} | } | ||||
printk(KERN_ERR PFX "All OUI slots in use\n"); | dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); | ||||
goto error3; | goto error3; | ||||
check_in_use: | check_in_use: | ||||
/* Now, make sure methods are not already in use */ | /* Now, make sure methods are not already in use */ | ||||
if (method_in_use(method, mad_reg_req)) | if (method_in_use(method, mad_reg_req)) | ||||
goto error4; | goto error4; | ||||
/* Finally, add in methods being registered */ | /* Finally, add in methods being registered */ | ||||
(53 lines elided) | static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | ||||
method = class->method_table[mgmt_class]; | method = class->method_table[mgmt_class]; | ||||
if (method) { | if (method) { | ||||
/* Remove any methods for this mad agent */ | /* Remove any methods for this mad agent */ | ||||
remove_methods_mad_agent(method, agent_priv); | remove_methods_mad_agent(method, agent_priv); | ||||
/* Now, check to see if there are any methods still in use */ | /* Now, check to see if there are any methods still in use */ | ||||
if (!check_method_table(method)) { | if (!check_method_table(method)) { | ||||
/* If not, release management method table */ | /* If not, release management method table */ | ||||
kfree(method); | kfree(method); | ||||
class->method_table[mgmt_class] = NULL; | class->method_table[mgmt_class] = NULL; | ||||
/* Any management classes left ? */ | /* Any management classes left ? */ | ||||
if (!check_class_table(class)) { | if (!check_class_table(class)) { | ||||
/* If not, release management class table */ | /* If not, release management class table */ | ||||
kfree(class); | kfree(class); | ||||
port_priv->version[ | port_priv->version[ | ||||
agent_priv->reg_req-> | agent_priv->reg_req-> | ||||
mgmt_class_version].class = NULL; | mgmt_class_version].class = NULL; | ||||
} | } | ||||
} | } | ||||
(48 lines elided) | vendor_check: | ||||
} | } | ||||
out: | out: | ||||
return; | return; | ||||
} | } | ||||
static struct ib_mad_agent_private * | static struct ib_mad_agent_private * | ||||
find_mad_agent(struct ib_mad_port_private *port_priv, | find_mad_agent(struct ib_mad_port_private *port_priv, | ||||
struct ib_mad *mad) | const struct ib_mad_hdr *mad_hdr) | ||||
{ | { | ||||
struct ib_mad_agent_private *mad_agent = NULL; | struct ib_mad_agent_private *mad_agent = NULL; | ||||
unsigned long flags; | unsigned long flags; | ||||
spin_lock_irqsave(&port_priv->reg_lock, flags); | spin_lock_irqsave(&port_priv->reg_lock, flags); | ||||
if (ib_response_mad(mad)) { | if (ib_response_mad(mad_hdr)) { | ||||
u32 hi_tid; | u32 hi_tid; | ||||
struct ib_mad_agent_private *entry; | struct ib_mad_agent_private *entry; | ||||
/* | /* | ||||
* Routing is based on high 32 bits of transaction ID | * Routing is based on high 32 bits of transaction ID | ||||
* of MAD. | * of MAD. | ||||
*/ | */ | ||||
hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; | hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; | ||||
list_for_each_entry(entry, &port_priv->agent_list, agent_list) { | list_for_each_entry(entry, &port_priv->agent_list, agent_list) { | ||||
if (entry->agent.hi_tid == hi_tid) { | if (entry->agent.hi_tid == hi_tid) { | ||||
mad_agent = entry; | mad_agent = entry; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
} else { | } else { | ||||
struct ib_mad_mgmt_class_table *class; | struct ib_mad_mgmt_class_table *class; | ||||
struct ib_mad_mgmt_method_table *method; | struct ib_mad_mgmt_method_table *method; | ||||
struct ib_mad_mgmt_vendor_class_table *vendor; | struct ib_mad_mgmt_vendor_class_table *vendor; | ||||
struct ib_mad_mgmt_vendor_class *vendor_class; | struct ib_mad_mgmt_vendor_class *vendor_class; | ||||
struct ib_vendor_mad *vendor_mad; | const struct ib_vendor_mad *vendor_mad; | ||||
int index; | int index; | ||||
/* | /* | ||||
* Routing is based on version, class, and method | * Routing is based on version, class, and method | ||||
* For "newer" vendor MADs, also based on OUI | * For "newer" vendor MADs, also based on OUI | ||||
*/ | */ | ||||
if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) | if (mad_hdr->class_version >= MAX_MGMT_VERSION) | ||||
goto out; | goto out; | ||||
if (!is_vendor_class(mad->mad_hdr.mgmt_class)) { | if (!is_vendor_class(mad_hdr->mgmt_class)) { | ||||
class = port_priv->version[ | class = port_priv->version[ | ||||
mad->mad_hdr.class_version].class; | mad_hdr->class_version].class; | ||||
if (!class) | if (!class) | ||||
goto out; | goto out; | ||||
if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= | if (convert_mgmt_class(mad_hdr->mgmt_class) >= | ||||
IB_MGMT_MAX_METHODS) | IB_MGMT_MAX_METHODS) | ||||
goto out; | goto out; | ||||
method = class->method_table[convert_mgmt_class( | method = class->method_table[convert_mgmt_class( | ||||
mad->mad_hdr.mgmt_class)]; | mad_hdr->mgmt_class)]; | ||||
if (method) | if (method) | ||||
mad_agent = method->agent[mad->mad_hdr.method & | mad_agent = method->agent[mad_hdr->method & | ||||
~IB_MGMT_METHOD_RESP]; | ~IB_MGMT_METHOD_RESP]; | ||||
} else { | } else { | ||||
vendor = port_priv->version[ | vendor = port_priv->version[ | ||||
mad->mad_hdr.class_version].vendor; | mad_hdr->class_version].vendor; | ||||
if (!vendor) | if (!vendor) | ||||
goto out; | goto out; | ||||
vendor_class = vendor->vendor_class[vendor_class_index( | vendor_class = vendor->vendor_class[vendor_class_index( | ||||
mad->mad_hdr.mgmt_class)]; | mad_hdr->mgmt_class)]; | ||||
if (!vendor_class) | if (!vendor_class) | ||||
goto out; | goto out; | ||||
/* Find matching OUI */ | /* Find matching OUI */ | ||||
vendor_mad = (struct ib_vendor_mad *)mad; | vendor_mad = (const struct ib_vendor_mad *)mad_hdr; | ||||
index = find_vendor_oui(vendor_class, vendor_mad->oui); | index = find_vendor_oui(vendor_class, vendor_mad->oui); | ||||
if (index == -1) | if (index == -1) | ||||
goto out; | goto out; | ||||
method = vendor_class->method_table[index]; | method = vendor_class->method_table[index]; | ||||
if (method) { | if (method) { | ||||
mad_agent = method->agent[mad->mad_hdr.method & | mad_agent = method->agent[mad_hdr->method & | ||||
~IB_MGMT_METHOD_RESP]; | ~IB_MGMT_METHOD_RESP]; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (mad_agent) { | if (mad_agent) { | ||||
if (mad_agent->agent.recv_handler) | if (mad_agent->agent.recv_handler) | ||||
atomic_inc(&mad_agent->refcount); | atomic_inc(&mad_agent->refcount); | ||||
else { | else { | ||||
printk(KERN_NOTICE PFX "No receive handler for client " | dev_notice(&port_priv->device->dev, | ||||
"%p on port %d\n", | "No receive handler for client %p on port %d\n", | ||||
&mad_agent->agent, port_priv->port_num); | &mad_agent->agent, port_priv->port_num); | ||||
mad_agent = NULL; | mad_agent = NULL; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
spin_unlock_irqrestore(&port_priv->reg_lock, flags); | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | ||||
return mad_agent; | return mad_agent; | ||||
} | } | ||||
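
find_mad_agent() needs only the header because routing is header-driven: responses go to the agent whose registration id sits in the high 32 bits of the 64-bit transaction ID (stamped there at send time), while requests are dispatched through the version/class/method tables. The TID split, ignoring the big-endian wire format that be64_to_cpu() handles:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hi_tid   = 7;          /* agent id assigned at registration */
        uint32_t user_tid = 0x1234;     /* low half chosen by the client */
        uint64_t tid = ((uint64_t)hi_tid << 32) | user_tid;

        /* Receive side: route the response to its owning agent. */
        printf("agent id = %u\n", (uint32_t)(tid >> 32));
        return 0;
    }
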
static int validate_mad(struct ib_mad *mad, u32 qp_num) | static int validate_mad(const struct ib_mad_hdr *mad_hdr, | ||||
const struct ib_mad_qp_info *qp_info, | |||||
bool opa) | |||||
{ | { | ||||
int valid = 0; | int valid = 0; | ||||
u32 qp_num = qp_info->qp->qp_num; | |||||
/* Make sure MAD base version is understood */ | /* Make sure MAD base version is understood */ | ||||
if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { | if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && | ||||
printk(KERN_ERR PFX "MAD received with unsupported base " | (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { | ||||
"version %d\n", mad->mad_hdr.base_version); | pr_err("MAD received with unsupported base version %d %s\n", | ||||
mad_hdr->base_version, opa ? "(opa)" : ""); | |||||
goto out; | goto out; | ||||
} | } | ||||
/* Filter SMI packets sent to other than QP0 */ | /* Filter SMI packets sent to other than QP0 */ | ||||
if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || | if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || | ||||
(mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { | (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { | ||||
if (qp_num == 0) | if (qp_num == 0) | ||||
valid = 1; | valid = 1; | ||||
} else { | } else { | ||||
/* CM attributes other than ClassPortInfo only use Send method */ | |||||
if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && | |||||
(mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && | |||||
(mad_hdr->method != IB_MGMT_METHOD_SEND)) | |||||
goto out; | |||||
/* Filter GSI packets sent to QP0 */ | /* Filter GSI packets sent to QP0 */ | ||||
if (qp_num != 0) | if (qp_num != 0) | ||||
valid = 1; | valid = 1; | ||||
} | } | ||||
out: | out: | ||||
return valid; | return valid; | ||||
} | } | ||||
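
Beyond accepting OPA's base version on OPA-capable ports and rejecting non-Send CM attributes other than ClassPortInfo, validate_mad() keeps the long-standing QP filter: subnet-management classes belong on QP0, everything else on QP1/GSI. As a tiny predicate (class values per the IBA spec, shown informatively):

    #include <stdbool.h>
    #include <stdio.h>

    #define CLS_SUBN_LID_ROUTED 0x01    /* IB_MGMT_CLASS_SUBN_LID_ROUTED */
    #define CLS_SUBN_DIRECTED   0x81    /* IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */

    static bool class_ok_for_qp(unsigned cls, unsigned qp_num)
    {
        bool smi = (cls == CLS_SUBN_LID_ROUTED || cls == CLS_SUBN_DIRECTED);
        return smi ? (qp_num == 0) : (qp_num != 0);
    }

    int main(void)
    {
        printf("%d %d\n",
               class_ok_for_qp(CLS_SUBN_DIRECTED, 0),   /* 1: SMP on QP0 */
               class_ok_for_qp(CLS_SUBN_DIRECTED, 1));  /* 0: SMP on GSI */
        return 0;
    }
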
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, | static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, | ||||
struct ib_mad_hdr *mad_hdr) | const struct ib_mad_hdr *mad_hdr) | ||||
{ | { | ||||
struct ib_rmpp_mad *rmpp_mad; | const struct ib_rmpp_mad *rmpp_mad; | ||||
rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; | rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr; | ||||
return !mad_agent_priv->agent.rmpp_version || | return !mad_agent_priv->agent.rmpp_version || | ||||
!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || | |||||
!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | ||||
IB_MGMT_RMPP_FLAG_ACTIVE) || | IB_MGMT_RMPP_FLAG_ACTIVE) || | ||||
(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | ||||
} | } | ||||
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr, | static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, | ||||
struct ib_mad_recv_wc *rwc) | const struct ib_mad_recv_wc *rwc) | ||||
{ | { | ||||
return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class == | return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == | ||||
rwc->recv_buf.mad->mad_hdr.mgmt_class; | rwc->recv_buf.mad->mad_hdr.mgmt_class; | ||||
} | } | ||||
static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, | static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, | ||||
struct ib_mad_send_wr_private *wr, | const struct ib_mad_send_wr_private *wr, | ||||
struct ib_mad_recv_wc *rwc ) | const struct ib_mad_recv_wc *rwc ) | ||||
{ | { | ||||
struct ib_ah_attr attr; | struct ib_ah_attr attr; | ||||
u8 send_resp, rcv_resp; | u8 send_resp, rcv_resp; | ||||
union ib_gid sgid; | union ib_gid sgid; | ||||
struct ib_device *device = mad_agent_priv->agent.device; | struct ib_device *device = mad_agent_priv->agent.device; | ||||
u8 port_num = mad_agent_priv->agent.port_num; | u8 port_num = mad_agent_priv->agent.port_num; | ||||
u8 lmc; | u8 lmc; | ||||
send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad); | send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); | ||||
rcv_resp = ib_response_mad(rwc->recv_buf.mad); | rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); | ||||
if (send_resp == rcv_resp) | if (send_resp == rcv_resp) | ||||
/* both requests, or both responses. GIDs different */ | /* both requests, or both responses. GIDs different */ | ||||
return 0; | return 0; | ||||
if (ib_query_ah(wr->send_buf.ah, &attr)) | if (ib_query_ah(wr->send_buf.ah, &attr)) | ||||
/* Assume not equal, to avoid false positives. */ | /* Assume not equal, to avoid false positives. */ | ||||
return 0; | return 0; | ||||
if (!!(attr.ah_flags & IB_AH_GRH) != | if (!!(attr.ah_flags & IB_AH_GRH) != | ||||
!!(rwc->wc->wc_flags & IB_WC_GRH)) | !!(rwc->wc->wc_flags & IB_WC_GRH)) | ||||
/* one has GID, other does not. Assume different */ | /* one has GID, other does not. Assume different */ | ||||
return 0; | return 0; | ||||
if (!send_resp && rcv_resp) { | if (!send_resp && rcv_resp) { | ||||
/* is request/response. */ | /* is request/response. */ | ||||
if (!(attr.ah_flags & IB_AH_GRH)) { | if (!(attr.ah_flags & IB_AH_GRH)) { | ||||
if (ib_get_cached_lmc(device, port_num, &lmc)) | if (ib_get_cached_lmc(device, port_num, &lmc)) | ||||
return 0; | return 0; | ||||
return (!lmc || !((attr.src_path_bits ^ | return (!lmc || !((attr.src_path_bits ^ | ||||
rwc->wc->dlid_path_bits) & | rwc->wc->dlid_path_bits) & | ||||
((1 << lmc) - 1))); | ((1 << lmc) - 1))); | ||||
} else { | } else { | ||||
if (ib_get_cached_gid(device, port_num, | if (ib_get_cached_gid(device, port_num, | ||||
attr.grh.sgid_index, &sgid)) | attr.grh.sgid_index, &sgid, NULL)) | ||||
return 0; | return 0; | ||||
return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, | return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, | ||||
16); | 16); | ||||
} | } | ||||
} | } | ||||
if (!(attr.ah_flags & IB_AH_GRH)) | if (!(attr.ah_flags & IB_AH_GRH)) | ||||
return attr.dlid == rwc->wc->slid; | return attr.dlid == rwc->wc->slid; | ||||
else | else | ||||
return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, | return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, | ||||
16); | 16); | ||||
} | } | ||||
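
The LMC test in rcv_has_same_gid() works because a port with LID Mask Control lmc owns 2^lmc consecutive LIDs distinguished only by their low lmc bits (the path bits); the comparison masks the XOR of the send-side source path bits and the receive-side destination path bits down to exactly that window and requires a match. Isolated:

    #include <stdio.h>

    int main(void)
    {
        unsigned lmc = 2;               /* the port answers to 4 LIDs */
        unsigned src_path_bits  = 0x5;  /* used when the request was sent */
        unsigned dlid_path_bits = 0x6;  /* seen when the response arrived */

        unsigned mismatch = (src_path_bits ^ dlid_path_bits)
                            & ((1u << lmc) - 1);
        printf("same LID under LMC: %s\n", mismatch ? "no" : "yes");
        return 0;
    }
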
static inline int is_direct(u8 class) | static inline int is_direct(u8 class) | ||||
{ | { | ||||
return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); | return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); | ||||
} | } | ||||
struct ib_mad_send_wr_private* | struct ib_mad_send_wr_private* | ||||
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, | ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, | ||||
struct ib_mad_recv_wc *wc) | const struct ib_mad_recv_wc *wc) | ||||
{ | { | ||||
struct ib_mad_send_wr_private *wr; | struct ib_mad_send_wr_private *wr; | ||||
struct ib_mad *mad; | const struct ib_mad_hdr *mad_hdr; | ||||
mad = (struct ib_mad *)wc->recv_buf.mad; | mad_hdr = &wc->recv_buf.mad->mad_hdr; | ||||
list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { | list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { | ||||
if ((wr->tid == mad->mad_hdr.tid) && | if ((wr->tid == mad_hdr->tid) && | ||||
rcv_has_same_class(wr, wc) && | rcv_has_same_class(wr, wc) && | ||||
/* | /* | ||||
* Don't check GID for direct routed MADs. | * Don't check GID for direct routed MADs. | ||||
* These might have permissive LIDs. | * These might have permissive LIDs. | ||||
*/ | */ | ||||
(is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || | (is_direct(mad_hdr->mgmt_class) || | ||||
rcv_has_same_gid(mad_agent_priv, wr, wc))) | rcv_has_same_gid(mad_agent_priv, wr, wc))) | ||||
return (wr->status == IB_WC_SUCCESS) ? wr : NULL; | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; | ||||
} | } | ||||
/* | /* | ||||
* It's possible to receive the response before we've | * It's possible to receive the response before we've | ||||
* been notified that the send has completed | * been notified that the send has completed | ||||
*/ | */ | ||||
list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { | list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { | ||||
if (is_data_mad(mad_agent_priv, wr->send_buf.mad) && | if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && | ||||
wr->tid == mad->mad_hdr.tid && | wr->tid == mad_hdr->tid && | ||||
wr->timeout && | wr->timeout && | ||||
rcv_has_same_class(wr, wc) && | rcv_has_same_class(wr, wc) && | ||||
/* | /* | ||||
* Don't check GID for direct routed MADs. | * Don't check GID for direct routed MADs. | ||||
* These might have permissive LIDs. | * These might have permissive LIDs. | ||||
*/ | */ | ||||
(is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || | (is_direct(mad_hdr->mgmt_class) || | ||||
rcv_has_same_gid(mad_agent_priv, wr, wc))) | rcv_has_same_gid(mad_agent_priv, wr, wc))) | ||||
/* Verify request has not been canceled */ | /* Verify request has not been canceled */ | ||||
return (wr->status == IB_WC_SUCCESS) ? wr : NULL; | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; | ||||
} | } | ||||
return NULL; | return NULL; | ||||
} | } | ||||
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) | ||||
{ | { | ||||
mad_send_wr->timeout = 0; | mad_send_wr->timeout = 0; | ||||
if (mad_send_wr->refcount == 1) | if (mad_send_wr->refcount == 1) | ||||
list_move_tail(&mad_send_wr->agent_list, | list_move_tail(&mad_send_wr->agent_list, | ||||
&mad_send_wr->mad_agent_priv->done_list); | &mad_send_wr->mad_agent_priv->done_list); | ||||
} | } | ||||
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | ||||
struct ib_mad_recv_wc *mad_recv_wc) | struct ib_mad_recv_wc *mad_recv_wc) | ||||
{ | { | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
struct ib_mad_send_wc mad_send_wc; | struct ib_mad_send_wc mad_send_wc; | ||||
unsigned long flags; | unsigned long flags; | ||||
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); | ||||
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | ||||
if (mad_agent_priv->agent.rmpp_version) { | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { | ||||
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | ||||
mad_recv_wc); | mad_recv_wc); | ||||
if (!mad_recv_wc) { | if (!mad_recv_wc) { | ||||
deref_mad_agent(mad_agent_priv); | deref_mad_agent(mad_agent_priv); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
/* Complete corresponding request */ | /* Complete corresponding request */ | ||||
if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { | if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); | mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); | ||||
if (!mad_send_wr) { | if (!mad_send_wr) { | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) | |||||
&& ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) | |||||
&& (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) | |||||
& IB_MGMT_RMPP_FLAG_ACTIVE)) { | |||||
/* user rmpp is in effect | |||||
* and this is an active RMPP MAD | |||||
*/ | |||||
mad_agent_priv->agent.recv_handler( | |||||
&mad_agent_priv->agent, NULL, | |||||
mad_recv_wc); | |||||
atomic_dec(&mad_agent_priv->refcount); | |||||
} else { | |||||
/* not user rmpp, revert to normal behavior and | |||||
* drop the mad */ | |||||
ib_free_recv_mad(mad_recv_wc); | ib_free_recv_mad(mad_recv_wc); | ||||
deref_mad_agent(mad_agent_priv); | deref_mad_agent(mad_agent_priv); | ||||
return; | return; | ||||
} | } | ||||
} else { | |||||
ib_mark_mad_done(mad_send_wr); | ib_mark_mad_done(mad_send_wr); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
/* Defined behavior is to complete response before request */ | /* Defined behavior is to complete response before request */ | ||||
mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; | mad_agent_priv->agent.recv_handler( | ||||
mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | &mad_agent_priv->agent, | ||||
&mad_send_wr->send_buf, | |||||
mad_recv_wc); | mad_recv_wc); | ||||
atomic_dec(&mad_agent_priv->refcount); | atomic_dec(&mad_agent_priv->refcount); | ||||
mad_send_wc.status = IB_WC_SUCCESS; | mad_send_wc.status = IB_WC_SUCCESS; | ||||
mad_send_wc.vendor_err = 0; | mad_send_wc.vendor_err = 0; | ||||
mad_send_wc.send_buf = &mad_send_wr->send_buf; | mad_send_wc.send_buf = &mad_send_wr->send_buf; | ||||
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | ||||
} | |||||
} else { | } else { | ||||
mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, | ||||
mad_recv_wc); | mad_recv_wc); | ||||
deref_mad_agent(mad_agent_priv); | deref_mad_agent(mad_agent_priv); | ||||
} | } | ||||
} | } | ||||
static bool generate_unmatched_resp(struct ib_mad_private *recv, | static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, | ||||
const struct ib_mad_qp_info *qp_info, | |||||
const struct ib_wc *wc, | |||||
int port_num, | |||||
struct ib_mad_private *recv, | |||||
struct ib_mad_private *response) | struct ib_mad_private *response) | ||||
{ | { | ||||
if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || | enum smi_forward_action retsmi; | ||||
recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { | struct ib_smp *smp = (struct ib_smp *)recv->mad; | ||||
memcpy(response, recv, sizeof *response); | |||||
if (smi_handle_dr_smp_recv(smp, | |||||
rdma_cap_ib_switch(port_priv->device), | |||||
port_num, | |||||
port_priv->device->phys_port_cnt) == | |||||
IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
retsmi = smi_check_forward_dr_smp(smp); | |||||
if (retsmi == IB_SMI_LOCAL) | |||||
return IB_SMI_HANDLE; | |||||
if (retsmi == IB_SMI_SEND) { /* don't forward */ | |||||
if (smi_handle_dr_smp_send(smp, | |||||
rdma_cap_ib_switch(port_priv->device), | |||||
port_num) == IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
} else if (rdma_cap_ib_switch(port_priv->device)) { | |||||
/* forward case for switches */ | |||||
memcpy(response, recv, mad_priv_size(response)); | |||||
response->header.recv_wc.wc = &response->header.wc; | response->header.recv_wc.wc = &response->header.wc; | ||||
response->header.recv_wc.recv_buf.mad = &response->mad.mad; | response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; | ||||
response->header.recv_wc.recv_buf.grh = &response->grh; | response->header.recv_wc.recv_buf.grh = &response->grh; | ||||
response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | |||||
response->mad.mad.mad_hdr.status = | |||||
cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | |||||
if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |||||
response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; | |||||
agent_send_response((const struct ib_mad_hdr *)response->mad, | |||||
&response->grh, wc, | |||||
port_priv->device, | |||||
smi_get_fwd_port(smp), | |||||
qp_info->qp->qp_num, | |||||
response->mad_size, | |||||
false); | |||||
return IB_SMI_DISCARD; | |||||
} | |||||
return IB_SMI_HANDLE; | |||||
} | |||||
static bool generate_unmatched_resp(const struct ib_mad_private *recv, | |||||
struct ib_mad_private *response, | |||||
size_t *resp_len, bool opa) | |||||
{ | |||||
const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; | |||||
struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; | |||||
if (recv_hdr->method == IB_MGMT_METHOD_GET || | |||||
recv_hdr->method == IB_MGMT_METHOD_SET) { | |||||
memcpy(response, recv, mad_priv_size(response)); | |||||
response->header.recv_wc.wc = &response->header.wc; | |||||
response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; | |||||
response->header.recv_wc.recv_buf.grh = &response->grh; | |||||
resp_hdr->method = IB_MGMT_METHOD_GET_RESP; | |||||
resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | |||||
if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |||||
resp_hdr->status |= IB_SMP_DIRECTION; | |||||
if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { | |||||
if (recv_hdr->mgmt_class == | |||||
IB_MGMT_CLASS_SUBN_LID_ROUTED || | |||||
recv_hdr->mgmt_class == | |||||
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |||||
*resp_len = opa_get_smp_header_size( | |||||
(const struct opa_smp *)recv->mad); | |||||
else | |||||
*resp_len = sizeof(struct ib_mad_hdr); | |||||
} | |||||
return true; | return true; | ||||
} else { | } else { | ||||
return false; | return false; | ||||
} | } | ||||
} | } | ||||
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||||
struct ib_wc *wc) | static enum smi_action | ||||
handle_opa_smi(struct ib_mad_port_private *port_priv, | |||||
struct ib_mad_qp_info *qp_info, | |||||
struct ib_wc *wc, | |||||
int port_num, | |||||
struct ib_mad_private *recv, | |||||
struct ib_mad_private *response) | |||||
{ | { | ||||
enum smi_forward_action retsmi; | |||||
struct opa_smp *smp = (struct opa_smp *)recv->mad; | |||||
if (opa_smi_handle_dr_smp_recv(smp, | |||||
rdma_cap_ib_switch(port_priv->device), | |||||
port_num, | |||||
port_priv->device->phys_port_cnt) == | |||||
IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
retsmi = opa_smi_check_forward_dr_smp(smp); | |||||
if (retsmi == IB_SMI_LOCAL) | |||||
return IB_SMI_HANDLE; | |||||
if (retsmi == IB_SMI_SEND) { /* don't forward */ | |||||
if (opa_smi_handle_dr_smp_send(smp, | |||||
rdma_cap_ib_switch(port_priv->device), | |||||
port_num) == IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
if (opa_smi_check_local_smp(smp, port_priv->device) == | |||||
IB_SMI_DISCARD) | |||||
return IB_SMI_DISCARD; | |||||
} else if (rdma_cap_ib_switch(port_priv->device)) { | |||||
/* forward case for switches */ | |||||
memcpy(response, recv, mad_priv_size(response)); | |||||
response->header.recv_wc.wc = &response->header.wc; | |||||
response->header.recv_wc.recv_buf.opa_mad = | |||||
(struct opa_mad *)response->mad; | |||||
response->header.recv_wc.recv_buf.grh = &response->grh; | |||||
agent_send_response((const struct ib_mad_hdr *)response->mad, | |||||
&response->grh, wc, | |||||
port_priv->device, | |||||
opa_smi_get_fwd_port(smp), | |||||
qp_info->qp->qp_num, | |||||
recv->header.wc.byte_len, | |||||
true); | |||||
return IB_SMI_DISCARD; | |||||
} | |||||
return IB_SMI_HANDLE; | |||||
} | |||||
static enum smi_action | |||||
handle_smi(struct ib_mad_port_private *port_priv, | |||||
struct ib_mad_qp_info *qp_info, | |||||
struct ib_wc *wc, | |||||
int port_num, | |||||
struct ib_mad_private *recv, | |||||
struct ib_mad_private *response, | |||||
bool opa) | |||||
{ | |||||
struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; | |||||
if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && | |||||
mad_hdr->class_version == OPA_SMI_CLASS_VERSION) | |||||
return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, | |||||
response); | |||||
return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); | |||||
} | |||||
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) | |||||
{ | |||||
struct ib_mad_port_private *port_priv = cq->cq_context; | |||||
struct ib_mad_list_head *mad_list = | |||||
container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); | |||||
struct ib_mad_qp_info *qp_info; | struct ib_mad_qp_info *qp_info; | ||||
struct ib_mad_private_header *mad_priv_hdr; | struct ib_mad_private_header *mad_priv_hdr; | ||||
struct ib_mad_private *recv, *response = NULL; | struct ib_mad_private *recv, *response = NULL; | ||||
struct ib_mad_list_head *mad_list; | |||||
struct ib_mad_agent_private *mad_agent; | struct ib_mad_agent_private *mad_agent; | ||||
int port_num; | int port_num; | ||||
int ret = IB_MAD_RESULT_SUCCESS; | int ret = IB_MAD_RESULT_SUCCESS; | ||||
size_t mad_size; | |||||
u16 resp_mad_pkey_index = 0; | |||||
bool opa; | |||||
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | if (list_empty_careful(&port_priv->port_list)) | ||||
return; | |||||
if (wc->status != IB_WC_SUCCESS) { | |||||
/* | |||||
* Receive errors indicate that the QP has entered the error | |||||
* state - error handling/shutdown code will cleanup | |||||
*/ | |||||
return; | |||||
} | |||||
qp_info = mad_list->mad_queue->qp_info; | qp_info = mad_list->mad_queue->qp_info; | ||||
dequeue_mad(mad_list); | dequeue_mad(mad_list); | ||||
opa = rdma_cap_opa_mad(qp_info->port_priv->device, | |||||
qp_info->port_priv->port_num); | |||||
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, | mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, | ||||
mad_list); | mad_list); | ||||
recv = container_of(mad_priv_hdr, struct ib_mad_private, header); | recv = container_of(mad_priv_hdr, struct ib_mad_private, header); | ||||
ib_dma_unmap_single(port_priv->device, | ib_dma_unmap_single(port_priv->device, | ||||
recv->header.mapping, | recv->header.mapping, | ||||
sizeof(struct ib_mad_private) - | mad_priv_dma_size(recv), | ||||
sizeof(struct ib_mad_private_header), | |||||
DMA_FROM_DEVICE); | DMA_FROM_DEVICE); | ||||
/* Setup MAD receive work completion from "normal" work completion */ | /* Setup MAD receive work completion from "normal" work completion */ | ||||
recv->header.wc = *wc; | recv->header.wc = *wc; | ||||
recv->header.recv_wc.wc = &recv->header.wc; | recv->header.recv_wc.wc = &recv->header.wc; | ||||
if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { | |||||
recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); | |||||
recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); | |||||
} else { | |||||
recv->header.recv_wc.mad_len = sizeof(struct ib_mad); | recv->header.recv_wc.mad_len = sizeof(struct ib_mad); | ||||
recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; | recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); | ||||
} | |||||
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; | |||||
recv->header.recv_wc.recv_buf.grh = &recv->grh; | recv->header.recv_wc.recv_buf.grh = &recv->grh; | ||||
if (atomic_read(&qp_info->snoop_count)) | if (atomic_read(&qp_info->snoop_count)) | ||||
snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); | snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); | ||||
/* Validate MAD */ | /* Validate MAD */ | ||||
if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) | if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) | ||||
goto out; | goto out; | ||||
response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | mad_size = recv->mad_size; | ||||
response = alloc_mad_private(mad_size, GFP_KERNEL); | |||||
if (!response) { | if (!response) { | ||||
printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " | dev_err(&port_priv->device->dev, | ||||
"for response buffer\n"); | "%s: no memory for response buffer\n", __func__); | ||||
goto out; | goto out; | ||||
} | } | ||||
if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) | if (rdma_cap_ib_switch(port_priv->device)) | ||||
port_num = wc->port_num; | port_num = wc->port_num; | ||||
else | else | ||||
port_num = port_priv->port_num; | port_num = port_priv->port_num; | ||||
if (recv->mad.mad.mad_hdr.mgmt_class == | if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == | ||||
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | ||||
enum smi_forward_action retsmi; | if (handle_smi(port_priv, qp_info, wc, port_num, recv, | ||||
response, opa) | |||||
if (smi_handle_dr_smp_recv(&recv->mad.smp, | == IB_SMI_DISCARD) | ||||
port_priv->device->node_type, | |||||
port_num, | |||||
port_priv->device->phys_port_cnt) == | |||||
IB_SMI_DISCARD) | |||||
goto out; | goto out; | ||||
retsmi = smi_check_forward_dr_smp(&recv->mad.smp); | |||||
if (retsmi == IB_SMI_LOCAL) | |||||
goto local; | |||||
if (retsmi == IB_SMI_SEND) { /* don't forward */ | |||||
if (smi_handle_dr_smp_send(&recv->mad.smp, | |||||
port_priv->device->node_type, | |||||
port_num) == IB_SMI_DISCARD) | |||||
goto out; | |||||
if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) | |||||
goto out; | |||||
} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { | |||||
/* forward case for switches */ | |||||
memcpy(response, recv, sizeof(*response)); | |||||
response->header.recv_wc.wc = &response->header.wc; | |||||
response->header.recv_wc.recv_buf.mad = &response->mad.mad; | |||||
response->header.recv_wc.recv_buf.grh = &response->grh; | |||||
agent_send_response(&response->mad.mad, | |||||
&response->grh, wc, | |||||
port_priv->device, | |||||
smi_get_fwd_port(&recv->mad.smp), | |||||
qp_info->qp->qp_num); | |||||
goto out; | |||||
} | } | ||||
} | |||||
local: | |||||
/* Give driver "right of first refusal" on incoming MAD */ | /* Give driver "right of first refusal" on incoming MAD */ | ||||
if (port_priv->device->process_mad) { | if (port_priv->device->process_mad) { | ||||
ret = port_priv->device->process_mad(port_priv->device, 0, | ret = port_priv->device->process_mad(port_priv->device, 0, | ||||
port_priv->port_num, | port_priv->port_num, | ||||
wc, &recv->grh, | wc, &recv->grh, | ||||
&recv->mad.mad, | (const struct ib_mad_hdr *)recv->mad, | ||||
&response->mad.mad); | recv->mad_size, | ||||
(struct ib_mad_hdr *)response->mad, | |||||
&mad_size, &resp_mad_pkey_index); | |||||
if (opa) | |||||
wc->pkey_index = resp_mad_pkey_index; | |||||
if (ret & IB_MAD_RESULT_SUCCESS) { | if (ret & IB_MAD_RESULT_SUCCESS) { | ||||
if (ret & IB_MAD_RESULT_CONSUMED) | if (ret & IB_MAD_RESULT_CONSUMED) | ||||
goto out; | goto out; | ||||
if (ret & IB_MAD_RESULT_REPLY) { | if (ret & IB_MAD_RESULT_REPLY) { | ||||
agent_send_response(&response->mad.mad, | agent_send_response((const struct ib_mad_hdr *)response->mad, | ||||
&recv->grh, wc, | &recv->grh, wc, | ||||
port_priv->device, | port_priv->device, | ||||
port_num, | port_num, | ||||
qp_info->qp->qp_num); | qp_info->qp->qp_num, | ||||
mad_size, opa); | |||||
goto out; | goto out; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
mad_agent = find_mad_agent(port_priv, &recv->mad.mad); | mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); | ||||
if (mad_agent) { | if (mad_agent) { | ||||
ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); | ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); | ||||
/* | /* | ||||
* recv is freed up in error cases in ib_mad_complete_recv | * recv is freed up in error cases in ib_mad_complete_recv | ||||
* or via recv_handler in ib_mad_complete_recv() | * or via recv_handler in ib_mad_complete_recv() | ||||
*/ | */ | ||||
recv = NULL; | recv = NULL; | ||||
} else if ((ret & IB_MAD_RESULT_SUCCESS) && | } else if ((ret & IB_MAD_RESULT_SUCCESS) && | ||||
generate_unmatched_resp(recv, response)) { | generate_unmatched_resp(recv, response, &mad_size, opa)) { | ||||
agent_send_response(&response->mad.mad, &recv->grh, wc, | agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, | ||||
port_priv->device, port_num, qp_info->qp->qp_num); | port_priv->device, port_num, | ||||
qp_info->qp->qp_num, mad_size, opa); | |||||
} | } | ||||
out: | out: | ||||
/* Post another receive request for this QP */ | /* Post another receive request for this QP */ | ||||
if (response) { | if (response) { | ||||
ib_mad_post_receive_mads(qp_info, response); | ib_mad_post_receive_mads(qp_info, response); | ||||
if (recv) | kfree(recv); | ||||
kmem_cache_free(ib_mad_cache, recv); | |||||
} else | } else | ||||
ib_mad_post_receive_mads(qp_info, recv); | ib_mad_post_receive_mads(qp_info, recv); | ||||
} | } | ||||
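For context: the receive path above now sizes MAD buffers from recv->mad_size (OPA MADs can be larger than the fixed 256-byte IB MAD), so the old ib_mad kmem_cache is replaced by plain kzalloc()/kfree(). The alloc_mad_private() and mad_priv_dma_size() helpers are defined earlier in the file and not shown in this hunk; a minimal sketch consistent with their use here, assuming ib_mad_private records its mad_size and carries a trailing payload buffer:

	/* Sketch only -- the real helpers live earlier in ib_mad.c. */
	static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
	{
		struct ib_mad_private *ret;

		/* header + GRH + variable-size MAD payload in one allocation */
		ret = kzalloc(sizeof(struct ib_mad_private) + mad_size, flags);
		if (ret)
			ret->mad_size = mad_size;
		return ret;
	}

	static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
	{
		/* only the GRH and MAD payload are DMA-mapped, not the header */
		return sizeof(mp->grh) + mp->mad_size;
	}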
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | ||||
{ | { | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
unsigned long delay; | unsigned long delay; | ||||
[64 lines elided] | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | ||||
struct ib_mad_send_wc *mad_send_wc) | struct ib_mad_send_wc *mad_send_wc) | ||||
{ | { | ||||
struct ib_mad_agent_private *mad_agent_priv; | struct ib_mad_agent_private *mad_agent_priv; | ||||
unsigned long flags; | unsigned long flags; | ||||
int ret; | int ret; | ||||
mad_agent_priv = mad_send_wr->mad_agent_priv; | mad_agent_priv = mad_send_wr->mad_agent_priv; | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
if (mad_agent_priv->agent.rmpp_version) { | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { | ||||
ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); | ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); | ||||
if (ret == IB_RMPP_RESULT_CONSUMED) | if (ret == IB_RMPP_RESULT_CONSUMED) | ||||
goto done; | goto done; | ||||
} else | } else | ||||
ret = IB_RMPP_RESULT_UNHANDLED; | ret = IB_RMPP_RESULT_UNHANDLED; | ||||
if (mad_send_wc->status != IB_WC_SUCCESS && | if (mad_send_wc->status != IB_WC_SUCCESS && | ||||
mad_send_wr->status == IB_WC_SUCCESS) { | mad_send_wr->status == IB_WC_SUCCESS) { | ||||
[13 lines elided] | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | ||||
list_del(&mad_send_wr->agent_list); | list_del(&mad_send_wr->agent_list); | ||||
adjust_timeout(mad_agent_priv); | adjust_timeout(mad_agent_priv); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
if (mad_send_wr->status != IB_WC_SUCCESS ) | if (mad_send_wr->status != IB_WC_SUCCESS ) | ||||
mad_send_wc->status = mad_send_wr->status; | mad_send_wc->status = mad_send_wr->status; | ||||
if (ret == IB_RMPP_RESULT_INTERNAL) | if (ret == IB_RMPP_RESULT_INTERNAL) | ||||
ib_rmpp_send_handler(mad_send_wc); | ib_rmpp_send_handler(mad_send_wc); | ||||
else { | else | ||||
if (mad_send_wr->is_sa_cc_mad) | |||||
sa_cc_mad_done(get_cc_obj(mad_send_wr)); | |||||
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | ||||
mad_send_wc); | mad_send_wc); | ||||
} | |||||
/* Release reference on agent taken when sending */ | /* Release reference on agent taken when sending */ | ||||
deref_mad_agent(mad_agent_priv); | deref_mad_agent(mad_agent_priv); | ||||
return; | return; | ||||
done: | done: | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
} | } | ||||
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) | ||||
struct ib_wc *wc) | |||||
{ | { | ||||
struct ib_mad_port_private *port_priv = cq->cq_context; | |||||
struct ib_mad_list_head *mad_list = | |||||
container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); | |||||
struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; | struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; | ||||
struct ib_mad_list_head *mad_list; | |||||
struct ib_mad_qp_info *qp_info; | struct ib_mad_qp_info *qp_info; | ||||
struct ib_mad_queue *send_queue; | struct ib_mad_queue *send_queue; | ||||
struct ib_send_wr *bad_send_wr; | struct ib_send_wr *bad_send_wr; | ||||
struct ib_mad_send_wc mad_send_wc; | struct ib_mad_send_wc mad_send_wc; | ||||
unsigned long flags; | unsigned long flags; | ||||
int ret; | int ret; | ||||
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | if (list_empty_careful(&port_priv->port_list)) | ||||
return; | |||||
if (wc->status != IB_WC_SUCCESS) { | |||||
if (!ib_mad_send_error(port_priv, wc)) | |||||
return; | |||||
} | |||||
mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | ||||
mad_list); | mad_list); | ||||
send_queue = mad_list->mad_queue; | send_queue = mad_list->mad_queue; | ||||
qp_info = send_queue->qp_info; | qp_info = send_queue->qp_info; | ||||
retry: | retry: | ||||
ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, | ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, | ||||
mad_send_wr->header_mapping, | mad_send_wr->header_mapping, | ||||
[20 lines elided] | retry: | ||||
mad_send_wc.status = wc->status; | mad_send_wc.status = wc->status; | ||||
mad_send_wc.vendor_err = wc->vendor_err; | mad_send_wc.vendor_err = wc->vendor_err; | ||||
if (atomic_read(&qp_info->snoop_count)) | if (atomic_read(&qp_info->snoop_count)) | ||||
snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, | snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, | ||||
IB_MAD_SNOOP_SEND_COMPLETIONS); | IB_MAD_SNOOP_SEND_COMPLETIONS); | ||||
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | ||||
if (queued_send_wr) { | if (queued_send_wr) { | ||||
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, | ||||
&bad_send_wr); | &bad_send_wr); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); | dev_err(&port_priv->device->dev, | ||||
"ib_post_send failed: %d\n", ret); | |||||
mad_send_wr = queued_send_wr; | mad_send_wr = queued_send_wr; | ||||
wc->status = IB_WC_LOC_QP_OP_ERR; | wc->status = IB_WC_LOC_QP_OP_ERR; | ||||
goto retry; | goto retry; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
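ib_mad_send_done() illustrates the conversion away from wc->wr_id: each work request embeds a struct ib_cqe, ib_post_send() is handed wr.wr_cqe, and the CQ layer invokes cqe->done directly, so the handler recovers its state with container_of(). A generic sketch of that pattern (the example_* names are illustrative, not from this file):

	/* Generic sketch of the ib_cqe completion pattern used above. */
	struct example_request {
		struct ib_cqe cqe;	/* embedded completion hook */
		/* ... request state ... */
	};

	static void example_send_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct example_request *req =
			container_of(wc->wr_cqe, struct example_request, cqe);

		/* handle completion for req; wc->status reports success/failure */
	}

	static int example_post_send(struct ib_qp *qp, struct example_request *req,
				     struct ib_sge *sge)
	{
		struct ib_send_wr wr = {}, *bad_wr;

		req->cqe.done = example_send_done;	/* called from CQ polling context */
		wr.wr_cqe = &req->cqe;
		wr.sg_list = sge;
		wr.num_sge = 1;
		wr.opcode = IB_WR_SEND;
		return ib_post_send(qp, &wr, &bad_wr);
	}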
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) | static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) | ||||
{ | { | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
struct ib_mad_list_head *mad_list; | struct ib_mad_list_head *mad_list; | ||||
unsigned long flags; | unsigned long flags; | ||||
spin_lock_irqsave(&qp_info->send_queue.lock, flags); | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | ||||
list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { | list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { | ||||
mad_send_wr = container_of(mad_list, | mad_send_wr = container_of(mad_list, | ||||
struct ib_mad_send_wr_private, | struct ib_mad_send_wr_private, | ||||
mad_list); | mad_list); | ||||
mad_send_wr->retry = 1; | mad_send_wr->retry = 1; | ||||
} | } | ||||
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | ||||
} | } | ||||
static void mad_error_handler(struct ib_mad_port_private *port_priv, | static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, | ||||
struct ib_wc *wc) | struct ib_wc *wc) | ||||
{ | { | ||||
struct ib_mad_list_head *mad_list; | struct ib_mad_list_head *mad_list = | ||||
struct ib_mad_qp_info *qp_info; | container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); | ||||
struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; | |||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
int ret; | int ret; | ||||
/* Determine if failure was a send or receive */ | |||||
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | |||||
qp_info = mad_list->mad_queue->qp_info; | |||||
if (mad_list->mad_queue == &qp_info->recv_queue) | |||||
/* | /* | ||||
* Receive errors indicate that the QP has entered the error | |||||
* state - error handling/shutdown code will cleanup | |||||
*/ | |||||
return; | |||||
/* | |||||
* Send errors will transition the QP to SQE - move | * Send errors will transition the QP to SQE - move | ||||
* QP to RTS and repost flushed work requests | * QP to RTS and repost flushed work requests | ||||
*/ | */ | ||||
mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | ||||
mad_list); | mad_list); | ||||
if (wc->status == IB_WC_WR_FLUSH_ERR) { | if (wc->status == IB_WC_WR_FLUSH_ERR) { | ||||
if (mad_send_wr->retry) { | if (mad_send_wr->retry) { | ||||
/* Repost send */ | /* Repost send */ | ||||
struct ib_send_wr *bad_send_wr; | struct ib_send_wr *bad_send_wr; | ||||
mad_send_wr->retry = 0; | mad_send_wr->retry = 0; | ||||
ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, | ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, | ||||
&bad_send_wr); | &bad_send_wr); | ||||
if (ret) | if (!ret) | ||||
ib_mad_send_done_handler(port_priv, wc); | return false; | ||||
} else | } | ||||
ib_mad_send_done_handler(port_priv, wc); | |||||
} else { | } else { | ||||
struct ib_qp_attr *attr; | struct ib_qp_attr *attr; | ||||
/* Transition QP to RTS and fail offending send */ | /* Transition QP to RTS and fail offending send */ | ||||
attr = kmalloc(sizeof *attr, GFP_KERNEL); | attr = kmalloc(sizeof *attr, GFP_KERNEL); | ||||
if (attr) { | if (attr) { | ||||
attr->qp_state = IB_QPS_RTS; | attr->qp_state = IB_QPS_RTS; | ||||
attr->cur_qp_state = IB_QPS_SQE; | attr->cur_qp_state = IB_QPS_SQE; | ||||
ret = ib_modify_qp(qp_info->qp, attr, | ret = ib_modify_qp(qp_info->qp, attr, | ||||
IB_QP_STATE | IB_QP_CUR_STATE); | IB_QP_STATE | IB_QP_CUR_STATE); | ||||
kfree(attr); | kfree(attr); | ||||
if (ret) | if (ret) | ||||
printk(KERN_ERR PFX "mad_error_handler - " | dev_err(&port_priv->device->dev, | ||||
"ib_modify_qp to RTS : %d\n", ret); | "%s - ib_modify_qp to RTS: %d\n", | ||||
__func__, ret); | |||||
else | else | ||||
mark_sends_for_retry(qp_info); | mark_sends_for_retry(qp_info); | ||||
} | } | ||||
ib_mad_send_done_handler(port_priv, wc); | |||||
} | } | ||||
} | |||||
/* | return true; | ||||
* IB MAD completion callback | |||||
*/ | |||||
static void ib_mad_completion_handler(struct work_struct *work) | |||||
{ | |||||
struct ib_mad_port_private *port_priv; | |||||
struct ib_wc wc; | |||||
port_priv = container_of(work, struct ib_mad_port_private, work); | |||||
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | |||||
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | |||||
if (wc.status == IB_WC_SUCCESS) { | |||||
switch (wc.opcode) { | |||||
case IB_WC_SEND: | |||||
ib_mad_send_done_handler(port_priv, &wc); | |||||
break; | |||||
case IB_WC_RECV: | |||||
ib_mad_recv_done_handler(port_priv, &wc); | |||||
break; | |||||
default: | |||||
BUG_ON(1); | |||||
break; | |||||
} | } | ||||
} else | |||||
mad_error_handler(port_priv, &wc); | |||||
} | |||||
} | |||||
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | ||||
{ | { | ||||
unsigned long flags; | unsigned long flags; | ||||
struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; | ||||
struct ib_mad_send_wc mad_send_wc; | struct ib_mad_send_wc mad_send_wc; | ||||
struct list_head cancel_list; | struct list_head cancel_list; | ||||
INIT_LIST_HEAD(&cancel_list); | INIT_LIST_HEAD(&cancel_list); | ||||
cancel_sa_cc_mads(mad_agent_priv); | |||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | ||||
&mad_agent_priv->send_list, agent_list) { | &mad_agent_priv->send_list, agent_list) { | ||||
if (mad_send_wr->status == IB_WC_SUCCESS) { | if (mad_send_wr->status == IB_WC_SUCCESS) { | ||||
mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | ||||
mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | ||||
} | } | ||||
} | } | ||||
/* Empty wait list to prevent receives from finding a request */ | /* Empty wait list to prevent receives from finding a request */ | ||||
list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
/* Report all cancelled requests */ | /* Report all cancelled requests */ | ||||
mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | ||||
mad_send_wc.vendor_err = 0; | mad_send_wc.vendor_err = 0; | ||||
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | ||||
&cancel_list, agent_list) { | &cancel_list, agent_list) { | ||||
mad_send_wc.send_buf = &mad_send_wr->send_buf; | mad_send_wc.send_buf = &mad_send_wr->send_buf; | ||||
list_del(&mad_send_wr->agent_list); | list_del(&mad_send_wr->agent_list); | ||||
if (mad_send_wr->is_sa_cc_mad) | |||||
sa_cc_mad_done(get_cc_obj(mad_send_wr)); | |||||
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | ||||
&mad_send_wc); | &mad_send_wc); | ||||
atomic_dec(&mad_agent_priv->refcount); | atomic_dec(&mad_agent_priv->refcount); | ||||
} | } | ||||
} | } | ||||
static struct ib_mad_send_wr_private* | static struct ib_mad_send_wr_private* | ||||
find_send_wr(struct ib_mad_agent_private *mad_agent_priv, | find_send_wr(struct ib_mad_agent_private *mad_agent_priv, | ||||
struct ib_mad_send_buf *send_buf) | struct ib_mad_send_buf *send_buf) | ||||
{ | { | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | ||||
agent_list) { | agent_list) { | ||||
if (&mad_send_wr->send_buf == send_buf) | if (&mad_send_wr->send_buf == send_buf) | ||||
return mad_send_wr; | return mad_send_wr; | ||||
} | } | ||||
list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | ||||
agent_list) { | agent_list) { | ||||
if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && | if (is_rmpp_data_mad(mad_agent_priv, | ||||
mad_send_wr->send_buf.mad) && | |||||
&mad_send_wr->send_buf == send_buf) | &mad_send_wr->send_buf == send_buf) | ||||
return mad_send_wr; | return mad_send_wr; | ||||
} | } | ||||
return NULL; | return NULL; | ||||
} | } | ||||
int ib_modify_mad(struct ib_mad_agent *mad_agent, | int ib_modify_mad(struct ib_mad_agent *mad_agent, | ||||
struct ib_mad_send_buf *send_buf, u32 timeout_ms) | struct ib_mad_send_buf *send_buf, u32 timeout_ms) | ||||
{ | { | ||||
struct ib_mad_agent_private *mad_agent_priv; | struct ib_mad_agent_private *mad_agent_priv; | ||||
struct ib_mad_send_wr_private *mad_send_wr; | struct ib_mad_send_wr_private *mad_send_wr; | ||||
unsigned long flags; | unsigned long flags; | ||||
int active; | int active; | ||||
mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | ||||
agent); | agent); | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
mad_send_wr = find_send_wr(mad_agent_priv, send_buf); | mad_send_wr = find_send_wr(mad_agent_priv, send_buf); | ||||
if (!mad_send_wr) { | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
if (modify_sa_cc_mad(mad_agent_priv, send_buf, timeout_ms)) | |||||
return -EINVAL; | return -EINVAL; | ||||
return 0; | |||||
} | } | ||||
if (mad_send_wr->status != IB_WC_SUCCESS) { | |||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |||||
return -EINVAL; | |||||
} | |||||
active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); | active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); | ||||
if (!timeout_ms) { | if (!timeout_ms) { | ||||
mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | ||||
mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | ||||
} | } | ||||
mad_send_wr->send_buf.timeout_ms = timeout_ms; | mad_send_wr->send_buf.timeout_ms = timeout_ms; | ||||
[18 lines elided] | |||||
{ | { | ||||
struct ib_mad_agent_private *mad_agent_priv; | struct ib_mad_agent_private *mad_agent_priv; | ||||
struct ib_mad_local_private *local; | struct ib_mad_local_private *local; | ||||
struct ib_mad_agent_private *recv_mad_agent; | struct ib_mad_agent_private *recv_mad_agent; | ||||
unsigned long flags; | unsigned long flags; | ||||
int free_mad; | int free_mad; | ||||
struct ib_wc wc; | struct ib_wc wc; | ||||
struct ib_mad_send_wc mad_send_wc; | struct ib_mad_send_wc mad_send_wc; | ||||
bool opa; | |||||
mad_agent_priv = | mad_agent_priv = | ||||
container_of(work, struct ib_mad_agent_private, local_work); | container_of(work, struct ib_mad_agent_private, local_work); | ||||
opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, | |||||
mad_agent_priv->qp_info->port_priv->port_num); | |||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
while (!list_empty(&mad_agent_priv->local_list)) { | while (!list_empty(&mad_agent_priv->local_list)) { | ||||
local = list_entry(mad_agent_priv->local_list.next, | local = list_entry(mad_agent_priv->local_list.next, | ||||
struct ib_mad_local_private, | struct ib_mad_local_private, | ||||
completion_list); | completion_list); | ||||
list_del(&local->completion_list); | list_del(&local->completion_list); | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
free_mad = 0; | free_mad = 0; | ||||
if (local->mad_priv) { | if (local->mad_priv) { | ||||
u8 base_version; | |||||
recv_mad_agent = local->recv_mad_agent; | recv_mad_agent = local->recv_mad_agent; | ||||
if (!recv_mad_agent) { | if (!recv_mad_agent) { | ||||
printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); | dev_err(&mad_agent_priv->agent.device->dev, | ||||
"No receive MAD agent for local completion\n"); | |||||
free_mad = 1; | free_mad = 1; | ||||
goto local_send_completion; | goto local_send_completion; | ||||
} | } | ||||
/* | /* | ||||
* Defined behavior is to complete response | * Defined behavior is to complete response | ||||
* before request | * before request | ||||
*/ | */ | ||||
build_smp_wc(recv_mad_agent->agent.qp, | build_smp_wc(recv_mad_agent->agent.qp, | ||||
(unsigned long) local->mad_send_wr, | local->mad_send_wr->send_wr.wr.wr_cqe, | ||||
be16_to_cpu(IB_LID_PERMISSIVE), | be16_to_cpu(IB_LID_PERMISSIVE), | ||||
0, recv_mad_agent->agent.port_num, &wc); | local->mad_send_wr->send_wr.pkey_index, | ||||
recv_mad_agent->agent.port_num, &wc); | |||||
local->mad_priv->header.recv_wc.wc = &wc; | local->mad_priv->header.recv_wc.wc = &wc; | ||||
local->mad_priv->header.recv_wc.mad_len = | |||||
sizeof(struct ib_mad); | base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; | ||||
if (opa && base_version == OPA_MGMT_BASE_VERSION) { | |||||
local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; | |||||
local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); | |||||
} else { | |||||
local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); | |||||
local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); | |||||
} | |||||
INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); | ||||
list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | ||||
&local->mad_priv->header.recv_wc.rmpp_list); | &local->mad_priv->header.recv_wc.rmpp_list); | ||||
local->mad_priv->header.recv_wc.recv_buf.grh = NULL; | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; | ||||
local->mad_priv->header.recv_wc.recv_buf.mad = | local->mad_priv->header.recv_wc.recv_buf.mad = | ||||
&local->mad_priv->mad.mad; | (struct ib_mad *)local->mad_priv->mad; | ||||
if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) | if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) | ||||
snoop_recv(recv_mad_agent->qp_info, | snoop_recv(recv_mad_agent->qp_info, | ||||
&local->mad_priv->header.recv_wc, | &local->mad_priv->header.recv_wc, | ||||
IB_MAD_SNOOP_RECVS); | IB_MAD_SNOOP_RECVS); | ||||
recv_mad_agent->agent.recv_handler( | recv_mad_agent->agent.recv_handler( | ||||
&recv_mad_agent->agent, | &recv_mad_agent->agent, | ||||
&local->mad_send_wr->send_buf, | |||||
&local->mad_priv->header.recv_wc); | &local->mad_priv->header.recv_wc); | ||||
spin_lock_irqsave(&recv_mad_agent->lock, flags); | spin_lock_irqsave(&recv_mad_agent->lock, flags); | ||||
atomic_dec(&recv_mad_agent->refcount); | atomic_dec(&recv_mad_agent->refcount); | ||||
spin_unlock_irqrestore(&recv_mad_agent->lock, flags); | spin_unlock_irqrestore(&recv_mad_agent->lock, flags); | ||||
} | } | ||||
local_send_completion: | local_send_completion: | ||||
/* Complete send */ | /* Complete send */ | ||||
mad_send_wc.status = IB_WC_SUCCESS; | mad_send_wc.status = IB_WC_SUCCESS; | ||||
mad_send_wc.vendor_err = 0; | mad_send_wc.vendor_err = 0; | ||||
mad_send_wc.send_buf = &local->mad_send_wr->send_buf; | mad_send_wc.send_buf = &local->mad_send_wr->send_buf; | ||||
if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) | if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) | ||||
snoop_send(mad_agent_priv->qp_info, | snoop_send(mad_agent_priv->qp_info, | ||||
&local->mad_send_wr->send_buf, | &local->mad_send_wr->send_buf, | ||||
&mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); | &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); | ||||
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | ||||
&mad_send_wc); | &mad_send_wc); | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
atomic_dec(&mad_agent_priv->refcount); | atomic_dec(&mad_agent_priv->refcount); | ||||
if (free_mad) | if (free_mad) | ||||
kmem_cache_free(ib_mad_cache, local->mad_priv); | kfree(local->mad_priv); | ||||
kfree(local); | kfree(local); | ||||
} | } | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
} | } | ||||
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | ||||
{ | { | ||||
int ret; | int ret; | ||||
if (!mad_send_wr->retries_left) | if (!mad_send_wr->retries_left) | ||||
return -ETIMEDOUT; | return -ETIMEDOUT; | ||||
mad_send_wr->retries_left--; | mad_send_wr->retries_left--; | ||||
mad_send_wr->send_buf.retries++; | mad_send_wr->send_buf.retries++; | ||||
mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); | ||||
if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { | if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { | ||||
ret = ib_retry_rmpp(mad_send_wr); | ret = ib_retry_rmpp(mad_send_wr); | ||||
switch (ret) { | switch (ret) { | ||||
case IB_RMPP_RESULT_UNHANDLED: | case IB_RMPP_RESULT_UNHANDLED: | ||||
ret = ib_send_mad(mad_send_wr); | ret = ib_send_mad(mad_send_wr); | ||||
break; | break; | ||||
case IB_RMPP_RESULT_CONSUMED: | case IB_RMPP_RESULT_CONSUMED: | ||||
ret = 0; | ret = 0; | ||||
break; | break; | ||||
[46 lines elided] | while (!list_empty(&mad_agent_priv->wait_list)) { | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
if (mad_send_wr->status == IB_WC_SUCCESS) | if (mad_send_wr->status == IB_WC_SUCCESS) | ||||
mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | ||||
else | else | ||||
mad_send_wc.status = mad_send_wr->status; | mad_send_wc.status = mad_send_wr->status; | ||||
mad_send_wc.send_buf = &mad_send_wr->send_buf; | mad_send_wc.send_buf = &mad_send_wr->send_buf; | ||||
if (mad_send_wr->is_sa_cc_mad) | |||||
sa_cc_mad_done(get_cc_obj(mad_send_wr)); | |||||
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | ||||
&mad_send_wc); | &mad_send_wc); | ||||
atomic_dec(&mad_agent_priv->refcount); | atomic_dec(&mad_agent_priv->refcount); | ||||
spin_lock_irqsave(&mad_agent_priv->lock, flags); | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||||
} | } | ||||
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||||
} | } | ||||
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) | |||||
{ | |||||
struct ib_mad_port_private *port_priv = cq->cq_context; | |||||
unsigned long flags; | |||||
spin_lock_irqsave(&ib_mad_port_list_lock, flags); | |||||
if (!list_empty(&port_priv->port_list)) | |||||
queue_work(port_priv->wq, &port_priv->work); | |||||
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | |||||
} | |||||
/* | /* | ||||
* Allocate receive MADs and post receive WRs for them | * Allocate receive MADs and post receive WRs for them | ||||
*/ | */ | ||||
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | ||||
struct ib_mad_private *mad) | struct ib_mad_private *mad) | ||||
{ | { | ||||
unsigned long flags; | unsigned long flags; | ||||
int post, ret; | int post, ret; | ||||
struct ib_mad_private *mad_priv; | struct ib_mad_private *mad_priv; | ||||
struct ib_sge sg_list; | struct ib_sge sg_list; | ||||
struct ib_recv_wr recv_wr, *bad_recv_wr; | struct ib_recv_wr recv_wr, *bad_recv_wr; | ||||
struct ib_mad_queue *recv_queue = &qp_info->recv_queue; | struct ib_mad_queue *recv_queue = &qp_info->recv_queue; | ||||
/* Initialize common scatter list fields */ | /* Initialize common scatter list fields */ | ||||
sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; | sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; | ||||
sg_list.lkey = (*qp_info->port_priv->mr).lkey; | |||||
/* Initialize common receive WR fields */ | /* Initialize common receive WR fields */ | ||||
recv_wr.next = NULL; | recv_wr.next = NULL; | ||||
recv_wr.sg_list = &sg_list; | recv_wr.sg_list = &sg_list; | ||||
recv_wr.num_sge = 1; | recv_wr.num_sge = 1; | ||||
do { | do { | ||||
/* Allocate and map receive buffer */ | /* Allocate and map receive buffer */ | ||||
if (mad) { | if (mad) { | ||||
mad_priv = mad; | mad_priv = mad; | ||||
mad = NULL; | mad = NULL; | ||||
} else { | } else { | ||||
mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), | ||||
GFP_ATOMIC); | |||||
if (!mad_priv) { | if (!mad_priv) { | ||||
printk(KERN_ERR PFX "No memory for receive buffer\n"); | dev_err(&qp_info->port_priv->device->dev, | ||||
"No memory for receive buffer\n"); | |||||
ret = -ENOMEM; | ret = -ENOMEM; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
sg_list.length = mad_priv_dma_size(mad_priv); | |||||
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, | sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, | ||||
&mad_priv->grh, | &mad_priv->grh, | ||||
sizeof *mad_priv - | mad_priv_dma_size(mad_priv), | ||||
sizeof mad_priv->header, | |||||
DMA_FROM_DEVICE); | DMA_FROM_DEVICE); | ||||
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, | if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, | ||||
sg_list.addr))) { | sg_list.addr))) { | ||||
ret = -ENOMEM; | ret = -ENOMEM; | ||||
kmem_cache_free(ib_mad_cache, mad_priv); | |||||
printk(KERN_ERR PFX "ib_dma_map_single failed\n"); | |||||
break; | break; | ||||
} | } | ||||
mad_priv->header.mapping = sg_list.addr; | mad_priv->header.mapping = sg_list.addr; | ||||
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; | |||||
mad_priv->header.mad_list.mad_queue = recv_queue; | mad_priv->header.mad_list.mad_queue = recv_queue; | ||||
mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; | |||||
recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; | |||||
/* Post receive WR */ | /* Post receive WR */ | ||||
spin_lock_irqsave(&recv_queue->lock, flags); | spin_lock_irqsave(&recv_queue->lock, flags); | ||||
post = (++recv_queue->count < recv_queue->max_active); | post = (++recv_queue->count < recv_queue->max_active); | ||||
list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); | list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); | ||||
spin_unlock_irqrestore(&recv_queue->lock, flags); | spin_unlock_irqrestore(&recv_queue->lock, flags); | ||||
ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); | ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); | ||||
if (ret) { | if (ret) { | ||||
spin_lock_irqsave(&recv_queue->lock, flags); | spin_lock_irqsave(&recv_queue->lock, flags); | ||||
list_del(&mad_priv->header.mad_list.list); | list_del(&mad_priv->header.mad_list.list); | ||||
recv_queue->count--; | recv_queue->count--; | ||||
spin_unlock_irqrestore(&recv_queue->lock, flags); | spin_unlock_irqrestore(&recv_queue->lock, flags); | ||||
ib_dma_unmap_single(qp_info->port_priv->device, | ib_dma_unmap_single(qp_info->port_priv->device, | ||||
mad_priv->header.mapping, | mad_priv->header.mapping, | ||||
sizeof *mad_priv - | mad_priv_dma_size(mad_priv), | ||||
sizeof mad_priv->header, | |||||
DMA_FROM_DEVICE); | DMA_FROM_DEVICE); | ||||
kmem_cache_free(ib_mad_cache, mad_priv); | kfree(mad_priv); | ||||
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); | dev_err(&qp_info->port_priv->device->dev, | ||||
"ib_post_recv failed: %d\n", ret); | |||||
break; | break; | ||||
} | } | ||||
} while (post); | } while (post); | ||||
return ret; | return ret; | ||||
} | } | ||||
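The receive side uses the same hook: each posted buffer's mad_list.cqe.done is set to ib_mad_recv_done above, so the completion handler can unwind from wc->wr_cqe back to the full ib_mad_private. A sketch of the assumed recovery chain, using the structure names visible in this hunk:

	/* Sketch: how ib_mad_recv_done is assumed to recover the buffer
	 * posted above, by unwinding the embedded-structure chain. */
	static void example_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct ib_mad_list_head *mad_list =
			container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
		struct ib_mad_private_header *mad_priv_hdr =
			container_of(mad_list, struct ib_mad_private_header, mad_list);
		struct ib_mad_private *recv =
			container_of(mad_priv_hdr, struct ib_mad_private, header);

		/* recv->grh and recv->mad now hold wc->byte_len bytes of data */
	}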
/* | /* | ||||
[18 lines elided] | while (!list_empty(&qp_info->recv_queue.list)) { | ||||
recv = container_of(mad_priv_hdr, struct ib_mad_private, | recv = container_of(mad_priv_hdr, struct ib_mad_private, | ||||
header); | header); | ||||
/* Remove from posted receive MAD list */ | /* Remove from posted receive MAD list */ | ||||
list_del(&mad_list->list); | list_del(&mad_list->list); | ||||
ib_dma_unmap_single(qp_info->port_priv->device, | ib_dma_unmap_single(qp_info->port_priv->device, | ||||
recv->header.mapping, | recv->header.mapping, | ||||
sizeof(struct ib_mad_private) - | mad_priv_dma_size(recv), | ||||
sizeof(struct ib_mad_private_header), | |||||
DMA_FROM_DEVICE); | DMA_FROM_DEVICE); | ||||
kmem_cache_free(ib_mad_cache, recv); | kfree(recv); | ||||
} | } | ||||
qp_info->recv_queue.count = 0; | qp_info->recv_queue.count = 0; | ||||
} | } | ||||
/* | /* | ||||
* Start the port | * Start the port | ||||
*/ | */ | ||||
static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | ||||
{ | { | ||||
int ret, i; | int ret, i; | ||||
struct ib_qp_attr *attr; | struct ib_qp_attr *attr; | ||||
struct ib_qp *qp; | struct ib_qp *qp; | ||||
u16 pkey_index = 0; | u16 pkey_index; | ||||
attr = kmalloc(sizeof *attr, GFP_KERNEL); | attr = kmalloc(sizeof *attr, GFP_KERNEL); | ||||
if (!attr) { | if (!attr) { | ||||
printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); | dev_err(&port_priv->device->dev, | ||||
"Couldn't kmalloc ib_qp_attr\n"); | |||||
return -ENOMEM; | return -ENOMEM; | ||||
} | } | ||||
ret = ib_find_pkey(port_priv->device, port_priv->port_num, | ret = ib_find_pkey(port_priv->device, port_priv->port_num, | ||||
0xFFFF, &pkey_index); | IB_DEFAULT_PKEY_FULL, &pkey_index); | ||||
if (ret) | if (ret) | ||||
pkey_index = 0; | pkey_index = 0; | ||||
for (i = 0; i < IB_MAD_QPS_CORE; i++) { | for (i = 0; i < IB_MAD_QPS_CORE; i++) { | ||||
qp = port_priv->qp_info[i].qp; | qp = port_priv->qp_info[i].qp; | ||||
if (!qp) | if (!qp) | ||||
continue; | continue; | ||||
/* | /* | ||||
* PKey index for QP1 is irrelevant but | * PKey index for QP1 is irrelevant but | ||||
* one is needed for the Reset to Init transition | * one is needed for the Reset to Init transition | ||||
*/ | */ | ||||
attr->qp_state = IB_QPS_INIT; | attr->qp_state = IB_QPS_INIT; | ||||
attr->pkey_index = pkey_index; | attr->pkey_index = pkey_index; | ||||
attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; | attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; | ||||
ret = ib_modify_qp(qp, attr, IB_QP_STATE | | ret = ib_modify_qp(qp, attr, IB_QP_STATE | | ||||
IB_QP_PKEY_INDEX | IB_QP_QKEY); | IB_QP_PKEY_INDEX | IB_QP_QKEY); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Couldn't change QP%d state to " | dev_err(&port_priv->device->dev, | ||||
"INIT: %d\n", i, ret); | "Couldn't change QP%d state to INIT: %d\n", | ||||
i, ret); | |||||
goto out; | goto out; | ||||
} | } | ||||
attr->qp_state = IB_QPS_RTR; | attr->qp_state = IB_QPS_RTR; | ||||
ret = ib_modify_qp(qp, attr, IB_QP_STATE); | ret = ib_modify_qp(qp, attr, IB_QP_STATE); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Couldn't change QP%d state to " | dev_err(&port_priv->device->dev, | ||||
"RTR: %d\n", i, ret); | "Couldn't change QP%d state to RTR: %d\n", | ||||
i, ret); | |||||
goto out; | goto out; | ||||
} | } | ||||
attr->qp_state = IB_QPS_RTS; | attr->qp_state = IB_QPS_RTS; | ||||
attr->sq_psn = IB_MAD_SEND_Q_PSN; | attr->sq_psn = IB_MAD_SEND_Q_PSN; | ||||
ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); | ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Couldn't change QP%d state to " | dev_err(&port_priv->device->dev, | ||||
"RTS: %d\n", i, ret); | "Couldn't change QP%d state to RTS: %d\n", | ||||
i, ret); | |||||
goto out; | goto out; | ||||
} | } | ||||
} | } | ||||
ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Failed to request completion " | dev_err(&port_priv->device->dev, | ||||
"notification: %d\n", ret); | "Failed to request completion notification: %d\n", | ||||
ret); | |||||
goto out; | goto out; | ||||
} | } | ||||
for (i = 0; i < IB_MAD_QPS_CORE; i++) { | for (i = 0; i < IB_MAD_QPS_CORE; i++) { | ||||
if (!port_priv->qp_info[i].qp) | if (!port_priv->qp_info[i].qp) | ||||
continue; | continue; | ||||
ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); | ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Couldn't post receive WRs\n"); | dev_err(&port_priv->device->dev, | ||||
"Couldn't post receive WRs\n"); | |||||
goto out; | goto out; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
kfree(attr); | kfree(attr); | ||||
return ret; | return ret; | ||||
} | } | ||||
static void qp_event_handler(struct ib_event *event, void *qp_context) | static void qp_event_handler(struct ib_event *event, void *qp_context) | ||||
{ | { | ||||
struct ib_mad_qp_info *qp_info = qp_context; | struct ib_mad_qp_info *qp_info = qp_context; | ||||
/* It's worse than that! He's dead, Jim! */ | /* It's worse than that! He's dead, Jim! */ | ||||
printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", | dev_err(&qp_info->port_priv->device->dev, | ||||
"Fatal error (%d) on MAD QP (%d)\n", | |||||
event->event, qp_info->qp->qp_num); | event->event, qp_info->qp->qp_num); | ||||
} | } | ||||
static void init_mad_queue(struct ib_mad_qp_info *qp_info, | static void init_mad_queue(struct ib_mad_qp_info *qp_info, | ||||
struct ib_mad_queue *mad_queue) | struct ib_mad_queue *mad_queue) | ||||
{ | { | ||||
mad_queue->qp_info = qp_info; | mad_queue->qp_info = qp_info; | ||||
mad_queue->count = 0; | mad_queue->count = 0; | ||||
[29 lines elided] | static int create_mad_qp(struct ib_mad_qp_info *qp_info, | ||||
qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; | qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; | ||||
qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; | qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; | ||||
qp_init_attr.qp_type = qp_type; | qp_init_attr.qp_type = qp_type; | ||||
qp_init_attr.port_num = qp_info->port_priv->port_num; | qp_init_attr.port_num = qp_info->port_priv->port_num; | ||||
qp_init_attr.qp_context = qp_info; | qp_init_attr.qp_context = qp_info; | ||||
qp_init_attr.event_handler = qp_event_handler; | qp_init_attr.event_handler = qp_event_handler; | ||||
qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); | qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); | ||||
if (IS_ERR(qp_info->qp)) { | if (IS_ERR(qp_info->qp)) { | ||||
printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", | dev_err(&qp_info->port_priv->device->dev, | ||||
"Couldn't create ib_mad QP%d\n", | |||||
get_spl_qp_index(qp_type)); | get_spl_qp_index(qp_type)); | ||||
ret = PTR_ERR(qp_info->qp); | ret = PTR_ERR(qp_info->qp); | ||||
goto error; | goto error; | ||||
} | } | ||||
/* Use minimum queue sizes unless the CQ is resized */ | /* Use minimum queue sizes unless the CQ is resized */ | ||||
qp_info->send_queue.max_active = mad_sendq_size; | qp_info->send_queue.max_active = mad_sendq_size; | ||||
qp_info->recv_queue.max_active = mad_recvq_size; | qp_info->recv_queue.max_active = mad_recvq_size; | ||||
return 0; | return 0; | ||||
[18 lines elided] | static int ib_mad_port_open(struct ib_device *device, | ||||
int port_num) | int port_num) | ||||
{ | { | ||||
int ret, cq_size; | int ret, cq_size; | ||||
struct ib_mad_port_private *port_priv; | struct ib_mad_port_private *port_priv; | ||||
unsigned long flags; | unsigned long flags; | ||||
char name[sizeof "ib_mad123"]; | char name[sizeof "ib_mad123"]; | ||||
int has_smi; | int has_smi; | ||||
if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) | |||||
return -EFAULT; | |||||
if (WARN_ON(rdma_cap_opa_mad(device, port_num) && | |||||
rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) | |||||
return -EFAULT; | |||||
/* Create new device info */ | /* Create new device info */ | ||||
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); | port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); | ||||
if (!port_priv) { | if (!port_priv) { | ||||
printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); | dev_err(&device->dev, "No memory for ib_mad_port_private\n"); | ||||
return -ENOMEM; | return -ENOMEM; | ||||
} | } | ||||
port_priv->device = device; | port_priv->device = device; | ||||
port_priv->port_num = port_num; | port_priv->port_num = port_num; | ||||
spin_lock_init(&port_priv->reg_lock); | spin_lock_init(&port_priv->reg_lock); | ||||
INIT_LIST_HEAD(&port_priv->agent_list); | INIT_LIST_HEAD(&port_priv->agent_list); | ||||
init_mad_qp(port_priv, &port_priv->qp_info[0]); | init_mad_qp(port_priv, &port_priv->qp_info[0]); | ||||
init_mad_qp(port_priv, &port_priv->qp_info[1]); | init_mad_qp(port_priv, &port_priv->qp_info[1]); | ||||
cq_size = mad_sendq_size + mad_recvq_size; | cq_size = mad_sendq_size + mad_recvq_size; | ||||
has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND; | has_smi = rdma_cap_ib_smi(device, port_num); | ||||
if (has_smi) | if (has_smi) | ||||
cq_size *= 2; | cq_size *= 2; | ||||
port_priv->cq = ib_create_cq(port_priv->device, | port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, | ||||
ib_mad_thread_completion_handler, | IB_POLL_WORKQUEUE); | ||||
NULL, port_priv, cq_size, 0); | |||||
if (IS_ERR(port_priv->cq)) { | if (IS_ERR(port_priv->cq)) { | ||||
printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); | dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); | ||||
ret = PTR_ERR(port_priv->cq); | ret = PTR_ERR(port_priv->cq); | ||||
goto error3; | goto error3; | ||||
} | } | ||||
port_priv->pd = ib_alloc_pd(device); | port_priv->pd = ib_alloc_pd(device, 0); | ||||
if (IS_ERR(port_priv->pd)) { | if (IS_ERR(port_priv->pd)) { | ||||
printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); | dev_err(&device->dev, "Couldn't create ib_mad PD\n"); | ||||
ret = PTR_ERR(port_priv->pd); | ret = PTR_ERR(port_priv->pd); | ||||
goto error4; | goto error4; | ||||
} | } | ||||
port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); | |||||
if (IS_ERR(port_priv->mr)) { | |||||
printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); | |||||
ret = PTR_ERR(port_priv->mr); | |||||
goto error5; | |||||
} | |||||
if (has_smi) { | if (has_smi) { | ||||
ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); | ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); | ||||
if (ret) | if (ret) | ||||
goto error6; | goto error6; | ||||
} | } | ||||
ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); | ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); | ||||
if (ret) | if (ret) | ||||
goto error7; | goto error7; | ||||
snprintf(name, sizeof name, "ib_mad%d", port_num); | snprintf(name, sizeof name, "ib_mad%d", port_num); | ||||
port_priv->wq = create_singlethread_workqueue(name); | port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); | ||||
if (!port_priv->wq) { | if (!port_priv->wq) { | ||||
ret = -ENOMEM; | ret = -ENOMEM; | ||||
goto error8; | goto error8; | ||||
} | } | ||||
INIT_WORK(&port_priv->work, ib_mad_completion_handler); | |||||
if (sa_cc_init(&port_priv->sa_cc)) | |||||
goto error9; | |||||
spin_lock_irqsave(&ib_mad_port_list_lock, flags); | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | ||||
list_add_tail(&port_priv->port_list, &ib_mad_port_list); | list_add_tail(&port_priv->port_list, &ib_mad_port_list); | ||||
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | ||||
ret = ib_mad_port_start(port_priv); | ret = ib_mad_port_start(port_priv); | ||||
if (ret) { | if (ret) { | ||||
printk(KERN_ERR PFX "Couldn't start port\n"); | dev_err(&device->dev, "Couldn't start port\n"); | ||||
goto error10; | goto error9; | ||||
} | } | ||||
return 0; | return 0; | ||||
error10: | error9: | ||||
spin_lock_irqsave(&ib_mad_port_list_lock, flags); | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | ||||
list_del_init(&port_priv->port_list); | list_del_init(&port_priv->port_list); | ||||
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | ||||
destroy_workqueue(port_priv->wq); | destroy_workqueue(port_priv->wq); | ||||
error9: | |||||
sa_cc_destroy(&port_priv->sa_cc); | |||||
error8: | error8: | ||||
destroy_mad_qp(&port_priv->qp_info[1]); | destroy_mad_qp(&port_priv->qp_info[1]); | ||||
error7: | error7: | ||||
destroy_mad_qp(&port_priv->qp_info[0]); | destroy_mad_qp(&port_priv->qp_info[0]); | ||||
error6: | error6: | ||||
ib_dereg_mr(port_priv->mr); | |||||
error5: | |||||
ib_dealloc_pd(port_priv->pd); | ib_dealloc_pd(port_priv->pd); | ||||
error4: | error4: | ||||
ib_destroy_cq(port_priv->cq); | ib_free_cq(port_priv->cq); | ||||
cleanup_recv_queue(&port_priv->qp_info[1]); | cleanup_recv_queue(&port_priv->qp_info[1]); | ||||
cleanup_recv_queue(&port_priv->qp_info[0]); | cleanup_recv_queue(&port_priv->qp_info[0]); | ||||
error3: | error3: | ||||
kfree(port_priv); | kfree(port_priv); | ||||
return ret; | return ret; | ||||
} | } | ||||
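Two resource changes in ib_mad_port_open() are worth noting. First, ib_alloc_cq(..., IB_POLL_WORKQUEUE) replaces the hand-rolled ib_create_cq() plus completion work item: the core now polls the CQ from a workqueue and dispatches through each WR's cqe->done, which is why port_priv->work and the thread completion handler are gone. Second, the per-port DMA MR disappears because ib_alloc_pd() provides pd->local_dma_lkey (see the sg_list.lkey change in ib_mad_post_receive_mads()). A condensed sketch of the new setup, with example_* names as placeholders:

	/* Sketch: CQ/PD setup with the ib_alloc_cq/ib_alloc_pd API used above. */
	static int example_port_setup(struct ib_device *device, void *ctx,
				      int cq_size, struct ib_cq **cq,
				      struct ib_pd **pd)
	{
		/* Completions are polled from a workqueue; each ib_wc carries
		 * wr_cqe, so no per-port opcode dispatch switch is needed. */
		*cq = ib_alloc_cq(device, ctx, cq_size, 0, IB_POLL_WORKQUEUE);
		if (IS_ERR(*cq))
			return PTR_ERR(*cq);

		*pd = ib_alloc_pd(device, 0);	/* 0: no PD creation flags */
		if (IS_ERR(*pd)) {
			ib_free_cq(*cq);
			return PTR_ERR(*pd);
		}
		return 0;
	}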
/* | /* | ||||
* Close the port | * Close the port | ||||
* If there are no classes using the port, free the port | * If there are no classes using the port, free the port | ||||
* resources (CQ, MR, PD, QP) and remove the port's info structure | * resources (CQ, PD, QP) and remove the port's info structure | ||||
*/ | */ | ||||
static int ib_mad_port_close(struct ib_device *device, int port_num) | static int ib_mad_port_close(struct ib_device *device, int port_num) | ||||
{ | { | ||||
struct ib_mad_port_private *port_priv; | struct ib_mad_port_private *port_priv; | ||||
unsigned long flags; | unsigned long flags; | ||||
spin_lock_irqsave(&ib_mad_port_list_lock, flags); | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | ||||
port_priv = __ib_get_mad_port(device, port_num); | port_priv = __ib_get_mad_port(device, port_num); | ||||
if (port_priv == NULL) { | if (port_priv == NULL) { | ||||
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | ||||
printk(KERN_ERR PFX "Port %d not found\n", port_num); | dev_err(&device->dev, "Port %d not found\n", port_num); | ||||
return -ENODEV; | return -ENODEV; | ||||
} | } | ||||
list_del_init(&port_priv->port_list); | list_del_init(&port_priv->port_list); | ||||
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); | ||||
destroy_workqueue(port_priv->wq); | destroy_workqueue(port_priv->wq); | ||||
sa_cc_destroy(&port_priv->sa_cc); | |||||
destroy_mad_qp(&port_priv->qp_info[1]); | destroy_mad_qp(&port_priv->qp_info[1]); | ||||
destroy_mad_qp(&port_priv->qp_info[0]); | destroy_mad_qp(&port_priv->qp_info[0]); | ||||
ib_dereg_mr(port_priv->mr); | |||||
ib_dealloc_pd(port_priv->pd); | ib_dealloc_pd(port_priv->pd); | ||||
ib_destroy_cq(port_priv->cq); | ib_free_cq(port_priv->cq); | ||||
cleanup_recv_queue(&port_priv->qp_info[1]); | cleanup_recv_queue(&port_priv->qp_info[1]); | ||||
cleanup_recv_queue(&port_priv->qp_info[0]); | cleanup_recv_queue(&port_priv->qp_info[0]); | ||||
/* XXX: Handle deallocation of MAD registration tables */ | /* XXX: Handle deallocation of MAD registration tables */ | ||||
kfree(port_priv); | kfree(port_priv); | ||||
return 0; | return 0; | ||||
} | } | ||||
static void ib_mad_init_device(struct ib_device *device) | static void ib_mad_init_device(struct ib_device *device) | ||||
{ | { | ||||
int start, end, i; | int start, i; | ||||
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) | start = rdma_start_port(device); | ||||
return; | |||||
if (device->node_type == RDMA_NODE_IB_SWITCH) { | for (i = start; i <= rdma_end_port(device); i++) { | ||||
start = 0; | if (!rdma_cap_ib_mad(device, i)) | ||||
end = 0; | continue; | ||||
} else { | |||||
start = 1; | |||||
end = device->phys_port_cnt; | |||||
} | |||||
for (i = start; i <= end; i++) { | |||||
if (ib_mad_port_open(device, i)) { | if (ib_mad_port_open(device, i)) { | ||||
printk(KERN_ERR PFX "Couldn't open %s port %d\n", | dev_err(&device->dev, "Couldn't open port %d\n", i); | ||||
device->name, i); | |||||
goto error; | goto error; | ||||
} | } | ||||
if (ib_agent_port_open(device, i)) { | if (ib_agent_port_open(device, i)) { | ||||
printk(KERN_ERR PFX "Couldn't open %s port %d " | dev_err(&device->dev, | ||||
"for agents\n", | "Couldn't open port %d for agents\n", i); | ||||
device->name, i); | |||||
goto error_agent; | goto error_agent; | ||||
} | } | ||||
} | } | ||||
return; | return; | ||||
error_agent: | error_agent: | ||||
if (ib_mad_port_close(device, i)) | if (ib_mad_port_close(device, i)) | ||||
printk(KERN_ERR PFX "Couldn't close %s port %d\n", | dev_err(&device->dev, "Couldn't close port %d\n", i); | ||||
device->name, i); | |||||
error: | error: | ||||
i--; | while (--i >= start) { | ||||
if (!rdma_cap_ib_mad(device, i)) | |||||
continue; | |||||
while (i >= start) { | |||||
if (ib_agent_port_close(device, i)) | if (ib_agent_port_close(device, i)) | ||||
printk(KERN_ERR PFX "Couldn't close %s port %d " | dev_err(&device->dev, | ||||
"for agents\n", | "Couldn't close port %d for agents\n", i); | ||||
device->name, i); | |||||
if (ib_mad_port_close(device, i)) | if (ib_mad_port_close(device, i)) | ||||
printk(KERN_ERR PFX "Couldn't close %s port %d\n", | dev_err(&device->dev, "Couldn't close port %d\n", i); | ||||
device->name, i); | |||||
i--; | |||||
} | } | ||||
} | } | ||||
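Port iteration now runs rdma_start_port()..rdma_end_port() and skips ports that fail rdma_cap_ib_mad(), instead of special-casing RDMA_NODE_IB_SWITCH. A sketch of the helper semantics assumed here (not the in-tree definitions): switches manage everything through port 0, while HCAs number ports 1..phys_port_cnt.

	/* Sketch of the assumed helper semantics. */
	static inline u8 example_start_port(const struct ib_device *device)
	{
		return rdma_cap_ib_switch(device) ? 0 : 1;
	}

	static inline u8 example_end_port(const struct ib_device *device)
	{
		return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
	}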
static void ib_mad_remove_device(struct ib_device *device) | static void ib_mad_remove_device(struct ib_device *device, void *client_data) | ||||
{ | { | ||||
int i, num_ports, cur_port; | int i; | ||||
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) | for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { | ||||
return; | if (!rdma_cap_ib_mad(device, i)) | ||||
continue; | |||||
if (device->node_type == RDMA_NODE_IB_SWITCH) { | if (ib_agent_port_close(device, i)) | ||||
num_ports = 1; | dev_err(&device->dev, | ||||
cur_port = 0; | "Couldn't close port %d for agents\n", i); | ||||
} else { | if (ib_mad_port_close(device, i)) | ||||
num_ports = device->phys_port_cnt; | dev_err(&device->dev, "Couldn't close port %d\n", i); | ||||
cur_port = 1; | |||||
} | } | ||||
for (i = 0; i < num_ports; i++, cur_port++) { | |||||
if (ib_agent_port_close(device, cur_port)) | |||||
printk(KERN_ERR PFX "Couldn't close %s port %d " | |||||
"for agents\n", | |||||
device->name, cur_port); | |||||
if (ib_mad_port_close(device, cur_port)) | |||||
printk(KERN_ERR PFX "Couldn't close %s port %d\n", | |||||
device->name, cur_port); | |||||
} | } | ||||
} | |||||
static struct ib_client mad_client = { | static struct ib_client mad_client = { | ||||
.name = "mad", | .name = "mad", | ||||
.add = ib_mad_init_device, | .add = ib_mad_init_device, | ||||
.remove = ib_mad_remove_device | .remove = ib_mad_remove_device | ||||
}; | }; | ||||
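The client's remove() callback gains a void *client_data argument in this API revision: the core hands back whatever add() stored with ib_set_client_data(), so remove() no longer has to look it up itself. A minimal sketch of a client under the new signature (example_* names are illustrative):

	/* Sketch: minimal ib_client under the updated remove() signature. */
	static struct ib_client example_client;	/* forward declaration */

	static void example_add_one(struct ib_device *device)
	{
		void *state = NULL;	/* per-device state would be allocated here */

		ib_set_client_data(device, &example_client, state);
	}

	static void example_remove_one(struct ib_device *device, void *client_data)
	{
		/* client_data is the pointer stored by example_add_one() */
	}

	static struct ib_client example_client = {
		.name	= "example",
		.add	= example_add_one,
		.remove	= example_remove_one,
	};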
static int __init ib_mad_init_module(void) | int ib_mad_init(void) | ||||
{ | { | ||||
int ret; | |||||
mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); | ||||
mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); | ||||
mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); | mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); | ||||
mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); | mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); | ||||
ib_mad_cache = kmem_cache_create("ib_mad", | |||||
sizeof(struct ib_mad_private), | |||||
0, | |||||
SLAB_HWCACHE_ALIGN, | |||||
NULL); | |||||
if (!ib_mad_cache) { | |||||
printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); | |||||
ret = -ENOMEM; | |||||
goto error1; | |||||
} | |||||
INIT_LIST_HEAD(&ib_mad_port_list); | INIT_LIST_HEAD(&ib_mad_port_list); | ||||
if (ib_register_client(&mad_client)) { | if (ib_register_client(&mad_client)) { | ||||
printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); | pr_err("Couldn't register ib_mad client\n"); | ||||
ret = -EINVAL; | return -EINVAL; | ||||
goto error2; | |||||
} | } | ||||
return 0; | return 0; | ||||
error2: | |||||
kmem_cache_destroy(ib_mad_cache); | |||||
error1: | |||||
return ret; | |||||
} | } | ||||
static void __exit ib_mad_cleanup_module(void) | void ib_mad_cleanup(void) | ||||
{ | { | ||||
ib_unregister_client(&mad_client); | ib_unregister_client(&mad_client); | ||||
kmem_cache_destroy(ib_mad_cache); | |||||
} | } | ||||
module_init(ib_mad_init_module); | |||||
module_exit(ib_mad_cleanup_module); |
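With module_init()/module_exit() dropped, this file no longer stands alone as its own module: ib_mad_init() and ib_mad_cleanup() become plain entry points for the enclosing ibcore module to call during its own init and exit. A hedged sketch of the expected call site (the real one lives elsewhere in ib_core):

	/* Sketch only: how the ibcore module-level init is assumed to chain in. */
	static int __init ib_core_init(void)
	{
		int ret;

		/* ... other ib_core subsystem setup ... */
		ret = ib_mad_init();
		if (ret) {
			pr_err("Couldn't init mad\n");
			return ret;
		}
		return 0;
	}

	static void __exit ib_core_cleanup(void)
	{
		ib_mad_cleanup();
		/* ... tear down the rest of ib_core ... */
	}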