-rw-r--r--  drivers/infiniband/core/mad.c        22
-rw-r--r--  drivers/infiniband/core/user_mad.c   87
-rw-r--r--  include/rdma/ib_mad.h                 7
3 files changed, 95 insertions, 21 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6..1c3cfbbe6a9 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
return 0;
}
+int ib_response_mad(struct ib_mad *mad)
+{
+ return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
+ (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+ ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
+ (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+}
+EXPORT_SYMBOL(ib_response_mad);
+
/*
* ib_register_mad_agent - Register to send/receive MADs
*/
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
-static inline int response_mad(struct ib_mad *mad)
-{
- /* Trap represses are responses although response bit is reset */
- return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
- (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
- if (response_mad(&mad_priv->mad.mad) &&
+ if (ib_response_mad(&mad_priv->mad.mad) &&
mad_agent_priv->agent.recv_handler) {
local->mad_priv = mad_priv;
local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
unsigned long flags;
spin_lock_irqsave(&port_priv->reg_lock, flags);
- if (response_mad(mad)) {
+ if (ib_response_mad(mad)) {
u32 hi_tid;
struct ib_mad_agent_private *entry;
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
}
/* Complete corresponding request */
- if (response_mad(mad_recv_wc->recv_buf.mad)) {
+ if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
if (!mad_send_wr) {
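
The receive-path changes above rely on the TID convention used throughout the MAD layer: for a request, the high 32 bits of the (big-endian) transaction ID carry the sending agent's hi_tid, so find_mad_agent() can recover the owning agent from a response by shifting the TID right by 32. A minimal userspace sketch of that convention follows; stamp_hi_tid(), tid_to_agent() and the example hi_tid value are illustrative helpers, not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>		/* be64toh()/htobe64() on glibc */

/* Stamp a kernel-assigned agent id into the high 32 bits of the
 * big-endian transaction ID, keeping the requester's low 32 bits
 * (same arithmetic as the ib_umad_write() hunk further below). */
static uint64_t stamp_hi_tid(uint64_t wire_tid, uint32_t hi_tid)
{
	return htobe64(((uint64_t) hi_tid << 32) |
		       (be64toh(wire_tid) & 0xffffffffULL));
}

/* Recover the agent id from a response TID, as find_mad_agent() does. */
static uint32_t tid_to_agent(uint64_t wire_tid)
{
	return (uint32_t) (be64toh(wire_tid) >> 32);
}

int main(void)
{
	uint64_t tid = htobe64(0x1122334455667788ULL);	/* TID chosen by the requester */

	tid = stamp_hi_tid(tid, 0x2a);			/* 0x2a: made-up hi_tid */
	printf("wire TID 0x%016llx -> agent %u\n",
	       (unsigned long long) be64toh(tid), tid_to_agent(tid));
	return 0;
}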
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2..1273f8807e8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
struct ib_umad_file {
struct ib_umad_port *port;
struct list_head recv_list;
+ struct list_head send_list;
struct list_head port_list;
spinlock_t recv_lock;
+ spinlock_t send_lock;
wait_queue_head_t recv_wait;
struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
return ret;
}
+static void dequeue_send(struct ib_umad_file *file,
+ struct ib_umad_packet *packet)
+ {
+ spin_lock_irq(&file->send_lock);
+ list_del(&packet->list);
+ spin_unlock_irq(&file->send_lock);
+ }
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *send_wc)
{
struct ib_umad_file *file = agent->context;
struct ib_umad_packet *packet = send_wc->send_buf->context[0];
+ dequeue_send(file, packet);
ib_destroy_ah(packet->msg->ah);
ib_free_send_mad(packet->msg);
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
return 0;
}
+static int same_destination(struct ib_user_mad_hdr *hdr1,
+ struct ib_user_mad_hdr *hdr2)
+{
+ if (!hdr1->grh_present && !hdr2->grh_present)
+ return (hdr1->lid == hdr2->lid);
+
+ if (hdr1->grh_present && hdr2->grh_present)
+ return !memcmp(hdr1->gid, hdr2->gid, 16);
+
+ return 0;
+}
+
+static int is_duplicate(struct ib_umad_file *file,
+ struct ib_umad_packet *packet)
+{
+ struct ib_umad_packet *sent_packet;
+ struct ib_mad_hdr *sent_hdr, *hdr;
+
+ hdr = (struct ib_mad_hdr *) packet->mad.data;
+ list_for_each_entry(sent_packet, &file->send_list, list) {
+ sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
+
+ if ((hdr->tid != sent_hdr->tid) ||
+ (hdr->mgmt_class != sent_hdr->mgmt_class))
+ continue;
+
+ /*
+ * No need to be overly clever here. If two new operations have
+ * the same TID, reject the second as a duplicate. This is more
+ * restrictive than required by the spec.
+ */
+ if (!ib_response_mad((struct ib_mad *) hdr)) {
+ if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ return 1;
+ continue;
+ } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ continue;
+
+ if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
+ return 1;
+ }
+
+ return 0;
+}
+
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
struct ib_ah_attr ah_attr;
struct ib_ah *ah;
struct ib_rmpp_mad *rmpp_mad;
- u8 method;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
/*
- * If userspace is generating a request that will generate a
- * response, we need to make sure the high-order part of the
- * transaction ID matches the agent being used to send the
- * MAD.
+ * Set the high-order part of the transaction ID to make MADs from
+ * different agents unique, and allow routing responses back to the
+ * original requestor.
*/
- method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
-
- if (!(method & IB_MGMT_METHOD_RESP) &&
- method != IB_MGMT_METHOD_TRAP_REPRESS &&
- method != IB_MGMT_METHOD_SEND) {
+ if (!ib_response_mad(packet->msg->mad)) {
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff));
+ rmpp_mad->mad_hdr.tid = *tid;
+ }
+
+ spin_lock_irq(&file->send_lock);
+ ret = is_duplicate(file, packet);
+ if (!ret)
+ list_add_tail(&packet->list, &file->send_list);
+ spin_unlock_irq(&file->send_lock);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_msg;
}
ret = ib_post_send_mad(packet->msg, NULL);
if (ret)
- goto err_msg;
+ goto err_send;
up_read(&file->port->mutex);
return count;
+err_send:
+ dequeue_send(file, packet);
err_msg:
ib_free_send_mad(packet->msg);
err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
}
spin_lock_init(&file->recv_lock);
+ spin_lock_init(&file->send_lock);
INIT_LIST_HEAD(&file->recv_list);
+ INIT_LIST_HEAD(&file->send_list);
init_waitqueue_head(&file->recv_wait);
file->port = port;
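
is_duplicate() above compares a new send against every MAD still on send_list with the same TID and management class: two outstanding requests always clash, two responses clash only when they address the same LID or GID, and a request never clashes with a response. A compact userspace model of that rule follows; struct sent_mad, is_dup() and this same_destination() are simplified stand-ins for the kernel structures, keeping only the fields the check reads.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Only the fields the duplicate check reads. */
struct sent_mad {
	uint64_t tid;		/* transaction ID, treated as opaque */
	uint8_t  mgmt_class;
	int      is_response;	/* what ib_response_mad() would report */
	int      grh_present;
	uint16_t lid;
	uint8_t  gid[16];
};

static int same_destination(const struct sent_mad *a, const struct sent_mad *b)
{
	if (!a->grh_present && !b->grh_present)
		return a->lid == b->lid;
	if (a->grh_present && b->grh_present)
		return !memcmp(a->gid, b->gid, 16);
	return 0;
}

/* Equivalent of is_duplicate(): scan the outstanding sends for a clash. */
static int is_dup(const struct sent_mad *out, int n, const struct sent_mad *m)
{
	for (int i = 0; i < n; i++) {
		const struct sent_mad *sent = &out[i];

		if (m->tid != sent->tid || m->mgmt_class != sent->mgmt_class)
			continue;
		if (!m->is_response && !sent->is_response)
			return 1;	/* two requests with one TID */
		if (m->is_response && sent->is_response &&
		    same_destination(m, sent))
			return 1;	/* two responses to one destination */
		/* a request and a response sharing a TID may coexist */
	}
	return 0;
}

int main(void)
{
	struct sent_mad outstanding[] = {
		{ .tid = 0x10, .mgmt_class = 0x01, .is_response = 0, .lid = 5 },
	};
	struct sent_mad again = { .tid = 0x10, .mgmt_class = 0x01,
				  .is_response = 0, .lid = 9 };
	struct sent_mad resp  = { .tid = 0x10, .mgmt_class = 0x01,
				  .is_response = 1, .lid = 5 };

	printf("%d %d\n", is_dup(outstanding, 1, &again),	/* 1: rejected as duplicate */
			  is_dup(outstanding, 1, &resp));	/* 0: allowed */
	return 0;
}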
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5ff77558013..585d28e960d 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -75,6 +75,7 @@
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
#define IB_MGMT_METHOD_RESP 0x80
+#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
#define IB_MGMT_MAX_METHODS 128
@@ -247,6 +248,12 @@ struct ib_mad_send_buf {
};
/**
+ * ib_response_mad - Returns if the specified MAD has been generated in
+ * response to a sent request or trap.
+ */
+int ib_response_mad(struct ib_mad *mad);
+
+/**
* ib_get_rmpp_resptime - Returns the RMPP response time.
* @rmpp_hdr: An RMPP header.
*/
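
For reference, the test that ib_response_mad() centralizes can be modelled in a few lines of userspace C: a MAD is a response if the response bit is set in the method, if the method is TrapRepress, or if it is a BM-class MAD whose attribute modifier carries the response bit (the case IB_BM_ATTR_MOD_RESP covers, since BM uses the Send method in both directions). The sketch below is illustrative only; struct mad_hdr is a cut-down stand-in for struct ib_mad_hdr, and the constant values are taken from ib_mad.h.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl(): attr_mod is big-endian on the wire */

#define IB_MGMT_METHOD_SEND		0x03
#define IB_MGMT_METHOD_TRAP_REPRESS	0x07
#define IB_MGMT_METHOD_RESP		0x80
#define IB_MGMT_CLASS_BM		0x05	/* value as in ib_mad.h */
#define IB_BM_ATTR_MOD_RESP		htonl(1)

/* Cut-down stand-in for struct ib_mad_hdr. */
struct mad_hdr {
	uint8_t  mgmt_class;
	uint8_t  method;
	uint32_t attr_mod;	/* stored big-endian */
};

/* Same three cases as ib_response_mad(). */
static int is_response(const struct mad_hdr *h)
{
	return (h->method & IB_MGMT_METHOD_RESP) ||
	       (h->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
	       ((h->mgmt_class == IB_MGMT_CLASS_BM) &&
		(h->attr_mod & IB_BM_ATTR_MOD_RESP));
}

int main(void)
{
	struct mad_hdr get      = { .mgmt_class = 0x01, .method = 0x01 };	/* SubnGet */
	struct mad_hdr get_resp = { .mgmt_class = 0x01, .method = 0x81 };	/* SubnGetResp */
	struct mad_hdr bm_resp  = { .mgmt_class = IB_MGMT_CLASS_BM,
				    .method = IB_MGMT_METHOD_SEND,
				    .attr_mod = IB_BM_ATTR_MOD_RESP };		/* BM response via attr_mod */

	printf("%d %d %d\n", is_response(&get), is_response(&get_resp),
	       is_response(&bm_resp));						/* prints: 0 1 1 */
	return 0;
}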