author		Mike Marciniszyn <mike.marciniszyn@intel.com>	2012-06-27 18:33:05 -0400
committer	Roland Dreier <roland@purestorage.com>	2012-07-08 18:05:19 -0700
commit		354dff1bd8ccd41b6e8421226d586d35e7fb8920 (patch)
tree		dc228fa9509f66c288f76063dc1fb3e5a6493dbb /drivers/infiniband
parent		6887a4131da3adaab011613776d865f4bcfb5678 (diff)
IB/qib: Fix UC MR refs for immediate operations
An MR reference leak exists when handling UC RDMA writes with immediate
data because we manipulate the reference counts as if the operation had
been a send.

This patch moves the last_imm label so that the RDMA write operations
with immediate data converge at the cq building code.  The copy/mr deref
code is now done correctly prior to the branch to last_imm.

Reviewed-by: Edward Mascarenhas <edward.mascarenhas@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
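
The fix hinges on each receive path dropping the MR references held by its
own SGE list before jumping to the shared completion code.  Below is a
minimal user-space sketch of that reference-release loop; the struct and
function names are illustrative stand-ins for the driver's qib_sge_state /
MR bookkeeping (not the real kernel definitions), and the plain decrement
stands in for the driver's atomic_dec() on the MR refcount.

/*
 * Minimal, self-contained model of the SGE/MR reference pattern in the
 * patch below.  Types and names are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>

struct mr {			/* stand-in for a memory region */
	int refcount;
};

struct sge {			/* stand-in for one scatter/gather entry */
	struct mr *mr;
};

struct sge_state {		/* stand-in for qib_sge_state */
	struct sge sge;		/* current SGE */
	struct sge *sg_list;	/* remaining SGEs */
	unsigned int num_sge;
};

/*
 * Drop the MR reference held by every SGE in the list.  This mirrors the
 * while loop the patch adds to the RDMA-write-with-immediate path: the
 * receive path releases its own references before branching to the shared
 * completion-building code at last_imm.
 */
static void release_sge_refs(struct sge_state *ss)
{
	while (ss->num_sge) {
		ss->sge.mr->refcount--;		/* atomic_dec() in the driver */
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}

int main(void)
{
	struct mr mr1 = { .refcount = 1 };
	struct mr mr2 = { .refcount = 1 };
	struct sge rest[] = { { .mr = &mr2 } };
	struct sge_state ss = {
		.sge = { .mr = &mr1 },
		.sg_list = rest,
		.num_sge = 2,
	};

	release_sge_refs(&ss);
	/*
	 * Both refcounts drop to 0.  Before the fix, the RDMA-write-with-
	 * immediate path instead adjusted the send path's SGE list, so
	 * these references were leaked.
	 */
	printf("mr1.refcount=%d mr2.refcount=%d\n", mr1.refcount, mr2.refcount);
	return 0;
}
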
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/qib/qib_uc.c	| 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index ce7387ff5d9..70b4cb710f9 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -403,7 +403,6 @@ send_last:
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto rewind;
 		wc.opcode = IB_WC_RECV;
-last_imm:
 		qib_copy_sge(&qp->r_sge, data, tlen, 0);
 		while (qp->s_rdma_read_sge.num_sge) {
 			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
@@ -411,6 +410,7 @@ last_imm:
 			qp->s_rdma_read_sge.sge =
 				*qp->s_rdma_read_sge.sg_list++;
 		}
+last_imm:
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
 		wc.qp = &qp->ibqp;
@@ -509,6 +509,12 @@ rdma_last_imm:
 		}
 		wc.byte_len = qp->r_len;
 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
 		goto last_imm;
 
 	case OP(RDMA_WRITE_LAST):
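
With the label moved below the dereference loop, each path finishes its own
copy and MR release first: the send path drains qp->s_rdma_read_sge, while
the RDMA-write-with-immediate path now copies into and drains qp->r_sge
before the goto.  The code after last_imm: is left with only the completion
bookkeeping (wc.wr_id, wc.status, and so on), which is the "cq building
code" the commit message says the two paths now converge on.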