summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Zach Brown <zach.brown@oracle.com> 2010-06-04 14:26:32 -0700
committer: Andy Grover <andy.grover@oracle.com> 2010-09-08 18:15:25 -0700
commit: 671202f3491cccdb267f88ad59ba0635aeb2a22e (patch)
tree: 0dbe27af4940fa3c6ede0a96b119653c4805185c
parent: 037f18a3074753991656189a091a5fa371999107 (diff)
downloadlinux-3.10-671202f3491cccdb267f88ad59ba0635aeb2a22e.tar.gz
linux-3.10-671202f3491cccdb267f88ad59ba0635aeb2a22e.tar.bz2
linux-3.10-671202f3491cccdb267f88ad59ba0635aeb2a22e.zip
rds: remove unused rds_send_acked_before()
rds_send_acked_before() wasn't blocking interrupts when acquiring c_lock from
user context, but nothing calls it. Rather than fix its use of c_lock, we just
remove the function.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
-rw-r--r--net/rds/rds.h1
-rw-r--r--net/rds/send.c29
2 files changed, 0 insertions, 30 deletions
diff --git a/net/rds/rds.h b/net/rds/rds.h
index cba5f8bb478..270ded76fd5 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -724,7 +724,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked);
-int rds_send_acked_before(struct rds_connection *conn, u64 seq);
void rds_send_remove_from_sock(struct list_head *messages, int status);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
diff --git a/net/rds/send.c b/net/rds/send.c
index a6295993e3e..b9e41afef32 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -405,35 +405,6 @@ static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
}
/*
- * Returns true if there are no messages on the send and retransmit queues
- * which have a sequence number greater than or equal to the given sequence
- * number.
- */
-int rds_send_acked_before(struct rds_connection *conn, u64 seq)
-{
- struct rds_message *rm, *tmp;
- int ret = 1;
-
- spin_lock(&conn->c_lock);
-
- list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
- }
-
- list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
- }
-
- spin_unlock(&conn->c_lock);
-
- return ret;
-}
-
-/*
* This is pretty similar to what happens below in the ACK
* handling code - except that we call here as soon as we get
* the IB send completion on the RDMA op and the accompanying