author	Wedson Almeida Filho <wedsonaf@gmail.com>	2013-06-23 23:33:48 -0700
committer	David S. Miller <davem@davemloft.net>	2013-06-24 01:46:01 -0700
commit	aeb193ea6cef28e33589de05ef932424f8e19bde (patch)
tree	222421861caac0d53654984cd227c6946c1a4241
parent	7e2f934dc52089da5b196714f0c286a8e71396c2 (diff)
net: Unmap fragment page once iterator is done
Callers of skb_seq_read() are currently forced to call skb_abort_seq_read()
even when consuming all the data, because the last call to skb_seq_read()
(the one that returns 0 to indicate the end) fails to unmap the last
fragment page.

With this patch, callers are allowed to traverse the SKB data by calling
skb_prepare_seq_read() once and then calling skb_seq_read() repeatedly, as
originally intended (and documented in the original commit 677e90eda); that
is, skb_abort_seq_read() only needs to be called if the sequential read is
actually aborted.

Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
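For illustration only (not part of the commit): a minimal caller-side sketch of
the pattern the message describes, assuming the semantics introduced by this
patch. The helper name sum_skb_data() is hypothetical; only
skb_prepare_seq_read(), skb_seq_read() and skb_abort_seq_read() are the real
kernel API.

#include <linux/skbuff.h>

/* Hypothetical helper: walk all of an skb's data and sum the bytes. */
static unsigned int sum_skb_data(struct sk_buff *skb)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len, consumed = 0, sum = 0;

        /* Prepare the iterator once over the whole skb. */
        skb_prepare_seq_read(skb, 0, skb->len, &st);

        /* Read blocks until skb_seq_read() returns 0 (end of data). */
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                unsigned int i;

                for (i = 0; i < len; i++)
                        sum += data[i];
                consumed += len;
        }

        /*
         * With this patch, no skb_abort_seq_read() is needed here: the
         * final skb_seq_read() call (the one returning 0) unmaps the
         * last fragment page itself.
         */
        return sum;
}

A caller that stops before skb_seq_read() returns 0 (for example by breaking
out of the loop early) must still call skb_abort_seq_read(&st) to release the
mapping.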
 drivers/scsi/libiscsi_tcp.c | 1 -
 net/batman-adv/main.c       | 1 -
 net/core/skbuff.c           | 7 ++++++-
 3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 552e8a2b6f5f..448eae850b9c 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -906,7 +906,6 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
                         ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
                                       consumed);
                         *status = ISCSI_TCP_SKB_DONE;
-                        skb_abort_seq_read(&seq);
                         goto skb_done;
                 }
                 BUG_ON(segment->copied >= segment->size);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 51aafd669cbb..08125f3f6064 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -473,7 +473,6 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
                 crc = crc32c(crc, data, len);
                 consumed += len;
         }
-        skb_abort_seq_read(&st);
 
         return htonl(crc);
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edf37578e21e..9f73eca29fbe 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2541,8 +2541,13 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
         unsigned int block_limit, abs_offset = consumed + st->lower_offset;
         skb_frag_t *frag;
 
-        if (unlikely(abs_offset >= st->upper_offset))
+        if (unlikely(abs_offset >= st->upper_offset)) {
+                if (st->frag_data) {
+                        kunmap_atomic(st->frag_data);
+                        st->frag_data = NULL;
+                }
                 return 0;
+        }
 
 next_skb:
         block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;