author     Ursula Braun <ursula.braun@de.ibm.com>    2011-12-19 22:56:29 +0000
committer  David S. Miller <davem@davemloft.net>     2011-12-20 14:05:03 -0500
commit     816abbadf981e64b2342e1a875592623619560a4
tree       c930e4633966e25cb129ff9d86c21281780d4550 /net/iucv
parent     42bd48e0145567acf7b3d2ae48bea765315bdd89
af_iucv: release reference to HS device
For the HiperSockets transport, sent skbs are bound to one of the
available HiperSockets devices. Add the missing release of the reference
to a HiperSockets device before freeing an skb.
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
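
In short, afiucv_hs_send() now pins the bound HiperSockets device with
dev_get_by_index(), which takes a reference that must be returned with
dev_put() on every path that frees the skb; the new iucv_skb_queue_purge()
helper and the extra dev_put() calls in the tx-notify callback in the diff
below do exactly that. A minimal sketch of the pairing follows; the
example_* helper names are illustrative and not part of af_iucv.c:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: example_* helpers are hypothetical; dev_get_by_index(),
 * dev_put() and kfree_skb() are the real kernel APIs used by the patch.
 */
static int example_bind_skb_to_dev(struct net *net, struct sk_buff *skb,
				   int ifindex)
{
	/* Unlike dev_get_by_index_rcu(), dev_get_by_index() takes a
	 * reference, so skb->dev stays valid after the lookup returns.
	 */
	skb->dev = dev_get_by_index(net, ifindex);
	if (!skb->dev)
		return -ENODEV;
	return 0;
}

static void example_free_bound_skb(struct sk_buff *skb)
{
	/* Drop the device reference before the skb (and its dev pointer)
	 * goes away; forgetting this is the leak the patch fixes.
	 */
	if (skb->dev)
		dev_put(skb->dev);
	kfree_skb(skb);
}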
Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/af_iucv.c | 37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 32a5010b294..ad90cf29c96 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -130,6 +130,17 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
 	memcpy(&dst[8], src, 8);
 }
 
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		if (skb->dev)
+			dev_put(skb->dev);
+		kfree_skb(skb);
+	}
+}
+
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -164,7 +175,7 @@ static int afiucv_pm_freeze(struct device *dev)
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
 		case IUCV_SEVERED:
@@ -366,9 +377,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	if (imsg)
 		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-	rcu_read_lock();
-	skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
-	rcu_read_unlock();
+	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
 	if (!skb->dev)
 		return -ENODEV;
 	if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +397,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	err = dev_queue_xmit(skb);
 	if (err) {
 		skb_unlink(nskb, &iucv->send_skb_q);
+		dev_put(nskb->dev);
 		kfree_skb(nskb);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -481,16 +491,14 @@ static void iucv_sock_close(struct sock *sk)
 			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
 			skb = sock_alloc_send_skb(sk, blen, 1, &err);
 			if (skb) {
-				skb_reserve(skb,
-					sizeof(struct af_iucv_trans_hdr) +
-					ETH_HLEN);
+				skb_reserve(skb, blen);
 				err = afiucv_hs_send(NULL, sk, skb,
 						     AF_IUCV_FLAG_FIN);
 			}
 			sk->sk_state = IUCV_DISCONN;
 			sk->sk_state_change(sk);
 		}
-	case IUCV_DISCONN:
+	case IUCV_DISCONN:   /* fall through */
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -520,7 +528,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_err = ECONNRESET;
 		sk->sk_state_change(sk);
 
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		break;
 
@@ -739,7 +747,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 			if (!memcmp(dev->perm_addr, uid, 8)) {
 				memcpy(iucv->src_name, sa->siucv_name, 8);
 				memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
-				sock->sk->sk_bound_dev_if = dev->ifindex;
+				sk->sk_bound_dev_if = dev->ifindex;
 				sk->sk_state = IUCV_BOUND;
 				iucv->transport = AF_IUCV_TRANS_HIPER;
 				if (!iucv->msglimit)
@@ -1225,6 +1233,8 @@ release:
 	return len;
 
 fail:
+	if (skb->dev)
+		dev_put(skb->dev);
 	kfree_skb(skb);
 out:
 	release_sock(sk);
@@ -1441,9 +1451,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 			       ETH_HLEN;
 			sskb = sock_alloc_send_skb(sk, blen, 1, &err);
 			if (sskb) {
-				skb_reserve(sskb,
-					sizeof(struct af_iucv_trans_hdr)
-					+ ETH_HLEN);
+				skb_reserve(sskb, blen);
 				err = afiucv_hs_send(NULL, sk, sskb,
 						     AF_IUCV_FLAG_WIN);
 			}
@@ -2261,6 +2269,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			case TX_NOTIFY_OK:
 				__skb_unlink(this, list);
 				iucv_sock_wake_msglim(sk);
+				dev_put(this->dev);
 				kfree_skb(this);
 				break;
 			case TX_NOTIFY_PENDING:
@@ -2271,6 +2280,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 				atomic_dec(&iucv->pendings);
 				if (atomic_read(&iucv->pendings) <= 0)
 					iucv_sock_wake_msglim(sk);
+				dev_put(this->dev);
 				kfree_skb(this);
 				break;
 			case TX_NOTIFY_UNREACHABLE:
@@ -2279,6 +2289,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			case TX_NOTIFY_GENERALERROR:
 			case TX_NOTIFY_DELAYED_GENERALERROR:
 				__skb_unlink(this, list);
+				dev_put(this->dev);
 				kfree_skb(this);
 				if (!list_empty(&iucv->accept_q))
 					sk->sk_state = IUCV_SEVERED;