memcpy(&dst[8], src, 8);
}
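+
+/* Free every skb on the list and, if the skb still holds a device
+ * reference from the HiperSockets send path, drop it before freeing.
+ */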
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ if (skb->dev)
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
+}
+
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, node, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- skb_queue_purge(&iucv->send_skb_q);
+ iucv_skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
case IUCV_SEVERED:
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
- rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
- rcu_read_unlock();
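+ /* take a reference on the device; it is released when the skb is
+ * unlinked from send_skb_q or freed on an error path
+ */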
+ skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
if (!skb->dev)
return -ENODEV;
- if (!(skb->dev->flags & IFF_UP))
- return -ENETDOWN;
+ if (!(skb->dev->flags & IFF_UP)) {
+ dev_put(skb->dev);
+ return -ENETDOWN;
+ }
err = dev_queue_xmit(skb);
if (err) {
skb_unlink(nskb, &iucv->send_skb_q);
+ dev_put(nskb->dev);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
skb = sock_alloc_send_skb(sk, blen, 1, &err);
if (skb) {
- skb_reserve(skb,
- sizeof(struct af_iucv_trans_hdr) +
- ETH_HLEN);
+ skb_reserve(skb, blen);
err = afiucv_hs_send(NULL, sk, skb,
AF_IUCV_FLAG_FIN);
}
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- case IUCV_DISCONN:
+ case IUCV_DISCONN: /* fall through */
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
- skb_queue_purge(&iucv->send_skb_q);
+ iucv_skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
break;
if (!memcmp(dev->perm_addr, uid, 8)) {
memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
- sock->sk->sk_bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_HIPER;
if (!iucv->msglimit)
return len;
fail:
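+ /* drop the device reference the skb may still hold from the
+ * HiperSockets send path
+ */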
+ if (skb->dev)
+ dev_put(skb->dev);
kfree_skb(skb);
out:
release_sock(sk);
blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
sskb = sock_alloc_send_skb(sk, blen, 1, &err);
if (sskb) {
- skb_reserve(sskb,
- sizeof(struct af_iucv_trans_hdr)
- + ETH_HLEN);
+ skb_reserve(sskb, blen);
err = afiucv_hs_send(NULL, sk, sskb,
AF_IUCV_FLAG_WIN);
}
case TX_NOTIFY_OK:
__skb_unlink(this, list);
iucv_sock_wake_msglim(sk);
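+ /* the skb is leaving send_skb_q, so drop its device reference
+ * before freeing it
+ */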
+ dev_put(this->dev);
kfree_skb(this);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
__skb_unlink(this, list);
atomic_dec(&iucv->pendings);
if (atomic_read(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
+ dev_put(this->dev);
kfree_skb(this);
break;
case TX_NOTIFY_UNREACHABLE:
case TX_NOTIFY_GENERALERROR:
case TX_NOTIFY_DELAYED_GENERALERROR:
__skb_unlink(this, list);
+ dev_put(this->dev);
kfree_skb(this);
if (!list_empty(&iucv->accept_q))
sk->sk_state = IUCV_SEVERED;