From: Patrick McHardy
Date: Wed, 17 Apr 2013 06:47:05 +0000 (+0000)
Subject: netlink: add flow control for memory mapped I/O
X-Git-Tag: MMI-PSA29.97-13-9~14469^2~96^2~4
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=cd1df525da59c64244d27b4548ff5d132489488a;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

netlink: add flow control for memory mapped I/O

Add flow control for memory mapped RX. Since user-space usually doesn't
invoke recvmsg() when using memory mapped I/O, flow control is performed
in netlink_poll(). Dumps are allowed to continue if at least half of the
ring frames are unused.

Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d120b5d4d86a..2a3e9ba814c4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3,6 +3,7 @@
  *
  *		Authors:	Alan Cox
  *				Alexey Kuznetsov
+ *				Patrick McHardy
  *
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
@@ -110,6 +111,29 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
 	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
+static void netlink_overrun(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
+		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+			sk->sk_err = ENOBUFS;
+			sk->sk_error_report(sk);
+		}
+	}
+	atomic_inc(&sk->sk_drops);
+}
+
+static void netlink_rcv_wake(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (skb_queue_empty(&sk->sk_receive_queue))
+		clear_bit(NETLINK_CONGESTED, &nlk->state);
+	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+		wake_up_interruptible(&nlk->wait);
+}
+
 #ifdef CONFIG_NETLINK_MMAP
 static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
 {
@@ -441,15 +465,48 @@ static void netlink_forward_ring(struct netlink_ring *ring)
 	} while (ring->head != head);
 }
 
+static bool netlink_dump_space(struct netlink_sock *nlk)
+{
+	struct netlink_ring *ring = &nlk->rx_ring;
+	struct nl_mmap_hdr *hdr;
+	unsigned int n;
+
+	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+	if (hdr == NULL)
+		return false;
+
+	n = ring->head + ring->frame_max / 2;
+	if (n > ring->frame_max)
+		n -= ring->frame_max;
+
+	hdr = __netlink_lookup_frame(ring, n);
+
+	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
+}
+
 static unsigned int netlink_poll(struct file *file, struct socket *sock,
 				 poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
 	unsigned int mask;
+	int err;
 
-	if (nlk->cb != NULL && nlk->rx_ring.pg_vec != NULL)
-		netlink_dump(sk);
+	if (nlk->rx_ring.pg_vec != NULL) {
+		/* Memory mapped sockets don't call recvmsg(), so flow control
+		 * for dumps is performed here. A dump is allowed to continue
+		 * if at least half the ring is unused.
+		 */
+		while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+			err = netlink_dump(sk);
+			if (err < 0) {
+				sk->sk_err = err;
+				sk->sk_error_report(sk);
+				break;
+			}
+		}
+		netlink_rcv_wake(sk);
+	}
 
 	mask = datagram_poll(file, sock, wait);
 
@@ -623,8 +680,7 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 	if (hdr == NULL) {
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		kfree_skb(skb);
-		sk->sk_err = ENOBUFS;
-		sk->sk_error_report(sk);
+		netlink_overrun(sk);
 		return;
 	}
 	netlink_increment_head(ring);
@@ -1329,19 +1385,6 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
 	return 0;
 }
 
-static void netlink_overrun(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
-		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
-			sk->sk_err = ENOBUFS;
-			sk->sk_error_report(sk);
-		}
-	}
-	atomic_inc(&sk->sk_drops);
-}
-
 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 {
 	struct sock *sock;
@@ -1484,16 +1527,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 	return skb;
 }
 
-static void netlink_rcv_wake(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (skb_queue_empty(&sk->sk_receive_queue))
-		clear_bit(NETLINK_CONGESTED, &nlk->state);
-	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
-		wake_up_interruptible(&nlk->wait);
-}
-
 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 				  struct sock *ssk)
 {
@@ -1597,6 +1630,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 err2:
 	kfree_skb(skb);
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
+	netlink_overrun(sk);
 err1:
 	sock_put(sk);
 	return NULL;
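
For context, a minimal sketch of the user-space consumer this patch is
written against: a reader that never calls recvmsg() and instead drives
dump continuation through poll(). It assumes the NETLINK_RX_RING API from
the same patch series (struct nl_mmap_req, struct nl_mmap_hdr and the
NL_MMAP_STATUS_* constants in <linux/netlink.h> of this era; the whole
mmap interface was removed again in Linux 4.6). The ring geometry and the
use of NETLINK_ROUTE are illustrative assumptions, not part of the patch.

/* Sketch only: poll()-driven consumer of a memory mapped netlink RX ring. */
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK	270
#endif

#define FRAME_SIZE	16384	/* illustrative geometry */
#define FRAME_NR	64

int main(void)
{
	struct nl_mmap_req req = {
		.nm_block_size	= FRAME_SIZE * FRAME_NR,	/* one block */
		.nm_block_nr	= 1,
		.nm_frame_size	= FRAME_SIZE,
		.nm_frame_nr	= FRAME_NR,
	};
	unsigned int frame = 0;
	unsigned char *ring;
	struct pollfd pfd;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
		return 1;

	ring = mmap(NULL, req.nm_block_size * req.nm_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return 1;

	/* ... issue a dump request with sendto() as usual ... */

	for (;;) {
		struct nl_mmap_hdr *hdr = (void *)(ring + frame * FRAME_SIZE);

		if (hdr->nm_status != NL_MMAP_STATUS_VALID) {
			/* No frame ready for us. This poll() call is what
			 * now enters netlink_poll() and lets the kernel keep
			 * filling frames while netlink_dump_space() sees at
			 * least half the ring unused. Dump errors are set on
			 * sk->sk_err by the patch and surface as POLLERR.
			 */
			pfd.fd = fd;
			pfd.events = POLLIN | POLLERR;
			if (poll(&pfd, 1, -1) < 0 || (pfd.revents & POLLERR))
				return 1;
			continue;
		}

		/* Payload starts at hdr + NL_MMAP_HDRLEN and is hdr->nm_len
		 * bytes long; process it here.
		 */

		/* Releasing the frame is what allows the kernel-side
		 * netlink_dump_space() check to succeed again.
		 */
		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
		frame = (frame + 1) % FRAME_NR;
	}
}

A complete consumer would also handle NL_MMAP_STATUS_COPY frames (messages
too large for a ring frame, which must be fetched with recvmsg()); see
Documentation/networking/netlink_mmap.txt from the same series for the
full protocol.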