/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE				\
	(sizeof(struct ethhdr) +		\
	 sizeof(struct iphdr) +			\
	 sizeof(struct udphdr) +		\
	 MAX_UDP_CHUNK)
static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb,
				struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
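/*
 * Drain the deferred transmit queue.  Packets that could not be sent
 * immediately are queued on npinfo->txq and retried here from
 * workqueue context; if the device is still busy, the packet is
 * requeued and the work reschedules itself.
 */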
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}
static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
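/*
 * Release skbs parked on this CPU's completion queue so their memory
 * can be reused while we loop with interrupts disabled.  skbs that
 * carry a destructor are handed back through dev_kfree_skb_any()
 * rather than freed directly.
 */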
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
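/*
 * Allocate an skb for transmission: try a fresh atomic allocation
 * first, fall back to the preallocated pool, and if both fail poll
 * the device in the hope that in-flight buffers complete and can be
 * recycled before retrying.
 */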
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
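/*
 * netpoll_send_udp() builds a complete UDP/IP(v4 or v6)/Ethernet
 * frame by hand.  The regular UDP stack cannot be used here: netpoll
 * must be able to transmit with interrupts disabled and without
 * taking sleeping locks, e.g. while printing an oops.
 */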
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
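/*
 * Example usage (a sketch modeled on the netconsole client; the names
 * and values below are hypothetical):
 *
 *	static struct netpoll np = {
 *		.name        = "mylogger",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *	};
 *
 *	netpoll_setup(&np);			// once, at init time
 *	...
 *	netpoll_send_udp(&np, buf, len);	// from (almost) any context
 */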
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_IP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (memcmp(daddr, &np->local_ip, sizeof(*daddr)))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			skb_put(send_skb, sizeof(struct ipv6hdr));
			hdr = ipv6_hdr(send_skb);

			*(__be32 *)hdr = htonl(0x60000000);

			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			send_skb->transport_header = send_skb->tail;
			skb_put(send_skb, size);

			icmp6h = (struct icmp6hdr *)skb_transport_header(send_skb);
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;
			target = (struct in6_addr *)(skb_transport_header(send_skb) +
						     sizeof(struct icmp6hdr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
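/*
 * Receive-path hook, called for every frame received while netpoll is
 * active on a device.  Returns 1 when the packet was consumed by a
 * registered rx_hook (or trapped), 0 when it should continue up the
 * regular network stack.
 */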
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now We trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		ulen = ntohs(uh->len);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;

		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		ulen = ntohs(uh->len);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (memcmp(&np->local_ip.in6, &ip6h->daddr, sizeof(struct in6_addr)) != 0)
				continue;
			if (memcmp(&np->remote_ip.in6, &ip6h->saddr, sizeof(struct in6_addr)) != 0)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
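/*
 * Configuration strings use the netconsole syntax (see
 * Documentation/networking/netconsole.txt):
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * For example (hypothetical addresses):
 *
 *	6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 */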
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name)
		ndev = __dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);
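/*
 * While the 'trapped' count is non-zero (e.g. a debugger such as
 * kgdboe has claimed the interface), incoming traffic is consumed by
 * netpoll itself: ARP requests and IPv6 neighbour solicitations are
 * queued on neigh_tx and answered by netpoll_neigh_reply() instead of
 * the regular network stack.
 */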
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);