/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
static struct sk_buff_head skb_pool;

static atomic_t trapped;

DEFINE_STATIC_SRCU(netpoll_srcu);
#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
MODULE_PARM_DESC(carrier_timeout, "netpoll: carrier timeout in seconds");
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
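
/*
 * queue_process() is the deferred-transmit worker: packets that could
 * not be sent from netpoll_send_skb_on_dev() sit on npinfo->txq and are
 * retried here, with the work item rescheduling itself while the device
 * stays busy.
 */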
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
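
/*
 * checksum_udp() returns 0 when the UDP checksum of a received frame is
 * known to be good (or is absent), falling back to a full software
 * verification via __skb_checksum_complete() otherwise.
 */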
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}
static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}
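
/*
 * netpoll_rx_disable()/netpoll_rx_enable() bracket device state changes:
 * the dev_open()/dev_close() paths use them to hold npinfo->dev_lock so
 * that no netpoll rx activity runs while the device is reconfigured.
 * The SRCU read section only protects the npinfo lookup itself.
 */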
int netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);

	return 0;
}
EXPORT_SYMBOL(netpoll_rx_disable);
void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
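
/*
 * netpoll_send_udp() hand-builds the complete UDP/IP(v4 or v6)/Ethernet
 * frame in a preallocated skb and pushes it out via netpoll_send_skb(),
 * bypassing the normal protocol stack entirely.
 */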
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
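
/*
 * netpoll_neigh_reply() answers ARP requests and IPv6 neighbour
 * solicitations on behalf of the attached netpoll clients, so the
 * remote host can resolve us even when the regular stack is not being
 * serviced.
 */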
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_IP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			skb_put(send_skb, sizeof(struct ipv6hdr));
			hdr = ipv6_hdr(send_skb);

			*(__be32 *)hdr = htonl(0x60000000);

			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			send_skb->transport_header = send_skb->tail;
			skb_put(send_skb, size);

			icmp6h = (struct icmp6hdr *)skb_transport_header(send_skb);
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;
			target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
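
/*
 * pkt_is_ns() tells whether a frame is an IPv6 neighbour solicitation
 * that the trapped rx path should queue for netpoll_neigh_reply().
 */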
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
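
/*
 * __netpoll_rx() is the receive hook: it returns 1 when the skb was
 * consumed by netpoll (delivered to an rx_hook or trapped) and 0 when
 * the normal network stack should keep processing it.
 */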
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now We trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		ulen = ntohs(uh->len);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		ulen = ntohs(uh->len);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
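
/*
 * netpoll_parse_ip_addr() returns 0 for an IPv4 address, 1 for an IPv6
 * address, and -1 when the string parses as neither.
 */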
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
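
/*
 * netpoll_parse_options() takes a netconsole-style configuration string
 * of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "4444@10.0.0.1/eth1,9353@10.0.0.2/12:34:56:78:9a:bc", where the
 * optional fields fall back to their defaults when left empty.
 */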
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
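
/*
 * netpoll_setup() is the top-level entry point: it resolves the device
 * by name, forces it up and waits for carrier if needed, picks a local
 * address when none was configured, pre-fills the skb pool, and then
 * hands off to __netpoll_setup().
 */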
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
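
/*
 * netpoll_trap()/netpoll_set_trap() let a client (e.g. a dump facility)
 * divert all received traffic to netpoll while a poll is in progress;
 * the trapped count is consulted throughout the rx path above.
 */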
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);