/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE						\
	(sizeof(struct ethhdr) +				\
	 sizeof(struct iphdr) +					\
	 sizeof(struct udphdr) +				\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
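
/*
 * As a usage sketch (instance name illustrative, not from this file):
 * for a netpoll whose ->name is "netconsole",
 * np_info(np, "local port %d\n", port) expands to
 * pr_info("%s: " "local port %d\n", np->name, port), and with the
 * pr_fmt() above prints e.g. "netpoll: netconsole: local port 6665".
 */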

static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);
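
/*
 * netpoll_poll_dev() leans on the driver's ndo_poll_controller hook to
 * simulate the device interrupt with interrupts disabled. A minimal
 * sketch of such a hook for a hypothetical driver (the driver names
 * and interrupt handler below are illustrative, not from this file):
 *
 *	static void mydrv_netpoll(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		disable_irq(dev->irq);
 *		mydrv_interrupt(dev->irq, priv);
 *		enable_irq(dev->irq);
 *	}
 *
 * Many NAPI drivers instead just schedule their NAPI context, which the
 * poll_napi()/poll_one_napi() path above then runs with a 0 budget.
 */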

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
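
/*
 * The disable/enable pair brackets device state changes so that netpoll
 * never polls a half-configured device. A minimal sketch of the pattern
 * used by the dev_open/dev_close paths (simplified, illustrative only):
 *
 *	netpoll_poll_disable(dev);	// waits out any poll in flight
 *	// ...change device state, e.g. bring the interface up or down...
 *	netpoll_poll_enable(dev);	// lets netpoll_poll_dev() proceed
 */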

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
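
/*
 * A minimal sketch of how a netconsole-style client uses this, assuming
 * an np already initialized via netpoll_setup() (the helper name and
 * message are illustrative, not from this file):
 *
 *	static void send_log_line(struct netpoll *np, const char *line)
 *	{
 *		netpoll_send_udp(np, line, strlen(line));
 *	}
 *
 * Messages longer than a single frame must be chunked by the caller;
 * netconsole, for instance, sends its text in bounded-size pieces.
 */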

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
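
/*
 * The option string parsed above follows the netconsole syntax
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * for example (addresses illustrative):
 *
 *	6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * Omitted fields fall back to defaults chosen by the caller (e.g.
 * netconsole) or are auto-detected, such as the local IP address
 * picked up in netpoll_setup() below.
 */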

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
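
/*
 * A minimal sketch of a client wiring this up from module init (names,
 * addresses, and the instance itself are illustrative; error handling
 * trimmed):
 *
 *	static struct netpoll np = {
 *		.name		= "mymodule",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_ip.ip	= cpu_to_be32(0x0a000002),	// 10.0.0.2
 *		.remote_mac	= {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
 *	};
 *
 *	err = netpoll_setup(&np);	// grabs the device, fills local IP
 *	...
 *	netpoll_cleanup(&np);		// on module exit
 */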

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);