]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Common framework for low-level network console, dump, and debugger code | |
3 | * | |
4 | * Sep 8 2003 Matt Mackall <mpm@selenic.com> | |
5 | * | |
6 | * based on the netconsole code from: | |
7 | * | |
8 | * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com> | |
9 | * Copyright (C) 2002 Red Hat, Inc. | |
10 | */ | |
11 | ||
e6ec2693 JP |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | ||
bff38771 | 14 | #include <linux/moduleparam.h> |
4cd5773a | 15 | #include <linux/kernel.h> |
1da177e4 LT |
16 | #include <linux/netdevice.h> |
17 | #include <linux/etherdevice.h> | |
18 | #include <linux/string.h> | |
14c85021 | 19 | #include <linux/if_arp.h> |
1da177e4 LT |
20 | #include <linux/inetdevice.h> |
21 | #include <linux/inet.h> | |
22 | #include <linux/interrupt.h> | |
23 | #include <linux/netpoll.h> | |
24 | #include <linux/sched.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/rcupdate.h> | |
27 | #include <linux/workqueue.h> | |
5a0e3ad6 | 28 | #include <linux/slab.h> |
bc3b2d7f | 29 | #include <linux/export.h> |
689971b4 | 30 | #include <linux/if_vlan.h> |
1da177e4 LT |
31 | #include <net/tcp.h> |
32 | #include <net/udp.h> | |
b3d936f3 CW |
33 | #include <net/addrconf.h> |
34 | #include <net/ndisc.h> | |
35 | #include <net/ip6_checksum.h> | |
1da177e4 | 36 | #include <asm/unaligned.h> |
9cbc1cb8 | 37 | #include <trace/events/napi.h> |
1da177e4 LT |
38 | |
39 | /* | |
40 | * We maintain a small pool of fully-sized skbs, to make sure the | |
41 | * message gets out even in extreme OOM situations. | |
42 | */ | |
43 | ||
44 | #define MAX_UDP_CHUNK 1460 | |
45 | #define MAX_SKBS 32 | |
1da177e4 | 46 | |
a1bcfacd | 47 | static struct sk_buff_head skb_pool; |
1da177e4 | 48 | |
7f9421c2 | 49 | DEFINE_STATIC_SRCU(netpoll_srcu); |
ca99ca14 | 50 | |
2bdfe0ba | 51 | #define USEC_PER_POLL 50 |
1da177e4 | 52 | |
6f706245 JP |
53 | #define MAX_SKB_SIZE \ |
54 | (sizeof(struct ethhdr) + \ | |
55 | sizeof(struct iphdr) + \ | |
56 | sizeof(struct udphdr) + \ | |
57 | MAX_UDP_CHUNK) | |
1da177e4 | 58 | |
3578b0c8 | 59 | static void zap_completion_queue(void); |
2cde6acd | 60 | static void netpoll_async_cleanup(struct work_struct *work); |
1da177e4 | 61 | |
bff38771 AV |
62 | static unsigned int carrier_timeout = 4; |
63 | module_param(carrier_timeout, uint, 0644); | |
64 | ||
e6ec2693 JP |
65 | #define np_info(np, fmt, ...) \ |
66 | pr_info("%s: " fmt, np->name, ##__VA_ARGS__) | |
67 | #define np_err(np, fmt, ...) \ | |
68 | pr_err("%s: " fmt, np->name, ##__VA_ARGS__) | |
69 | #define np_notice(np, fmt, ...) \ | |
70 | pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) | |
71 | ||
944e2948 EB |
72 | static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, |
73 | struct netdev_queue *txq) | |
74 | { | |
944e2948 EB |
75 | int status = NETDEV_TX_OK; |
76 | netdev_features_t features; | |
77 | ||
78 | features = netif_skb_features(skb); | |
79 | ||
df8a39de | 80 | if (skb_vlan_tag_present(skb) && |
944e2948 | 81 | !vlan_hw_offload_capable(features, skb->vlan_proto)) { |
5968250c | 82 | skb = __vlan_hwaccel_push_inside(skb); |
944e2948 EB |
83 | if (unlikely(!skb)) { |
84 | /* This is actually a packet drop, but we | |
85 | * don't want the code that calls this | |
86 | * function to try and operate on a NULL skb. | |
87 | */ | |
88 | goto out; | |
89 | } | |
944e2948 EB |
90 | } |
91 | ||
fa2dbdc2 | 92 | status = netdev_start_xmit(skb, dev, txq, false); |
944e2948 EB |
93 | |
94 | out: | |
95 | return status; | |
96 | } | |
97 | ||
/* Delayed-work handler that drains the netpoll retransmit queue.
 *
 * Packets that could not be sent synchronously were parked on
 * npinfo->txq; retry them here.  If the tx queue is frozen/stopped or
 * the driver refuses the skb, re-queue it at the head (to preserve
 * ordering) and reschedule ourselves for HZ/10 later.
 */
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		/* Device went away or was downed since the skb was queued. */
		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		/* IRQs stay disabled across the HARD_TX_LOCK section. */
		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			/* queue count shrank since the skb was queued;
			 * fold the stale mapping into the valid range */
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			/* Could not send: put the skb back and retry later. */
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
137 | ||
1da177e4 LT |
138 | /* |
139 | * Check whether delayed processing was scheduled for our NIC. If so, | |
140 | * we attempt to grab the poll lock and use ->poll() to pump the card. | |
141 | * If this fails, either we've recursed in ->poll() or it's already | |
142 | * running on another CPU. | |
143 | * | |
144 | * Note: we don't mask interrupts with this lock because we're using | |
145 | * trylock here and interrupts are already disabled in the softirq | |
146 | * case. Further, we test the poll_owner to avoid recursion on UP | |
147 | * systems where the lock doesn't exist. | |
1da177e4 | 148 | */ |
/* Run one NAPI context's ->poll() with a zero budget to flush its Tx
 * completion work.  Caller must own napi->poll_owner (see poll_napi()).
 */
static void poll_one_napi(struct napi_struct *napi)
{
	int work = 0;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicilty pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	/* With budget 0 a conforming driver must report no Rx work done. */
	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
176 | ||
/* Poll every NAPI context registered on @dev, skipping any that is
 * already being polled.  Ownership of each context is claimed by
 * cmpxchg()ing our CPU id into poll_owner (-1 means free) and released
 * with a store-release so the poll work is visible before handover.
 */
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
189 | ||
/* Pump the NIC from netpoll context: run the driver's poll controller,
 * service its NAPI contexts, and reap the completion queue.  Bails out
 * silently if the device is quiescing (dev_lock held by open/close),
 * not running, or has no ->ndo_poll_controller.
 */
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
222 | ||
/* Block netpoll activity on @dev (used around dev_open/dev_close).
 * Takes npinfo->dev_lock, which netpoll_poll_dev() only try-locks, so
 * polling is a no-op until netpoll_poll_enable() releases it.  The SRCU
 * read section pairs with synchronize_srcu() in __netpoll_cleanup().
 * May sleep.
 */
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
ca99ca14 | 235 | |
/* Re-allow netpoll activity on @dev; pairs with netpoll_poll_disable().
 * Plain RCU suffices here since up() cannot sleep.
 */
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
ca99ca14 | 246 | |
1da177e4 LT |
247 | static void refill_skbs(void) |
248 | { | |
249 | struct sk_buff *skb; | |
250 | unsigned long flags; | |
251 | ||
a1bcfacd SH |
252 | spin_lock_irqsave(&skb_pool.lock, flags); |
253 | while (skb_pool.qlen < MAX_SKBS) { | |
1da177e4 LT |
254 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); |
255 | if (!skb) | |
256 | break; | |
257 | ||
a1bcfacd | 258 | __skb_queue_tail(&skb_pool, skb); |
1da177e4 | 259 | } |
a1bcfacd | 260 | spin_unlock_irqrestore(&skb_pool.lock, flags); |
1da177e4 LT |
261 | } |
262 | ||
3578b0c8 DM |
263 | static void zap_completion_queue(void) |
264 | { | |
265 | unsigned long flags; | |
266 | struct softnet_data *sd = &get_cpu_var(softnet_data); | |
267 | ||
268 | if (sd->completion_queue) { | |
269 | struct sk_buff *clist; | |
270 | ||
271 | local_irq_save(flags); | |
272 | clist = sd->completion_queue; | |
273 | sd->completion_queue = NULL; | |
274 | local_irq_restore(flags); | |
275 | ||
276 | while (clist != NULL) { | |
277 | struct sk_buff *skb = clist; | |
278 | clist = clist->next; | |
b1586f09 | 279 | if (!skb_irq_freeable(skb)) { |
230cd127 | 280 | refcount_set(&skb->users, 1); |
3578b0c8 DM |
281 | dev_kfree_skb_any(skb); /* put this one back */ |
282 | } else { | |
283 | __kfree_skb(skb); | |
284 | } | |
285 | } | |
286 | } | |
287 | ||
288 | put_cpu_var(softnet_data); | |
289 | } | |
290 | ||
/* Get an skb of @len bytes with @reserve bytes of headroom, falling
 * back to the emergency pool when alloc_skb() fails.  If both fail,
 * polls the device (to free up completed tx skbs) and retries up to
 * 10 times before giving up and returning NULL.
 */
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			/* tickle the driver; completions may free memory */
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
316 | ||
bea3348e SH |
317 | static int netpoll_owner_active(struct net_device *dev) |
318 | { | |
319 | struct napi_struct *napi; | |
320 | ||
321 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | |
322 | if (napi->poll_owner == smp_processor_id()) | |
323 | return 1; | |
324 | } | |
325 | return 0; | |
326 | } | |
327 | ||
/* call with IRQ disabled */
/* Transmit @skb via netpoll on @dev.  Tries the fast synchronous path
 * (spinning on the tx queue lock for up to one jiffy) when nothing is
 * already queued and we are not recursing from a poll; otherwise the
 * skb is queued for the delayed-work retransmit path (queue_process).
 */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		/* A buggy driver re-enabling IRQs in its xmit path would
		 * break the "called with IRQs disabled" contract. */
		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		/* fall back to the workqueue retransmit path */
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work,0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
1da177e4 LT |
383 | |
/* Build and transmit a UDP packet carrying @len bytes of @msg using
 * the addresses/ports configured in @np.  The headers (UDP, then
 * IPv4 or IPv6, then Ethernet) are pushed in front of the payload
 * back-to-front; find_skb() reserved enough headroom for all of them.
 * Must be called with IRQs disabled.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;	/* shared IPv4 ID counter */
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		/* 0 means "no checksum" for UDP; substitute the mangled form */
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		/* put_unaligned: the IP header may not be 4-byte aligned here */
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
1da177e4 | 486 | |
0bcc1816 SS |
/* Log the netpoll configuration (ports, addresses, device, MAC) so the
 * operator can confirm what was parsed/configured.
 */
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
0bcc1816 | 503 | |
b7394d24 CW |
504 | static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr) |
505 | { | |
506 | const char *end; | |
507 | ||
508 | if (!strchr(str, ':') && | |
509 | in4_pton(str, -1, (void *)addr, -1, &end) > 0) { | |
510 | if (!*end) | |
511 | return 0; | |
512 | } | |
513 | if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) { | |
514 | #if IS_ENABLED(CONFIG_IPV6) | |
515 | if (!*end) | |
516 | return 1; | |
517 | #else | |
518 | return -1; | |
519 | #endif | |
520 | } | |
521 | return -1; | |
522 | } | |
523 | ||
1da177e4 LT |
/* Parse a netpoll config string of the form
 *	[src-port]@[src-ip]/[dev],[dst-port]@[dst-ip]/[dst-mac]
 * into @np, mutating @opt in place (delimiters are NUL'd out).
 * Empty fields keep their defaults.  Returns 0 on success, -1 on any
 * parse error (after logging where parsing stopped).
 */
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;
	int ipv6;
	bool ipversion_set = false;	/* local IP fixed the IP version */

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		/* local and remote IP versions must agree */
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
1da177e4 | 605 | |
/* Attach @np to @ndev, allocating and publishing the shared
 * netpoll_info on first use (subsequent netpolls on the same device
 * just take a reference).  Caller must hold RTNL.  Returns 0 or a
 * negative errno; -ENOTSUPP if the device cannot be polled.
 */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		/* first netpoll on this device: create the shared state */
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
661 | ||
/* Full netpoll bring-up: resolve the device by name, force it up if
 * needed (waiting for carrier, with a paranoia delay for instantly-up
 * links), pick a local IP from the device if none was configured, and
 * finally attach via __netpoll_setup().  Takes and releases RTNL;
 * note RTNL is dropped while sleeping for carrier.  Returns 0 or a
 * negative errno.
 */
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	/* bonding/team slaves can't be polled directly */
	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		/* drop RTNL while we sleep waiting for carrier */
		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		/* no local address configured: borrow one from the device */
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					/* link-local addresses are not usable here */
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
1da177e4 | 785 | |
c68b9070 DM |
/* Early init: prepare the emergency skb pool head (filled lazily). */
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
792 | ||
38e6bc18 AW |
/* RCU callback that finishes tearing down a netpoll_info after the
 * last reference is gone and a grace period has elapsed.  Runs in
 * softirq context, hence cancel_delayed_work() (not the _sync variant)
 * and the purge/cancel/purge/cancel dance to race-free drain txq.
 */
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
809 | ||
/* Detach @np from its device.  Waits for in-flight
 * netpoll_poll_disable() readers via SRCU, then drops the npinfo
 * reference; the last reference triggers the driver's cleanup hook and
 * defers the final free to an RCU callback.
 */
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
de85d99e | 837 | |
/* Work handler backing __netpoll_free_async(): performs the cleanup
 * under RTNL and frees the netpoll itself (which was heap-allocated
 * by the caller of __netpoll_free_async()).
 */
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}
93ec2c72 | 847 | |
/* Schedule asynchronous cleanup+free of @np; safe to call from
 * contexts that cannot take RTNL.  @np must have been set up with
 * __netpoll_setup() (which initialized cleanup_work) and must be
 * kfree()-able.
 */
void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
fbeec2e1 | 853 | |
8fdd95ec HX |
/* Synchronous teardown of @np under RTNL: detach from the device,
 * drop the device reference taken in netpoll_setup(), and clear
 * np->dev.  Idempotent: a NULL np->dev is a no-op.
 */
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);