git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/core/netpoll.c
netpoll: Remove unused EXPORT_SYMBOLs of netpoll_poll and netpoll_poll_dev
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->priv_flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device, maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

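/*
 * Editor's note (illustrative, not part of the original file): the in-file
 * callers below (netpoll_send_udp(), arp_reply()) go through the
 * netpoll_send_skb() helper from <linux/netpoll.h> rather than calling
 * netpoll_send_skb_on_dev() directly; that helper is presumably a thin
 * wrapper that amounts to
 *
 *	netpoll_send_skb_on_dev(np, skb, np->dev);
 *
 * so the function above is the single transmit path for all netpoll output.
 */
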
356void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
357{
358 int total_len, eth_len, ip_len, udp_len;
359 struct sk_buff *skb;
360 struct udphdr *udph;
361 struct iphdr *iph;
362 struct ethhdr *eth;
363
364 udp_len = len + sizeof(*udph);
365 ip_len = eth_len = udp_len + sizeof(*iph);
366 total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
367
368 skb = find_skb(np, total_len, total_len - len);
369 if (!skb)
370 return;
371
27d7ff46 372 skb_copy_to_linear_data(skb, msg, len);
1da177e4
LT
373 skb->len += len;
374
4bedb452
ACM
375 skb_push(skb, sizeof(*udph));
376 skb_reset_transport_header(skb);
377 udph = udp_hdr(skb);
1da177e4
LT
378 udph->source = htons(np->local_port);
379 udph->dest = htons(np->remote_port);
380 udph->len = htons(udp_len);
381 udph->check = 0;
e7557af5
HH
382 udph->check = csum_tcpudp_magic(np->local_ip,
383 np->remote_ip,
8e365eec 384 udp_len, IPPROTO_UDP,
07f0757a 385 csum_partial(udph, udp_len, 0));
8e365eec 386 if (udph->check == 0)
5e57dff2 387 udph->check = CSUM_MANGLED_0;
1da177e4 388
e2d1bca7
ACM
389 skb_push(skb, sizeof(*iph));
390 skb_reset_network_header(skb);
eddc9ec5 391 iph = ip_hdr(skb);
1da177e4
LT
392
393 /* iph->version = 4; iph->ihl = 5; */
394 put_unaligned(0x45, (unsigned char *)iph);
395 iph->tos = 0;
396 put_unaligned(htons(ip_len), &(iph->tot_len));
397 iph->id = 0;
398 iph->frag_off = 0;
399 iph->ttl = 64;
400 iph->protocol = IPPROTO_UDP;
401 iph->check = 0;
e7557af5
HH
402 put_unaligned(np->local_ip, &(iph->saddr));
403 put_unaligned(np->remote_ip, &(iph->daddr));
1da177e4
LT
404 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
405
406 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
459a98ed 407 skb_reset_mac_header(skb);
206daaf7 408 skb->protocol = eth->h_proto = htons(ETH_P_IP);
09538641
SH
409 memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
410 memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);
1da177e4
LT
411
412 skb->dev = np->dev;
413
414 netpoll_send_skb(np, skb);
415}
9e34a5b5 416EXPORT_SYMBOL(netpoll_send_udp);
1da177e4
LT
417
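/*
 * Usage sketch (illustrative only, not part of the original file): a client
 * such as netconsole hands netpoll_send_udp() a fully configured
 * struct netpoll plus a plain text buffer, e.g.
 *
 *	netpoll_send_udp(&np, msg, strlen(msg));
 *
 * The UDP/IP/Ethernet headers are built here, so "msg" is payload only;
 * since the emergency skb pool is sized as MAX_UDP_CHUNK plus headers,
 * callers keep each chunk within MAX_UDP_CHUNK.
 */
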
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

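/*
 * Illustrative sketch (not part of the original file): the rx_hook invoked
 * above is supplied by a netpoll client and, matching the np->rx_hook(...)
 * call in __netpoll_rx(), has roughly this shape ("my_rx_hook" is a
 * made-up name):
 *
 *	static void my_rx_hook(struct netpoll *np, int source_port,
 *			       char *data, int len)
 *	{
 *		// "data" points just past the UDP header and "len" is the
 *		// UDP payload length; this runs in netpoll context, so
 *		// keep the work minimal.
 *	}
 */
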
void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
			       "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

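/*
 * Config string format accepted by netpoll_parse_options(), as implied by
 * the parsing above (a field may be left empty, keeping its delimiter, to
 * fall back to the default):
 *
 *	<local_port>@<local_ip>/<dev>,<remote_port>@<remote_ip>/<remote_mac>
 *
 * Illustrative example with made-up addresses:
 *
 *	6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 */
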
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
		       np->name, np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

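/*
 * Typical client sequence (illustrative sketch, not part of the original
 * file), built only from the helpers exported here; the client name,
 * device name and error handling are made up for the example:
 *
 *	struct netpoll np = {
 *		.name		= "myclient",
 *		.dev_name	= "eth0",
 *	};
 *
 *	if (netpoll_parse_options(&np, config_string))	// optional
 *		return -EINVAL;
 *	if (netpoll_setup(&np))		// grabs the device, installs npinfo
 *		return -EINVAL;
 *	...
 *	netpoll_send_udp(&np, msg, len);
 *	...
 *	netpoll_cleanup(&np);		// drops the device reference again
 */
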
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);