mirror_ubuntu-artful-kernel.git / net/core/netpoll.c (blame at "netpoll: Introduce netpoll_carrier_timeout kernel option")
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL        50
#define NETPOLL_RX_ENABLED   1
#define NETPOLL_RX_DROP      2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

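/*
 * Drain the deferred-transmit queue from process context.  Packets that
 * could not be sent directly by netpoll_send_skb() are queued on
 * npinfo->txq and retried here; if the device is still busy, the skb is
 * put back at the head of the queue and the work is rescheduled.
 */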
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);

        work = napi->poll(napi, budget);
        trace_napi_poll(napi);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}

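/*
 * Pump the device by hand: ask the driver to run its interrupt handler
 * via ->ndo_poll_controller(), poll any scheduled NAPI contexts, answer
 * queued ARP requests, and reap this CPU's completion queue so transmit
 * buffers get freed even while interrupts are off.
 */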
void netpoll_poll(struct netpoll *np)
{
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops;

        if (!dev || !netif_running(dev))
                return;

        ops = dev->netdev_ops;
        if (!ops->ndo_poll_controller)
                return;

        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);

        poll_napi(dev);

        service_arp_queue(dev->npinfo);

        zap_completion_queue();
}

static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor) {
                                atomic_inc(&skb->users);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

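/*
 * Allocate an skb for transmission: try a fresh GFP_ATOMIC allocation
 * first, fall back to the preallocated pool, and as a last resort poll
 * the device a few times in the hope that completions free up memory.
 */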
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

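/*
 * Transmit an skb immediately if it is safe to do so (nothing already
 * queued and we are not being called from the device's own NAPI poll).
 * We busy-wait up to one clock tick for the tx queue; anything that
 * still cannot be sent is deferred to queue_process() via tx_work.
 */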
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
                unsigned long flags;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
                                __netif_tx_unlock(txq);

                                if (status == NETDEV_TX_OK)
                                        break;
                        }

                        /* tickle the device, maybe there is some cleanup */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }
                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}

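/*
 * Build a complete UDP/IPv4/Ethernet frame around @msg using the
 * addresses and ports configured in @np, then hand it to
 * netpoll_send_skb().  Headers are filled in by hand because this path
 * must work without routing or neighbour resolution.
 */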
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(np->local_ip,
                                        np->remote_ip,
                                        udp_len, IPPROTO_UDP,
                                        csum_partial(udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id = 0;
        iph->frag_off = 0;
        iph->ttl = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check = 0;
        put_unaligned(np->local_ip, &(iph->saddr));
        put_unaligned(np->remote_ip, &(iph->daddr));
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

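/*
 * Answer an ARP request for our local IP while the stack is trapped.
 * The reply is built from scratch and sent through netpoll_send_skb(),
 * so console traffic keeps flowing even when normal ARP processing is
 * not running.
 */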
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp + 1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* if we actually cared about dst hw addr, it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (tip != np->local_ip ||
            ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;

        size = arp_hdr_len(skb->dev);
        send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        skb_reset_network_header(send_skb);
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */
        if (dev_hard_header(send_skb, skb->dev, ptype,
                            sha, np->dev->dev_addr,
                            send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the arp protocol part.
         *
         * we only support ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, sha, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

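/*
 * Receive-side hook, reached from the driver's rx path through the
 * netpoll_rx() wrapper.  Queue ARP requests for later replies, check
 * that the packet belongs to the UDP flow this netpoll instance listens
 * on, and hand the payload to np->rx_hook().  Returns 1 if the packet
 * was consumed.
 */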
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != iph->daddr)
                goto out;
        if (np->remote_ip && np->remote_ip != iph->saddr)
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        printk(KERN_INFO "%s: local port %d\n",
               np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %pI4\n",
               np->name, &np->local_ip);
        printk(KERN_INFO "%s: interface %s\n",
               np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
               np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %pI4\n",
               np->name, &np->remote_ip);
        printk(KERN_INFO "%s: remote ethernet address %pM\n",
               np->name, np->remote_mac);
}

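/*
 * Parse a netpoll configuration string of the form used by netconsole:
 *
 *      [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. (illustrative values only):
 *
 *      6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 *
 * Leading fields may be left empty, in which case the values already
 * present in @np (or defaulted later by netpoll_setup()) are kept.
 */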
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = in_aton(cur);
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = in_aton(cur);
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

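/*
 * Bind a netpoll instance to its device: look the device up by name,
 * allocate or share the per-device netpoll_info, bring the interface up
 * and wait (up to carrier_timeout seconds, a module parameter) for
 * carrier, default the local IP from the device's first address, and
 * finally publish npinfo on the device.
 *
 * A minimal client looks roughly like netconsole does; this is a sketch
 * only, the names and values below are illustrative, not taken from
 * this file:
 *
 *      static struct netpoll np = {
 *              .name        = "myconsole",
 *              .dev_name    = "eth0",
 *              .local_port  = 6665,
 *              .remote_port = 6666,
 *      };
 *
 *      netpoll_parse_options(&np, config);
 *      err = netpoll_setup(&np);
 *      if (!err)
 *              netpoll_send_udp(&np, msg, len);
 */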
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto release;
                }

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        if (!ndev->netdev_ops->ndo_poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto release;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto release;
                }

                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return err;
}

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

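/*
 * Detach a netpoll instance from its device.  The per-device
 * netpoll_info is reference counted, so the queues and pending tx work
 * are only torn down when the last netpoll user of the device goes away.
 */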
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo) {
                        if (npinfo->rx_np == np) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                npinfo->rx_np = NULL;
                                npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }

                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);

                                /* clean after last, unfinished work */
                                __skb_queue_purge(&npinfo->txq);
                                kfree(npinfo);
                                np->dev->npinfo = NULL;
                        }
                }

                dev_put(np->dev);
        }

        np->dev = NULL;
}

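/*
 * While "trapped" is non-zero the rx path hands every packet to netpoll
 * rather than the normal stack (see the out: path in __netpoll_rx()
 * above); callers that need exclusive use of the NIC, such as a dump or
 * debugger client, flip this via netpoll_set_trap().
 */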
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);