/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

11
bff38771 12#include <linux/moduleparam.h>
1da177e4
LT
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/string.h>
14c85021 16#include <linux/if_arp.h>
1da177e4
LT
17#include <linux/inetdevice.h>
18#include <linux/inet.h>
19#include <linux/interrupt.h>
20#include <linux/netpoll.h>
21#include <linux/sched.h>
22#include <linux/delay.h>
23#include <linux/rcupdate.h>
24#include <linux/workqueue.h>
5a0e3ad6 25#include <linux/slab.h>
1da177e4
LT
26#include <net/tcp.h>
27#include <net/udp.h>
28#include <asm/unaligned.h>
9cbc1cb8 29#include <trace/events/napi.h>
1da177e4
LT
30
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
	 sizeof(struct iphdr) + sizeof(struct ethhdr))
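
/*
 * For reference: with the usual header sizes (8-byte UDP header, 20-byte
 * IPv4 header with no options, 14-byte Ethernet header, no VLAN tag) this
 * works out to 1460 + 8 + 20 + 14 = 1502 bytes per pooled skb.
 */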

static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);
}

void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					dev->priv_flags |= IFF_IN_NETPOLL;
					status = ops->ndo_start_xmit(skb, dev);
					dev->priv_flags &= ~IFF_IN_NETPOLL;
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

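/*
 * For scale: the busy-wait above lasts at most one tick, i.e.
 * jiffies_to_usecs(1)/USEC_PER_POLL attempts spaced 50us apart (20 tries
 * at HZ=1000, 80 at HZ=250). Whatever is still unsent after that falls
 * back to the delayed-work path via npinfo->txq and queue_process().
 */
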
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

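/*
 * Usage sketch (illustrative only, not code in this file): a client such
 * as netconsole configures a struct netpoll once, then pushes each
 * message through netpoll_send_udp(), keeping individual chunks within
 * MAX_UDP_CHUNK so they fit the pooled skbs:
 *
 *	static void example_emit(struct netpoll *np, const char *msg, int len)
 *	{
 *		while (len > 0) {
 *			int frag = min(len, MAX_UDP_CHUNK);
 *
 *			netpoll_send_udp(np, msg, frag);
 *			msg += frag;
 *			len -= frag;
 *		}
 *	}
 */
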
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection to see whether it is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

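/*
 * Note: a return of 1 from __netpoll_rx() tells the caller in the normal
 * receive path that the skb was consumed here (queued for ARP service,
 * handed to an rx_hook, or dropped while trapped) and must not be passed
 * further up the stack; 0 lets ordinary processing continue.
 */
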
void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
			       "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}

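/*
 * For reference, the option string parsed above follows the familiar
 * netconsole syntax; only the target IP is mandatory, and fields left
 * empty keep whatever the caller preset in the struct netpoll:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55".
 */
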
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}

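/*
 * Lifecycle sketch (illustrative only): a receive-side client sets
 * np->rx_hook before calling netpoll_setup(); the hook then runs from
 * __netpoll_rx() in atomic context (it must not sleep) whenever a
 * matching UDP packet arrives:
 *
 *	static void example_rx(struct netpoll *np, int port, char *msg, int len)
 *	{
 *		pr_info("netpoll rx from port %d, %d bytes\n", port, len);
 *	}
 *
 *	np.rx_hook = example_rx;
 *	err = netpoll_setup(&np);
 *	...
 *	netpoll_cleanup(&np);
 */
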
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_rearming_delayed_work(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);