/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;

static DEFINE_SPINLOCK(queue_lock);
static int queue_depth;
static struct sk_buff *queue_head, *queue_tail;

static atomic_t trapped;

#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);

static void queue_process(void *p)
{
        unsigned long flags;
        struct sk_buff *skb;

        while (queue_head) {
                spin_lock_irqsave(&queue_lock, flags);

                skb = queue_head;
                queue_head = skb->next;
                if (skb == queue_tail)
                        queue_head = NULL;

                queue_depth--;

                spin_unlock_irqrestore(&queue_lock, flags);

                dev_queue_xmit(skb);
        }
}

static DECLARE_WORK(send_queue, queue_process, NULL);

void netpoll_queue(struct sk_buff *skb)
{
        unsigned long flags;

        if (queue_depth == MAX_QUEUE_DEPTH) {
                __kfree_skb(skb);
                return;
        }

        spin_lock_irqsave(&queue_lock, flags);
        if (!queue_head)
                queue_head = skb;
        else
                queue_tail->next = skb;
        queue_tail = skb;
        queue_depth++;
        spin_unlock_irqrestore(&queue_lock, flags);

        schedule_work(&send_queue);
}

static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                        unsigned short ulen, u32 saddr, u32 daddr)
{
        if (uh->check == 0)
                return 0;

        if (skb->ip_summed == CHECKSUM_HW)
                return csum_tcpudp_magic(
                        saddr, daddr, ulen, IPPROTO_UDP, skb->csum);

        skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
        struct netpoll_info *npinfo = np->dev->npinfo;
        int budget = 16;

        if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
            npinfo->poll_owner != smp_processor_id() &&
            spin_trylock(&npinfo->poll_lock)) {
                npinfo->rx_flags |= NETPOLL_RX_DROP;
                atomic_inc(&trapped);

                np->dev->poll(np->dev, &budget);

                atomic_dec(&trapped);
                npinfo->rx_flags &= ~NETPOLL_RX_DROP;
                spin_unlock(&npinfo->poll_lock);
        }
}

void netpoll_poll(struct netpoll *np)
{
        if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;

        /* Process pending work on NIC */
        np->dev->poll_controller(np->dev);
        if (np->dev->poll)
                poll_napi(np);

        zap_completion_queue();
}

static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_list_lock, flags);
        while (nr_skbs < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                skb->next = skbs;
                skbs = skb;
                nr_skbs++;
        }
        spin_unlock_irqrestore(&skb_list_lock, flags);
}

static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
                }
        }

        put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int once = 1, count = 0;
        unsigned long flags;
        struct sk_buff *skb = NULL;

        zap_completion_queue();
repeat:
        if (nr_skbs < MAX_SKBS)
                refill_skbs();

        skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb) {
                spin_lock_irqsave(&skb_list_lock, flags);
                skb = skbs;
                if (skb) {
                        skbs = skb->next;
                        skb->next = NULL;
                        nr_skbs--;
                }
                spin_unlock_irqrestore(&skb_list_lock, flags);
        }

        if (!skb) {
                count++;
                if (once && (count == 1000000)) {
                        printk("out of netpoll skbs!\n");
                        once = 0;
                }
                netpoll_poll(np);
                goto repeat;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status;
        struct netpoll_info *npinfo;

        if (!np || !np->dev || !netif_running(np->dev)) {
                __kfree_skb(skb);
                return;
        }

        npinfo = np->dev->npinfo;

        /* avoid recursion */
        if (npinfo->poll_owner == smp_processor_id() ||
            np->dev->xmit_lock_owner == smp_processor_id()) {
                if (np->drop)
                        np->drop(skb);
                else
                        __kfree_skb(skb);
                return;
        }

        while (1) {
                spin_lock(&np->dev->xmit_lock);
                np->dev->xmit_lock_owner = smp_processor_id();

                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
                if (netif_queue_stopped(np->dev)) {
                        np->dev->xmit_lock_owner = -1;
                        spin_unlock(&np->dev->xmit_lock);
                        netpoll_poll(np);
                        continue;
                }

                status = np->dev->hard_start_xmit(skb, np->dev);
                np->dev->xmit_lock_owner = -1;
                spin_unlock(&np->dev->xmit_lock);

                /* success */
                if (!status)
                        return;

                /* transmit busy */
                netpoll_poll(np);
        }
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        memcpy(skb->data, msg, len);
        skb->len += len;

        udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;

        iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id = 0;
        iph->frag_off = 0;
        iph->ttl = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check = 0;
        put_unaligned(htonl(np->local_ip), &(iph->saddr));
        put_unaligned(htonl(np->remote_ip), &(iph->daddr));
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

        eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        u32 sip, tip;
        unsigned long flags;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                                 (2 * skb->dev->addr_len) +
                                 (2 * sizeof(u32)))))
                return;

        skb->h.raw = skb->nh.raw = skb->data;
        arp = skb->nh.arph;

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4 + skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
                return;

        size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
        send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        send_skb->nh.raw = send_skb->data;
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */

        if (np->dev->hard_header &&
            np->dev->hard_header(send_skb, skb->dev, ptype,
                                 np->remote_mac, np->local_mac,
                                 send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the arp protocol part.
         *
         * we only support ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll *np = skb->dev->npinfo->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == __constant_htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                arp_reply(skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
                goto out;
        if (np->local_ip && np->local_ip != ntohl(iph->daddr))
                goto out;
        if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = ntohl(in_aton(cur));
                cur = delim;

                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = ntohl(in_aton(cur));
        cur = delim + 1;

        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
               np->name, HIPQUAD(np->remote_ip));

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        printk(KERN_INFO "%s: remote ethernet address "
               "%02x:%02x:%02x:%02x:%02x:%02x\n",
               np->name,
               np->remote_mac[0],
               np->remote_mac[1],
               np->remote_mac[2],
               np->remote_mac[3],
               np->remote_mac[4],
               np->remote_mac[5]);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}
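
/*
 * Note on the option format, for reference (this comment is an editorial
 * addition, not part of the original file): the string parsed above has
 * the form
 *
 *      [local_port]@[local_ip]/[dev_name],[remote_port]@[remote_ip]/[remote_mac]
 *
 * A field may be left empty, keeping the caller's preset or the default
 * that netpoll_setup() fills in later, as long as its trailing delimiter
 * is kept; only the remote MAC at the very end may be omitted entirely.
 * A made-up example configuration would be
 *
 *      6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 */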

int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev_name)
                ndev = dev_get_by_name(np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -1;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo)
                        goto release;

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;
                npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                npinfo->poll_owner = -1;
                npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
        } else
                npinfo = ndev->npinfo;

        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_shlock();
                if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, np->dev_name);
                        rtnl_shunlock();
                        goto release;
                }
                rtnl_shunlock();

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        goto release;
                }

                np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }
        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return -1;
}

void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo && npinfo->rx_np == np) {
                        spin_lock_irqsave(&npinfo->rx_lock, flags);
                        npinfo->rx_np = NULL;
                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                }
                dev_put(np->dev);
        }

        np->dev = NULL;
}

int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
EXPORT_SYMBOL(netpoll_queue);
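
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): a minimal client along the lines of netconsole might
 * drive the API exported above roughly as follows.  The identifiers
 * my_np, my_init, my_exit and all addresses are assumptions made up
 * purely for the example.
 *
 *      static struct netpoll my_np = {
 *              .name        = "myconsole",
 *              .dev_name    = "eth0",
 *              .local_port  = 6665,
 *              .remote_port = 6666,
 *              .remote_ip   = 0x0a000002,      // 10.0.0.2, host order
 *              .remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              // alternatively fill my_np via netpoll_parse_options()
 *              if (netpoll_setup(&my_np))
 *                      return -EINVAL;
 *              netpoll_send_udp(&my_np, "hello from netpoll\n", 19);
 *              return 0;
 *      }
 *
 *      static void __exit my_exit(void)
 *      {
 *              netpoll_cleanup(&my_np);
 *      }
 */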