/* include/linux/netpoll.h (Linux kernel; recovered from a git-blame export) */
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;

17struct netpoll {
18 struct net_device *dev;
19 char dev_name[16], *name;
1da177e4
LT
20 void (*rx_hook)(struct netpoll *, int, char *, int);
21 void (*drop)(struct sk_buff *skb);
22 u32 local_ip, remote_ip;
23 u16 local_port, remote_port;
24 unsigned char local_mac[6], remote_mac[6];
115c1d6e
JM
25};
26
27struct netpoll_info {
1da177e4
LT
28 spinlock_t poll_lock;
29 int poll_owner;
0db1d6fc 30 int tries;
115c1d6e 31 int rx_flags;
fbeec2e1
JM
32 spinlock_t rx_lock;
33 struct netpoll *rx_np; /* netpoll that registered an rx_hook */
068c6e98 34 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
1da177e4
LT
35};
36
/*
 * Netpoll core entry points.  Signatures must stay exactly as-is:
 * they are the public contract of this header.
 */
void netpoll_poll(struct netpoll *np);				/* poll np->dev once */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);	/* parse config string */
int netpoll_setup(struct netpoll *np);				/* bind np to its device */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);			/* undo netpoll_setup() */
int __netpoll_rx(struct sk_buff *skb);				/* locked rx worker used by netpoll_rx() */
void netpoll_queue(struct sk_buff *skb);

47#ifdef CONFIG_NETPOLL
48static inline int netpoll_rx(struct sk_buff *skb)
49{
115c1d6e 50 struct netpoll_info *npinfo = skb->dev->npinfo;
fbeec2e1
JM
51 unsigned long flags;
52 int ret = 0;
115c1d6e 53
fbeec2e1 54 if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
115c1d6e
JM
55 return 0;
56
fbeec2e1
JM
57 spin_lock_irqsave(&npinfo->rx_lock, flags);
58 /* check rx_flags again with the lock held */
59 if (npinfo->rx_flags && __netpoll_rx(skb))
60 ret = 1;
61 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
62
63 return ret;
1da177e4
LT
64}
65
53fb95d3 66static inline void *netpoll_poll_lock(struct net_device *dev)
1da177e4 67{
53fb95d3 68 rcu_read_lock(); /* deal with race on ->npinfo */
115c1d6e
JM
69 if (dev->npinfo) {
70 spin_lock(&dev->npinfo->poll_lock);
71 dev->npinfo->poll_owner = smp_processor_id();
53fb95d3 72 return dev->npinfo;
1da177e4 73 }
53fb95d3 74 return NULL;
1da177e4
LT
75}
76
53fb95d3 77static inline void netpoll_poll_unlock(void *have)
1da177e4 78{
53fb95d3
MM
79 struct netpoll_info *npi = have;
80
81 if (npi) {
82 npi->poll_owner = -1;
83 spin_unlock(&npi->poll_lock);
1da177e4 84 }
53fb95d3 85 rcu_read_unlock();
1da177e4
LT
86}
87
88#else
89#define netpoll_rx(a) 0
afb997c6 90#define netpoll_poll_lock(a) NULL
1da177e4
LT
91#define netpoll_poll_unlock(a)
92#endif
93
94#endif