/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>

enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

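/* Compute the RFC 2863 operational state implied by the device's
 * carrier and dormant flags, before any link_mode policy is applied.
 */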
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev->iflink ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}

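/* Apply the device's link_mode policy to the default operstate and,
 * if the result differs, update dev->operstate under dev_base_lock.
 */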
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock_bh(&dev_base_lock);

	switch (dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;

	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock_bh(&dev_base_lock);
}

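/* Called while a device is being registered: apply the RFC 2863 policy
 * to link state changes that happened before registration.
 */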
void linkwatch_init_dev(struct net_device *dev)
{
	/* Handle pre-registration link state changes */
	if (!netif_carrier_ok(dev) || netif_dormant(dev))
		rfc2863_policy(dev);
}

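/* Decide whether an event may bypass the one-per-second rate limit:
 * events on stacked devices (ifindex != iflink) and carrier-up events
 * that change the qdisc state are treated as urgent.
 */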
static bool linkwatch_urgent_event(struct net_device *dev)
{
	if (!netif_running(dev))
		return false;

	if (dev->ifindex != dev->iflink)
		return true;

	return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}

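/* Queue the device on the global event list (at most once), taking a
 * reference that linkwatch_do_dev() drops again.
 */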
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}

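/* Schedule the linkwatch work: immediately for urgent events,
 * otherwise no earlier than linkwatch_nextevent.
 */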
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}

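/* Process one queued device: re-evaluate its operstate, activate or
 * deactivate its qdiscs to match the carrier state, notify listeners
 * and drop the reference taken in linkwatch_add_event().
 */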
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_clear_bit();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}

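/* Drain the event list.  In urgent_only mode, non-urgent events are
 * put back on the list for the next rate-limited run.
 */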
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}

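/* Remove a pending event for a device that is going away, processing
 * it immediately so the reference it holds is released.
 */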
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}

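/* Work item: run the queue under RTNL, in urgent-only mode while the
 * rate-limit window is still open.
 */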
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}

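/* Entry point for drivers (e.g. via netif_carrier_on/off): queue the
 * device if no event is pending yet, then schedule the work,
 * immediately when the event is urgent.
 */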
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);