/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held for only a short time,
 *    we use a second list where long lived entries are stored;
 *    these are handled by the garbage collect task fired by a
 *    workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that dst_gc_task() and dst_dev_event() can be synchronized.
 */
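
/*
 * A sketch of the resulting flow (informational only, derived from
 * the description above):
 *
 *	__dst_free()  --spinlock-->  dst_garbage.list
 *	                                  |
 *	                     dst_gc_task() drains the list
 *	                                  |
 *	          refcount != 0 --> dst_busy_list (mutex), retried later
 *	          refcount == 0 --> dst_destroy()
 */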

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
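
/*
 * Note: the DST_GC_* tunables live in <net/dst.h>, not here.  In
 * kernels of this vintage they are (quoted from that header; treat
 * the exact values as an assumption if you are on a different tree):
 *
 *	#define DST_GC_MIN	(HZ/10)
 *	#define DST_GC_MAX	(120 * HZ)
 *	#define DST_GC_INC	(HZ/2)
 */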
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is
			 * already on the gc list, invalidate it and add
			 * it to the gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so this is correct for now.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * If we freed fewer than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * If the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds.
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
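
/*
 * Worked example of the backoff above, assuming the DST_GC_* values
 * quoted earlier (shown in seconds): __dst_free() resets timer_inc
 * to 0.5s and timer_expires to 0.1s.  Each quiet pass
 * (work_performed <= delayed/10) then computes
 *
 *	expires += inc;  inc += 0.5s;
 *
 * giving expires = 0.6s, 1.6s, 3.1s, 5.1s, ... until it saturates at
 * DST_GC_MAX (120s), while any busy pass snaps both values back to
 * the minimum.
 */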

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

const u32 dst_default_metrics[RTAX_MAX];

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
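
/*
 * Usage sketch (hypothetical, not from this file): a protocol embeds
 * struct dst_entry at the start of its own route structure and sizes
 * ops->kmem_cachep accordingly, so dst_alloc() hands back the whole
 * object:
 *
 *	struct my_route {
 *		struct dst_entry	dst;		(must be first)
 *		u32			my_cookie;	(private state)
 *	};
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops, dev, 1, 0, 0);
 *	if (rt)
 *		rt->dst.output = my_output;
 *
 * Here my_route, my_dst_ops, my_cookie and my_output are illustrative
 * names only.
 */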

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = 2;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;

	smp_rmb();

again:
	neigh = rcu_dereference_protected(dst->_neighbour, 1);
	child = dst->child;

	if (neigh) {
		RCU_INIT_POINTER(dst->_neighbour, NULL);
		neigh_release(neigh);
	}

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
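
/*
 * Note on the ->child walk above: dst entries can be chained via
 * ->child (xfrm bundles being the classic user).  A NOHASH child
 * whose refcount drops to zero here is destroyed immediately via the
 * "goto again"; a still-referenced NOHASH child is handed back to the
 * caller for delayed freeing, which is what dst_gc_task() and
 * dst_release() do with the return value.  A hashed child is left
 * alone, since its hash table still owns it.
 */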

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
			dst = dst_destroy(dst);
			if (dst)
				__dst_free(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release);
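
/*
 * Refcounting sketch (hypothetical caller): every reference taken via
 * dst_hold() or handed out by a lookup must be paired with a
 * dst_release():
 *
 *	struct dst_entry *dst = my_lookup(skb);		(takes a ref)
 *	if (dst) {
 *		... use dst ...
 *		dst_release(dst);
 *	}
 *
 * For a DST_NOCACHE entry the final dst_release() also frees it,
 * hence the WARN_ON() if the count ever goes negative.
 */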

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
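
/*
 * The cmpxchg() above is what makes the copy-on-write lock-free: if
 * two CPUs race to replace a shared read-only metrics array, only one
 * cmpxchg() succeeds.  The loser frees its private copy and uses
 * whatever the winner installed; if the value it lost to is itself
 * marked DST_METRICS_READ_ONLY, it reports failure by returning NULL.
 */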

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * skb_dst_set_noref - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst is not in the cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero.
	 */
	if (unlikely(dst->flags & DST_NOCACHE)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(skb_dst_set_noref);
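
/*
 * Usage sketch (hypothetical fast-path caller), saving a pair of
 * atomic ops per packet compared to skb_dst_set():
 *
 *	rcu_read_lock();
 *	dst = my_cached_lookup(skb);	(no reference taken)
 *	skb_dst_set_noref(skb, dst);
 *	... skb must be consumed before the RCU section ends ...
 *	rcu_read_unlock();
 *
 * The noref dst is only guaranteed valid inside the RCU read-side
 * section, which is what the WARN_ON() above tries to enforce.
 */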

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		struct neighbour *neigh;

		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh && neigh->dev == dev) {
			neigh->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
		rcu_read_unlock();
	}
}
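
/*
 * Note: on NETDEV_UNREGISTER the entry (and any attached neighbour
 * bound to the dying device) is re-pointed at the per-namespace
 * loopback device instead of being left with a dangling ->dev,
 * trading a stale-but-safe route for a potential use-after-free.
 * Plain NETDEV_DOWN only blackholes traffic via dst_discard.
 */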

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}