/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list where long lived entries are stored;
 *    they are handled by a garbage collect task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

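/*
 * Delayed work doing the actual garbage collection: walk dst_busy_list,
 * destroy entries whose refcount has dropped to zero and keep the still
 * referenced ones on the busy list, then pick up whatever was queued on
 * dst_garbage.list in the meantime and process it as well.  The next run
 * is scheduled further away (up to DST_GC_MAX) when little was freed, and
 * back at DST_GC_MIN when the pass was productive.
 */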
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in the
	 * bss section. We really want to avoid false sharing on this
	 * variable, and catch any writes on it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

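/*
 * dst_alloc - allocate and initialise a protocol independent dst_entry.
 * If the protocol supplies a gc() hook and its fast entry count exceeds
 * gc_thresh, gc() is given a chance to make room first (allocation fails
 * if it cannot).  The new entry holds a reference on @dev, has its
 * input/output handlers pointed at dst_discard, and is charged to @ops
 * unless DST_NOCOUNT is set in @flags.
 *
 * Minimal usage sketch (hypothetical protocol "foo"; foo_dst_ops is not
 * defined in this file and must provide kmem_cachep etc.):
 *
 *	struct dst_entry *dst;
 *
 *	dst = dst_alloc(&foo_dst_ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *	if (!dst)
 *		return -ENOBUFS;
 */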
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);

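/*
 * Mark an entry dead: if its device is gone or down, short circuit
 * input/output to dst_discard, then flag it DST_OBSOLETE_DEAD so the
 * garbage collector can reap it once the last reference is dropped.
 */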
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = DST_OBSOLETE_DEAD;
}

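/*
 * Queue an entry on dst_garbage.list for deferred destruction and, if the
 * collector had backed off, bring the next run forward to DST_GC_MIN.
 */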
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
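
/*
 * Free one entry and walk its ->child chain: NOHASH children whose
 * refcount drops to zero are freed as well, a NOHASH child that is still
 * referenced is returned so the caller can queue it for later freeing,
 * and hashed children are only dereferenced and left to their hash table.
 * Returns NULL when there is nothing left to hand back.
 */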
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
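
/*
 * Drop one reference.  Entries flagged DST_NOCACHE are not kept by any
 * cache, so the last put destroys them right away (or queues them via
 * __dst_free() when a NOHASH child is still referenced).
 */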
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
			dst = dst_destroy(dst);
			if (dst)
				__dst_free(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release);
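
/*
 * Copy-on-write helper for dst metrics: duplicate the current array into a
 * freshly kmalloc'ed writable one and install it with cmpxchg().  If the
 * swap is lost to a concurrent writer, the local copy is discarded and the
 * winner's array is returned instead, or NULL when that array is read only
 * (and likewise on allocation failure).
 */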
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * skb_dst_set_noref - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst is not in the cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero.
	 */
	if (unlikely(dst->flags & DST_NOCACHE)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(skb_dst_set_noref);

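/*
 * Illustrative caller (hypothetical names), showing the RCU contract the
 * WARN_ON above enforces:
 *
 *	rcu_read_lock();
 *	dst = rcu_dereference(cache->dst);	/- some RCU protected cache -/
 *	if (dst)
 *		skb_dst_set_noref(skb, dst);
 *	rcu_read_unlock();
 */
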
/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

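/*
 * Netdevice notifier: on NETDEV_DOWN or NETDEV_UNREGISTER, splice the
 * pending garbage list onto the busy list and run dst_ifdown() over every
 * queued entry, so dsts still pointing at the affected device either get
 * their handlers stubbed out (DOWN) or are re-targeted at the loopback
 * device (UNREGISTER).
 */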
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}