/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list, where long lived entries are stored,
 *    handled by the garbage collect task fired by a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
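/*
 * Illustrative lifecycle (a sketch, not code from any real caller;
 * my_dst_ops is hypothetical):
 *
 *	struct dst_entry *dst = dst_alloc(&my_dst_ops);
 *	...
 *	__dst_free(dst);
 *
 * __dst_free() pushes the entry onto dst_garbage.list under the
 * spinlock; dst_gc_task() later moves still-referenced entries to
 * dst_busy_list and destroys the rest.
 */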
#if RT_CACHE_DEBUG >= 2
static atomic_t			 dst_total = ATOMIC_INIT(0);
#endif

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

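/*
 * Garbage collection work function. It walks dst_busy_list,
 * destroying entries whose refcount has dropped to zero and keeping
 * the others for a later pass, then drains dst_garbage.list the same
 * way. If entries remain, the work re-arms itself with an adaptive
 * delay: an unproductive pass (less than 1/10 of the delayed entries
 * freed) lengthens the next delay by timer_inc, and timer_inc itself
 * grows by DST_GC_INC, so consecutive idle passes back off roughly
 * quadratically until DST_GC_MAX; a productive pass snaps the delay
 * back to DST_GC_MIN.
 */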
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have the state "obsoleted, but
			 * referenced by parent", so this is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * If we freed less than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * If the next desired timer is more than 4 seconds in the
		 * future, then round the timer to whole seconds.
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
		" expires: %lu elapsed: %lu us\n",
		atomic_read(&dst_total), delayed, work_performed,
		expires,
		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}

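/*
 * Packet sink installed as the input/output handler of dead entries:
 * any packet routed through such a dst is silently dropped.
 */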
static int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

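/*
 * Allocate an entry from the protocol's kmem cache, running the
 * protocol's garbage collector first when the entry count exceeds
 * gc_thresh (a failing gc() aborts the allocation). A sketch of the
 * relevant dst_ops fields (names from struct dst_ops, values purely
 * illustrative):
 *
 *	static struct dst_ops my_dst_ops = {
 *		.gc		= my_gc,
 *		.gc_thresh	= 1024,
 *		.kmem_cachep	= my_cachep,
 *	};
 */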
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc())
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst->output = dst_discard;
	}
	dst->obsolete = 2;
}

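/*
 * Hand an entry to the garbage collector: mark it obsolete, chain it
 * onto dst_garbage.list and, if the gc timer had backed off, re-arm
 * the delayed work at the minimum interval so the entry is reaped
 * promptly.
 */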
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}

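/*
 * Tear down one entry: release its hh cache and neighbour, call the
 * protocol's destroy hook, drop the device reference and free the
 * memory. If the entry has a child (a chained dst, as used e.g. by
 * xfrm bundles), drop our reference on it as well; an unreferenced
 * DST_NOHASH child is destroyed in the same pass via the "again"
 * label, while a still-referenced one is returned to the caller for
 * later freeing.
 */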
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
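/*
 * Applied to every cached entry when a device goes down or is
 * unregistered. On NETDEV_DOWN the entry merely stops carrying
 * traffic (dst_discard); on NETDEV_UNREGISTER its device and
 * neighbour references are retargeted at loopback_dev so the real
 * device can disappear while references to the dst remain.
 */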
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = &loopback_dev;
			dev_put(dev);
			dev_hold(&loopback_dev);
		}
	}
}

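/*
 * Netdevice notifier: under dst_gc_mutex, walk dst_busy_list, then
 * splice dst_garbage.list onto its tail, applying dst_ifdown() to
 * every entry so that neither list keeps a stale device pointer.
 */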
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);