/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may still be removed only after a
 *  sufficient amount of time has passed since its last use.  A
 *  less-recently-used entry can also be removed if the pool is
 *  overloaded, i.e. if the total number of entries is greater than or
 *  equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way
 *  to prevent easy and efficient DoS attacks by creating hash
 *  collisions.  A huge number of long-living nodes in a single hash
 *  slot would significantly delay lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  The global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer field modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *			usually under some other lock to prevent the node
 *			from disappearing
 *		daddr: unchangeable
 */
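
/* A minimal sketch (editorial illustration, not part of the original
 * file) of the read-side contract implied by rule 2 above: lookups run
 * under RCU, and a walker must take a reference with
 * refcount_inc_not_zero() before using a node outside the read section,
 * because a node whose refcount already dropped to zero may be queued
 * for RCU freeing at any moment.  The helper name is illustrative only.
 */
static inline struct inet_peer *peer_get_rcu_sketch(struct inet_peer *p)
{
	/* Fails (returns NULL) if the node is already on its way out. */
	return refcount_inc_not_zero(&p->refcnt) ? p : NULL;
}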

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;		/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
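
	/* Worked example (editorial note, not in the original): starting
	 * from the default of 65536 + 128 = 65664 entries, a box with at
	 * most 8 MB of RAM takes all three branches above, so the
	 * threshold becomes 65664 >> 1 >> 1 >> 2 = 4104 entries.
	 */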

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			/* Locked caller: remember traversed nodes so they
			 * can be garbage collected afterwards.
			 */
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			/* Lockless caller: a writer changed the tree;
			 * give up and let inet_getpeer() retry under lock.
			 */
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
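
	/* Worked example (editorial note, not in the original): the line
	 * above interpolates linearly between maxttl (empty pool) and
	 * minttl (full pool).  With the defaults (maxttl = 600*HZ,
	 * minttl = 120*HZ, threshold = 65664), a pool at half occupancy
	 * gives ttl = 600*HZ - 480 * 32832 / 65664 * HZ = 360*HZ, i.e.
	 * entries idle for more than six minutes become eligible.
	 */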
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		delta = (__u32)jiffies - p->dtime;
		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			/* One reference for the tree, one for the caller. */
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
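
/* Usage sketch (editorial illustration, not part of the original file):
 * a typical caller resolves a peer for an IPv4 destination and drops the
 * reference when done.  inet_getpeer_v4() is assumed to be the inline
 * helper from <net/inetpeer.h>; the vif value of 0 and the function name
 * below are illustrative only.
 */
static inline void inet_getpeer_usage_sketch(struct inet_peer_base *base,
					     __be32 v4daddr)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(base, v4daddr, 0, 1);	/* create if absent */
	if (peer) {
		/* ... use peer->rate_tokens, peer->metrics, ... */
		inet_putpeer(peer);	/* may schedule RCU freeing */
	}
}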

void inet_putpeer(struct inet_peer *p)
{
	/* Record the time of last use; inet_peer_gc() compares it
	 * against the pool's current TTL.
	 */
	p->dtime = (__u32)jiffies;

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but those work for packet destinations while
 *	xrlim_allow works for icmp destinations.  This means the rate
 *	limiting information for one "ip object" is shared - and these
 *	ICMPs are limited twice: by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
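
/* Usage sketch (editorial illustration, not part of the original file):
 * tokens accrue at one per jiffy and each transmission costs "timeout"
 * tokens, so a timeout of HZ allows a sustained rate of roughly one
 * message per second with a burst of XRLIM_BURST_FACTOR messages.
 * The helper name is illustrative only.
 */
static inline bool icmp_rate_ok_sketch(struct inet_peer *peer)
{
	return inet_peer_xrlim_allow(peer, HZ);
}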

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);