/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *
 *  (A minimal usage sketch of the get/put lifecycle follows this comment.)
 */
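/*
 *  Minimal lifecycle sketch for the scheme described above. This is a
 *  hypothetical caller, not code from this file: the net pointer, the
 *  address source and the "1" create flag are illustrative assumptions.
 *
 *	struct inetpeer_addr daddr;
 *	struct inet_peer *peer;
 *
 *	inetpeer_set_addr_v4(&daddr, ip_hdr(skb)->saddr);
 *	peer = inet_getpeer(net->ipv4.peers, &daddr, 1);
 *	if (peer) {
 *		... read or update long-lived per-peer state ...
 *		inet_putpeer(peer);
 *	}
 */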
static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;		/* start to throw entries more
						 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = kmem_cache_create("inet_peer_cache",
					sizeof(struct inet_peer),
					0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					NULL);
}
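/* Worked example for the sizing above (illustrative numbers, not taken from
 * the code): with 4 GiB of RAM and a 128-byte cache-aligned struct inet_peer,
 * div64_ul() yields roughly (4 << 30) / (100 * 128) ~= 335544 candidate
 * entries, which clamp_val() then caps at 65536 + 128.  On a very small
 * machine the same clamp raises the threshold to at least 4096 entries.
 */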
/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}
static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}
/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}
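/* Worked example for the ttl computation above (illustrative numbers): with
 * peer_maxttl = 600 * HZ, peer_minttl = 120 * HZ and peer_threshold = 65664,
 * a half-full pool (base->total = 32832) gets
 * ttl = 600*HZ - (480 * 32832 / 65664) * HZ = 360 * HZ.  The permitted idle
 * time thus shrinks linearly from about 10 minutes for an empty pool towards
 * about 2 minutes as the pool approaches the threshold, and drops to 0 once
 * the threshold is reached.
 */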
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock before.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);