net/ipv4/inet_fragment.c

/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *	Authors:	Pavel Emelyanov <xemul@openvz.org>
 *			Started as consolidation of ipv4/ip_fragment.c,
 *			ipv6/reassembly, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS	128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
			  f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

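/* Re-key the hash table: pick a new random secret and move every queue to
 * the bucket it now hashes to.  Called from the eviction worker and
 * serialized by rnd_seqlock; inet_frag_may_rebuild() rate-limits rebuilds
 * to one per INETFRAGS_MIN_REBUILD_INTERVAL.
 */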
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock below;
				 * if it's already locked it will be released
				 * soon, since the other caller cannot be
				 * waiting for the hb lock that we've taken
				 * above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

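/* A queue is eligible for eviction only if it is not already queued on an
 * eviction list and its namespace is still at or above the low memory
 * threshold (or has the threshold disabled).
 */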
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	if (!hlist_unhashed(&q->list_evictor))
		return false;

	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

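/* Evict all eligible queues from one hash bucket.  Queues whose timer could
 * be stopped are collected on a private list under the chain lock and their
 * frag_expire() handler is invoked after the lock is dropped.  Returns the
 * number of queues evicted.
 */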
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

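/* Eviction work: scan up to INETFRAGS_EVICT_BUCKETS buckets per run,
 * resuming where the previous run stopped, and bail out early once more
 * than INETFRAGS_EVICT_MAX queues have been evicted.  Afterwards, rebuild
 * the hash secret if a rebuild was requested and the rate limit allows it.
 */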
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = READ_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

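/* Register a fragment queue family: initialize its eviction work, hash
 * buckets, the seqlock protecting the hash secret, and the kmem cache that
 * backs its queues.
 */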
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

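/* Called when a namespace is torn down: forbid new queues by zeroing
 * high_thresh, then evict every bucket until the per-namespace memory
 * accounting reaches zero, retrying if a secret rebuild raced with the
 * scan.
 */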
void inet_frags_exit_net(struct netns_frags *nf)
{
	struct inet_frags *f = nf->f;
	unsigned int seq;
	int i;

	nf->high_thresh = 0; /* prevent creation of new frags */

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

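/* Return fq's hash bucket with its chain lock held.  The bucket is computed
 * under rnd_seqlock, so the lookup is retried whenever a secret rebuild
 * changed the hash while the lock was being taken.
 */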
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, fq->net->f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

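/* Take a queue out of service: stop its timer (dropping the timer's
 * reference) and unlink it from its hash bucket, marking it complete so
 * that it is destroyed once the last reference is put.
 */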
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

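/* Final teardown of a completed queue: free every queued fragment skb, run
 * the family destructor, return the queue to its kmem cache and subtract
 * the released memory from the namespace accounting.
 */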
void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

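/* Insert a freshly allocated queue into its hash bucket.  On SMP another CPU
 * may have created a matching queue while we were allocating; in that case
 * the existing queue is returned and the new one is completed and dropped.
 */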
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on other cpu before
	 * we acquired hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

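/* Look up the queue matching @key in the bucket selected by @hash and take
 * a reference on it.  If nothing matches and the chain has not grown past
 * INETFRAGS_MAXDEPTH, a new queue is created; an overlong chain requests a
 * (rate limited) secret rebuild and yields ERR_PTR(-ENOBUFS).  Returns NULL
 * when the namespace is over its high memory threshold (or the threshold
 * is zero).
 */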
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);