]>
Commit | Line | Data |
---|---|---|
e7096c13 JD |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. | |
4 | * | |
5 | * This contains some basic static unit tests for the allowedips data structure. | |
6 | * It also has two additional modes that are disabled and meant to be used by | |
7 | * folks directly playing with this file. If you define the macro | |
8 | * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in | |
9 | * memory, it will be printed out as KERN_DEBUG in a format that can be passed | |
10 | * to graphviz (the dot command) to visualize it. If you define the macro | |
11 | * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of | |
12 | * randomized tests done against a trivial implementation, which may take | |
13 | * upwards of a half-hour to complete. There's no set of users who should be | |
14 | * enabling these, and the only developers that should go anywhere near these | |
15 | * knobs are the ones who are reading this comment. | |
16 | */ | |
17 | ||
18 | #ifdef DEBUG | |
19 | ||
20 | #include <linux/siphash.h> | |
21 | ||
22 | static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, | |
23 | u8 cidr) | |
24 | { | |
25 | swap_endian(dst, src, bits); | |
26 | memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); | |
27 | if (cidr) | |
28 | dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); | |
29 | } | |
30 | ||
31 | static __init void print_node(struct allowedips_node *node, u8 bits) | |
32 | { | |
33 | char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; | |
34 | char *fmt_declaration = KERN_DEBUG | |
35 | "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; | |
36 | char *style = "dotted"; | |
37 | u8 ip1[16], ip2[16]; | |
38 | u32 color = 0; | |
39 | ||
40 | if (bits == 32) { | |
41 | fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; | |
42 | fmt_declaration = KERN_DEBUG | |
43 | "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; | |
44 | } else if (bits == 128) { | |
45 | fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; | |
46 | fmt_declaration = KERN_DEBUG | |
47 | "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; | |
48 | } | |
49 | if (node->peer) { | |
50 | hsiphash_key_t key = { { 0 } }; | |
51 | ||
52 | memcpy(&key, &node->peer, sizeof(node->peer)); | |
53 | color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | | |
54 | hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | | |
55 | hsiphash_1u32(0xabad1dea, &key) % 200; | |
56 | style = "bold"; | |
57 | } | |
58 | swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); | |
59 | printk(fmt_declaration, ip1, node->cidr, style, color); | |
60 | if (node->bit[0]) { | |
61 | swap_endian_and_apply_cidr(ip2, | |
62 | rcu_dereference_raw(node->bit[0])->bits, bits, | |
63 | node->cidr); | |
64 | printk(fmt_connection, ip1, node->cidr, ip2, | |
65 | rcu_dereference_raw(node->bit[0])->cidr); | |
66 | print_node(rcu_dereference_raw(node->bit[0]), bits); | |
67 | } | |
68 | if (node->bit[1]) { | |
69 | swap_endian_and_apply_cidr(ip2, | |
70 | rcu_dereference_raw(node->bit[1])->bits, | |
71 | bits, node->cidr); | |
72 | printk(fmt_connection, ip1, node->cidr, ip2, | |
73 | rcu_dereference_raw(node->bit[1])->cidr); | |
74 | print_node(rcu_dereference_raw(node->bit[1]), bits); | |
75 | } | |
76 | } | |
77 | ||
78 | static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) | |
79 | { | |
80 | printk(KERN_DEBUG "digraph trie {\n"); | |
81 | print_node(rcu_dereference_raw(top), bits); | |
82 | printk(KERN_DEBUG "}\n"); | |
83 | } | |
84 | ||
/* Sizing knobs for the randomized self-test below; NUM_QUERIES scales with
 * the number of inserted routes so coverage tracks the table size.
 */
enum {
	NUM_PEERS = 2000,
	NUM_RAND_ROUTES = 400,
	NUM_MUTATED_ROUTES = 100,
	NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};
91 | ||
/* Deliberately naive linear-list allowedips implementation, used only as a
 * reference oracle that the real trie is compared against.
 */
struct horrible_allowedips {
	struct hlist_head head;
};
95 | ||
/* One route entry in the oracle list: a pre-masked address, its netmask,
 * the address family tag (4 or 6), and the opaque peer value it maps to.
 */
struct horrible_allowedips_node {
	struct hlist_node table;
	union nf_inet_addr ip;
	union nf_inet_addr mask;
	u8 ip_version;
	void *value;
};
103 | ||
/* Initializes an empty oracle table. */
static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
	INIT_HLIST_HEAD(&table->head);
}
108 | ||
109 | static __init void horrible_allowedips_free(struct horrible_allowedips *table) | |
110 | { | |
111 | struct horrible_allowedips_node *node; | |
112 | struct hlist_node *h; | |
113 | ||
114 | hlist_for_each_entry_safe(node, h, &table->head, table) { | |
115 | hlist_del(&node->table); | |
116 | kfree(node); | |
117 | } | |
118 | } | |
119 | ||
120 | static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) | |
121 | { | |
122 | union nf_inet_addr mask; | |
123 | ||
124 | memset(&mask, 0x00, 128 / 8); | |
125 | memset(&mask, 0xff, cidr / 8); | |
126 | if (cidr % 32) | |
127 | mask.all[cidr / 32] = (__force u32)htonl( | |
128 | (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); | |
129 | return mask; | |
130 | } | |
131 | ||
132 | static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) | |
133 | { | |
134 | return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + | |
135 | hweight32(subnet.all[2]) + hweight32(subnet.all[3]); | |
136 | } | |
137 | ||
138 | static __init inline void | |
139 | horrible_mask_self(struct horrible_allowedips_node *node) | |
140 | { | |
141 | if (node->ip_version == 4) { | |
142 | node->ip.ip &= node->mask.ip; | |
143 | } else if (node->ip_version == 6) { | |
144 | node->ip.ip6[0] &= node->mask.ip6[0]; | |
145 | node->ip.ip6[1] &= node->mask.ip6[1]; | |
146 | node->ip.ip6[2] &= node->mask.ip6[2]; | |
147 | node->ip.ip6[3] &= node->mask.ip6[3]; | |
148 | } | |
149 | } | |
150 | ||
151 | static __init inline bool | |
152 | horrible_match_v4(const struct horrible_allowedips_node *node, | |
153 | struct in_addr *ip) | |
154 | { | |
155 | return (ip->s_addr & node->mask.ip) == node->ip.ip; | |
156 | } | |
157 | ||
158 | static __init inline bool | |
159 | horrible_match_v6(const struct horrible_allowedips_node *node, | |
160 | struct in6_addr *ip) | |
161 | { | |
162 | return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == | |
163 | node->ip.ip6[0] && | |
164 | (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == | |
165 | node->ip.ip6[1] && | |
166 | (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == | |
167 | node->ip.ip6[2] && | |
168 | (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; | |
169 | } | |
170 | ||
/* Inserts @node into @table keeping entries ordered by decreasing prefix
 * length, so that the first hit found by the linear lookups below is the
 * longest-prefix match.  An entry with identical ip/mask/version is updated
 * in place and @node is freed instead.
 * NOTE(review): correctness of the oracle lookups depends entirely on this
 * ordering invariant; horrible_mask_to_cidr() assumes contiguous masks,
 * which holds for everything built via horrible_cidr_to_mask().
 */
static __init void
horrible_insert_ordered(struct horrible_allowedips *table,
			struct horrible_allowedips_node *node)
{
	struct horrible_allowedips_node *other = NULL, *where = NULL;
	u8 my_cidr = horrible_mask_to_cidr(node->mask);

	hlist_for_each_entry(other, &table->head, table) {
		/* Exact duplicate route: just take over the value. */
		if (!memcmp(&other->mask, &node->mask,
			    sizeof(union nf_inet_addr)) &&
		    !memcmp(&other->ip, &node->ip,
			    sizeof(union nf_inet_addr)) &&
		    other->ip_version == node->ip_version) {
			other->value = node->value;
			kfree(node);
			return;
		}
		where = other;
		/* Stop at the first entry no more specific than ours. */
		if (horrible_mask_to_cidr(other->mask) <= my_cidr)
			break;
	}
	/* Empty list / ran off the end / insert before the stop point. */
	if (!other && !where)
		hlist_add_head(&node->table, &table->head);
	else if (!other)
		hlist_add_behind(&node->table, &where->table);
	else
		hlist_add_before(&node->table, &where->table);
}
199 | ||
200 | static __init int | |
201 | horrible_allowedips_insert_v4(struct horrible_allowedips *table, | |
202 | struct in_addr *ip, u8 cidr, void *value) | |
203 | { | |
204 | struct horrible_allowedips_node *node = kzalloc(sizeof(*node), | |
205 | GFP_KERNEL); | |
206 | ||
207 | if (unlikely(!node)) | |
208 | return -ENOMEM; | |
209 | node->ip.in = *ip; | |
210 | node->mask = horrible_cidr_to_mask(cidr); | |
211 | node->ip_version = 4; | |
212 | node->value = value; | |
213 | horrible_mask_self(node); | |
214 | horrible_insert_ordered(table, node); | |
215 | return 0; | |
216 | } | |
217 | ||
218 | static __init int | |
219 | horrible_allowedips_insert_v6(struct horrible_allowedips *table, | |
220 | struct in6_addr *ip, u8 cidr, void *value) | |
221 | { | |
222 | struct horrible_allowedips_node *node = kzalloc(sizeof(*node), | |
223 | GFP_KERNEL); | |
224 | ||
225 | if (unlikely(!node)) | |
226 | return -ENOMEM; | |
227 | node->ip.in6 = *ip; | |
228 | node->mask = horrible_cidr_to_mask(cidr); | |
229 | node->ip_version = 6; | |
230 | node->value = value; | |
231 | horrible_mask_self(node); | |
232 | horrible_insert_ordered(table, node); | |
233 | return 0; | |
234 | } | |
235 | ||
236 | static __init void * | |
237 | horrible_allowedips_lookup_v4(struct horrible_allowedips *table, | |
238 | struct in_addr *ip) | |
239 | { | |
240 | struct horrible_allowedips_node *node; | |
241 | void *ret = NULL; | |
242 | ||
243 | hlist_for_each_entry(node, &table->head, table) { | |
244 | if (node->ip_version != 4) | |
245 | continue; | |
246 | if (horrible_match_v4(node, ip)) { | |
247 | ret = node->value; | |
248 | break; | |
249 | } | |
250 | } | |
251 | return ret; | |
252 | } | |
253 | ||
254 | static __init void * | |
255 | horrible_allowedips_lookup_v6(struct horrible_allowedips *table, | |
256 | struct in6_addr *ip) | |
257 | { | |
258 | struct horrible_allowedips_node *node; | |
259 | void *ret = NULL; | |
260 | ||
261 | hlist_for_each_entry(node, &table->head, table) { | |
262 | if (node->ip_version != 6) | |
263 | continue; | |
264 | if (horrible_match_v6(node, ip)) { | |
265 | ret = node->value; | |
266 | break; | |
267 | } | |
268 | } | |
269 | return ret; | |
270 | } | |
271 | ||
272 | static __init bool randomized_test(void) | |
273 | { | |
274 | unsigned int i, j, k, mutate_amount, cidr; | |
275 | u8 ip[16], mutate_mask[16], mutated[16]; | |
276 | struct wg_peer **peers, *peer; | |
277 | struct horrible_allowedips h; | |
278 | DEFINE_MUTEX(mutex); | |
279 | struct allowedips t; | |
280 | bool ret = false; | |
281 | ||
282 | mutex_init(&mutex); | |
283 | ||
284 | wg_allowedips_init(&t); | |
285 | horrible_allowedips_init(&h); | |
286 | ||
287 | peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); | |
288 | if (unlikely(!peers)) { | |
289 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
290 | goto free; | |
291 | } | |
292 | for (i = 0; i < NUM_PEERS; ++i) { | |
293 | peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); | |
294 | if (unlikely(!peers[i])) { | |
295 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
296 | goto free; | |
297 | } | |
298 | kref_init(&peers[i]->refcount); | |
8884bb43 | 299 | INIT_LIST_HEAD(&peers[i]->allowedips_list); |
e7096c13 JD |
300 | } |
301 | ||
302 | mutex_lock(&mutex); | |
303 | ||
304 | for (i = 0; i < NUM_RAND_ROUTES; ++i) { | |
305 | prandom_bytes(ip, 4); | |
306 | cidr = prandom_u32_max(32) + 1; | |
307 | peer = peers[prandom_u32_max(NUM_PEERS)]; | |
308 | if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, | |
309 | peer, &mutex) < 0) { | |
310 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
311 | goto free_locked; | |
312 | } | |
313 | if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, | |
314 | cidr, peer) < 0) { | |
315 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
316 | goto free_locked; | |
317 | } | |
318 | for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { | |
319 | memcpy(mutated, ip, 4); | |
320 | prandom_bytes(mutate_mask, 4); | |
321 | mutate_amount = prandom_u32_max(32); | |
322 | for (k = 0; k < mutate_amount / 8; ++k) | |
323 | mutate_mask[k] = 0xff; | |
324 | mutate_mask[k] = 0xff | |
325 | << ((8 - (mutate_amount % 8)) % 8); | |
326 | for (; k < 4; ++k) | |
327 | mutate_mask[k] = 0; | |
328 | for (k = 0; k < 4; ++k) | |
329 | mutated[k] = (mutated[k] & mutate_mask[k]) | | |
330 | (~mutate_mask[k] & | |
331 | prandom_u32_max(256)); | |
332 | cidr = prandom_u32_max(32) + 1; | |
333 | peer = peers[prandom_u32_max(NUM_PEERS)]; | |
334 | if (wg_allowedips_insert_v4(&t, | |
335 | (struct in_addr *)mutated, | |
336 | cidr, peer, &mutex) < 0) { | |
8884bb43 | 337 | pr_err("allowedips random self-test malloc: FAIL\n"); |
e7096c13 JD |
338 | goto free_locked; |
339 | } | |
340 | if (horrible_allowedips_insert_v4(&h, | |
341 | (struct in_addr *)mutated, cidr, peer)) { | |
342 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
343 | goto free_locked; | |
344 | } | |
345 | } | |
346 | } | |
347 | ||
348 | for (i = 0; i < NUM_RAND_ROUTES; ++i) { | |
349 | prandom_bytes(ip, 16); | |
350 | cidr = prandom_u32_max(128) + 1; | |
351 | peer = peers[prandom_u32_max(NUM_PEERS)]; | |
352 | if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, | |
353 | peer, &mutex) < 0) { | |
354 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
355 | goto free_locked; | |
356 | } | |
357 | if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, | |
358 | cidr, peer) < 0) { | |
359 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
360 | goto free_locked; | |
361 | } | |
362 | for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { | |
363 | memcpy(mutated, ip, 16); | |
364 | prandom_bytes(mutate_mask, 16); | |
365 | mutate_amount = prandom_u32_max(128); | |
366 | for (k = 0; k < mutate_amount / 8; ++k) | |
367 | mutate_mask[k] = 0xff; | |
368 | mutate_mask[k] = 0xff | |
369 | << ((8 - (mutate_amount % 8)) % 8); | |
370 | for (; k < 4; ++k) | |
371 | mutate_mask[k] = 0; | |
372 | for (k = 0; k < 4; ++k) | |
373 | mutated[k] = (mutated[k] & mutate_mask[k]) | | |
374 | (~mutate_mask[k] & | |
375 | prandom_u32_max(256)); | |
376 | cidr = prandom_u32_max(128) + 1; | |
377 | peer = peers[prandom_u32_max(NUM_PEERS)]; | |
378 | if (wg_allowedips_insert_v6(&t, | |
379 | (struct in6_addr *)mutated, | |
380 | cidr, peer, &mutex) < 0) { | |
381 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
382 | goto free_locked; | |
383 | } | |
384 | if (horrible_allowedips_insert_v6( | |
385 | &h, (struct in6_addr *)mutated, cidr, | |
386 | peer)) { | |
387 | pr_err("allowedips random self-test malloc: FAIL\n"); | |
388 | goto free_locked; | |
389 | } | |
390 | } | |
391 | } | |
392 | ||
393 | mutex_unlock(&mutex); | |
394 | ||
395 | if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { | |
396 | print_tree(t.root4, 32); | |
397 | print_tree(t.root6, 128); | |
398 | } | |
399 | ||
400 | for (i = 0; i < NUM_QUERIES; ++i) { | |
401 | prandom_bytes(ip, 4); | |
402 | if (lookup(t.root4, 32, ip) != | |
403 | horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { | |
404 | pr_err("allowedips random self-test: FAIL\n"); | |
405 | goto free; | |
406 | } | |
407 | } | |
408 | ||
409 | for (i = 0; i < NUM_QUERIES; ++i) { | |
410 | prandom_bytes(ip, 16); | |
411 | if (lookup(t.root6, 128, ip) != | |
412 | horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { | |
413 | pr_err("allowedips random self-test: FAIL\n"); | |
414 | goto free; | |
415 | } | |
416 | } | |
417 | ret = true; | |
418 | ||
419 | free: | |
420 | mutex_lock(&mutex); | |
421 | free_locked: | |
422 | wg_allowedips_free(&t, &mutex); | |
423 | mutex_unlock(&mutex); | |
424 | horrible_allowedips_free(&h); | |
425 | if (peers) { | |
426 | for (i = 0; i < NUM_PEERS; ++i) | |
427 | kfree(peers[i]); | |
428 | } | |
429 | kfree(peers); | |
430 | return ret; | |
431 | } | |
432 | ||
433 | static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) | |
434 | { | |
435 | static struct in_addr ip; | |
436 | u8 *split = (u8 *)&ip; | |
437 | ||
438 | split[0] = a; | |
439 | split[1] = b; | |
440 | split[2] = c; | |
441 | split[3] = d; | |
442 | return &ip; | |
443 | } | |
444 | ||
445 | static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) | |
446 | { | |
447 | static struct in6_addr ip; | |
448 | __be32 *split = (__be32 *)&ip; | |
449 | ||
450 | split[0] = cpu_to_be32(a); | |
451 | split[1] = cpu_to_be32(b); | |
452 | split[2] = cpu_to_be32(c); | |
453 | split[3] = cpu_to_be32(d); | |
454 | return &ip; | |
455 | } | |
456 | ||
457 | static __init struct wg_peer *init_peer(void) | |
458 | { | |
459 | struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL); | |
460 | ||
461 | if (!peer) | |
462 | return NULL; | |
463 | kref_init(&peer->refcount); | |
464 | INIT_LIST_HEAD(&peer->allowedips_list); | |
465 | return peer; | |
466 | } | |
467 | ||
/* Inserts ip{4,6}(ipa..ipd)/cidr -> mem into the trie t, under mutex.
 * Relies on locals t and mutex in the enclosing function.
 */
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
	wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
					cidr, mem, &mutex)

/* Bumps the running test counter i; logs and flags failure when the
 * surrounding macro's _s result is false.  Relies on locals i and success.
 */
#define maybe_fail() do {                                               \
		++i;                                                    \
		if (!_s) {                                              \
			pr_info("allowedips self-test %zu: FAIL\n", i); \
			success = false;                                \
		}                                                       \
	} while (0)

/* Asserts that looking up ip{4,6}(ipa..ipd) yields exactly mem. */
#define test(version, mem, ipa, ipb, ipc, ipd) do {                          \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) == (mem);  \
		maybe_fail();                                                \
	} while (0)

/* Asserts that looking up ip{4,6}(ipa..ipd) does NOT yield mem. */
#define test_negative(version, mem, ipa, ipb, ipc, ipd) do {                 \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) != (mem);  \
		maybe_fail();                                                \
	} while (0)

/* Asserts an arbitrary boolean condition as a numbered test. */
#define test_boolean(cond) do {   \
		bool _s = (cond); \
		maybe_fail();     \
	} while (0)
496 | ||
/* Static self-test entry point: builds a trie of known v4/v6 routes, checks
 * a fixed series of lookups against the expected peers, exercises
 * remove-by-peer, deep-trie freeing, and per-peer iteration via
 * wg_allowedips_read_node(), then optionally runs the randomized test.
 * Returns true when every check passes.
 */
bool __init wg_allowedips_selftest(void)
{
	bool found_a = false, found_b = false, found_c = false, found_d = false,
	     found_e = false, found_other = false;
	struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
		       *d = init_peer(), *e = init_peer(), *f = init_peer(),
		       *g = init_peer(), *h = init_peer();
	struct allowedips_node *iter_node;
	bool success = false;
	struct allowedips t;
	DEFINE_MUTEX(mutex);
	struct in6_addr ip;
	size_t i = 0, count = 0;
	__be64 part;

	mutex_init(&mutex);
	mutex_lock(&mutex);
	wg_allowedips_init(&t);

	if (!a || !b || !c || !d || !e || !f || !g || !h) {
		pr_err("allowedips self-test malloc: FAIL\n");
		goto free;
	}

	/* Fixed route set; later inserts deliberately shadow or replace
	 * earlier ones to exercise replacement and self-masking.
	 */
	insert(4, a, 192, 168, 4, 0, 24);
	insert(4, b, 192, 168, 4, 4, 32);
	insert(4, c, 192, 168, 0, 0, 16);
	insert(4, d, 192, 95, 5, 64, 27);
	/* replaces previous entry, and maskself is required */
	insert(4, c, 192, 95, 5, 65, 27);
	insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
	insert(4, e, 0, 0, 0, 0, 0);
	insert(6, e, 0, 0, 0, 0, 0);
	/* replaces previous entry */
	insert(6, f, 0, 0, 0, 0, 0);
	insert(6, g, 0x24046800, 0, 0, 0, 32);
	/* maskself is required */
	insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
	insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
	insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
	insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
	insert(4, g, 64, 15, 112, 0, 20);
	/* maskself is required */
	insert(4, h, 64, 15, 123, 211, 25);
	insert(4, a, 10, 0, 0, 0, 25);
	insert(4, b, 10, 0, 0, 128, 25);
	insert(4, a, 10, 1, 0, 0, 30);
	insert(4, b, 10, 1, 0, 4, 30);
	insert(4, c, 10, 1, 0, 8, 29);
	insert(4, d, 10, 1, 0, 16, 29);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}

	success = true;

	/* Longest-prefix-match expectations for the routes above. */
	test(4, a, 192, 168, 4, 20);
	test(4, a, 192, 168, 4, 0);
	test(4, b, 192, 168, 4, 4);
	test(4, c, 192, 168, 200, 182);
	test(4, c, 192, 95, 5, 68);
	test(4, e, 192, 95, 5, 96);
	test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
	test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
	test(6, f, 0x26075300, 0x60006b01, 0, 0);
	test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
	test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0, 0);
	test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
	test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
	test(4, g, 64, 15, 116, 26);
	test(4, g, 64, 15, 127, 3);
	test(4, g, 64, 15, 123, 1);
	test(4, h, 64, 15, 123, 128);
	test(4, h, 64, 15, 123, 129);
	test(4, a, 10, 0, 0, 52);
	test(4, b, 10, 0, 0, 220);
	test(4, a, 10, 1, 0, 2);
	test(4, b, 10, 1, 0, 6);
	test(4, c, 10, 1, 0, 10);
	test(4, d, 10, 1, 0, 20);

	/* remove-by-peer must drop every route owned by a. */
	insert(4, a, 1, 0, 0, 0, 32);
	insert(4, a, 64, 0, 0, 0, 32);
	insert(4, a, 128, 0, 0, 0, 32);
	insert(4, a, 192, 0, 0, 0, 32);
	insert(4, a, 255, 0, 0, 0, 32);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 1, 0, 0, 0);
	test_negative(4, a, 64, 0, 0, 0);
	test_negative(4, a, 128, 0, 0, 0);
	test_negative(4, a, 192, 0, 0, 0);
	test_negative(4, a, 255, 0, 0, 0);

	wg_allowedips_free(&t, &mutex);
	wg_allowedips_init(&t);
	insert(4, a, 192, 168, 0, 0, 16);
	insert(4, a, 192, 168, 0, 0, 24);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 192, 168, 0, 1);

	/* These will hit the WARN_ON(len >= 128) in free_node if something
	 * goes wrong.
	 */
	for (i = 0; i < 128; ++i) {
		part = cpu_to_be64(~(1LLU << (i % 64)));
		memset(&ip, 0xff, 16);
		memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	}

	wg_allowedips_free(&t, &mutex);

	/* Iterate peer a's routes via wg_allowedips_read_node() and verify
	 * exactly the five expected (already self-masked) entries appear.
	 */
	wg_allowedips_init(&t);
	insert(4, a, 192, 95, 5, 93, 27);
	insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(4, a, 10, 1, 0, 20, 29);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
	list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
		u8 cidr, ip[16] __aligned(__alignof(u64));
		int family = wg_allowedips_read_node(iter_node, ip, &cidr);

		count++;

		if (cidr == 27 && family == AF_INET &&
		    !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
			found_a = true;
		else if (cidr == 128 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
				 sizeof(struct in6_addr)))
			found_b = true;
		else if (cidr == 29 && family == AF_INET &&
			 !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
			found_c = true;
		else if (cidr == 83 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
				 sizeof(struct in6_addr)))
			found_d = true;
		else if (cidr == 21 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075000, 0, 0, 0),
				 sizeof(struct in6_addr)))
			found_e = true;
		else
			found_other = true;
	}
	test_boolean(count == 5);
	test_boolean(found_a);
	test_boolean(found_b);
	test_boolean(found_c);
	test_boolean(found_d);
	test_boolean(found_e);
	test_boolean(!found_other);

	if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
		success = randomized_test();

	if (success)
		pr_info("allowedips self-tests: pass\n");

free:
	wg_allowedips_free(&t, &mutex);
	kfree(a);
	kfree(b);
	kfree(c);
	kfree(d);
	kfree(e);
	kfree(f);
	kfree(g);
	kfree(h);
	mutex_unlock(&mutex);

	return success;
}
677 | ||
678 | #undef test_negative | |
679 | #undef test | |
680 | #undef remove | |
681 | #undef insert | |
682 | #undef init_peer | |
683 | ||
684 | #endif |