/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"

#define KEYS_PER_BUCKET	4

struct bucket {
	union {
		uintptr_t next;
		uint64_t lru_list;
	};
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};

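/*
 * Bucket chaining uses pointer tagging: bit 0 of the "next" field flags a
 * valid chained extension bucket. Every bucket is 32 bytes in size and
 * 32-byte aligned within the table memory, so bit 0 of a genuine bucket
 * pointer is always zero and is free to carry the flag.
 */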
#define BUCKET_NEXT(bucket)						\
	((void *) ((bucket)->next & (~1LU)))

#define BUCKET_NEXT_VALID(bucket)					\
	((bucket)->next & 1LU)

#define BUCKET_NEXT_SET(bucket, bucket_next)				\
do									\
	(bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU);\
while (0)

#define BUCKET_NEXT_SET_NULL(bucket)					\
do									\
	(bucket)->next = 0;						\
while (0)

#define BUCKET_NEXT_COPY(bucket, bucket2)				\
do									\
	(bucket)->next = (bucket2)->next;				\
while (0)

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

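/* Per-packet lookup state carried between the stages of the pipelined
 * lookup (see rte_table_hash_ext_lookup() below). */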
struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	uint32_t n_buckets_ext;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t signature_offset;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;
	uint32_t bkt_ext_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	struct bucket *buckets;
	struct bucket *buckets_ext;
	uint8_t *key_mem;
	uint8_t *data_mem;
	uint32_t *key_stack;
	uint32_t *bkt_ext_stack;

	/* Table memory */
	uint8_t memory[0] __rte_cache_aligned;
};

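/*
 * Validate creation parameters: key_size, n_keys and n_buckets must be
 * non-zero powers of 2, and n_buckets must be large enough to hold n_keys
 * at KEYS_PER_BUCKET keys per bucket.
 */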
static int
check_params_create(struct rte_table_hash_ext_params *params)
{
	uint32_t n_buckets_min;

	/* key_size */
	if ((params->key_size == 0) ||
		(!rte_is_power_of_2(params->key_size))) {
		RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if ((params->n_keys == 0) ||
		(!rte_is_power_of_2(params->n_keys))) {
		RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) /
		KEYS_PER_BUCKET;
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets)) ||
		(params->n_buckets < n_buckets_min)) {
		RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
		return -EINVAL;
	}

	return 0;
}

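/*
 * The table is a single cache-aligned allocation. The memory[] trailer is
 * carved up, in this order, into: buckets, extension buckets, key storage,
 * the key free stack, the extension-bucket free stack, and the entry data
 * array. Both free stacks are LIFO lists of indices and start out full.
 */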
static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_ext_params *p = params;
	struct rte_table_hash *t;
	uint32_t total_size, table_meta_sz;
	uint32_t bucket_sz, bucket_ext_sz, key_sz;
	uint32_t key_stack_sz, bkt_ext_stack_sz, data_sz;
	uint32_t bucket_offset, bucket_ext_offset, key_offset;
	uint32_t key_stack_offset, bkt_ext_stack_offset, data_offset;
	uint32_t i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
		return NULL;

	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
	bucket_ext_sz =
		RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	bkt_ext_stack_sz =
		RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + bucket_sz + bucket_ext_sz + key_sz +
		key_stack_sz + bkt_ext_stack_sz + data_sz;

	t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (t == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
		"%u bytes\n", __func__, p->key_size, total_size);

	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = p->n_buckets;
	t->n_buckets_ext = p->n_buckets_ext;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->signature_offset = p->signature_offset;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = __builtin_ctzl(p->key_size);
	t->data_size_shl = __builtin_ctzl(entry_size);

	/* Tables */
	bucket_offset = 0;
	bucket_ext_offset = bucket_offset + bucket_sz;
	key_offset = bucket_ext_offset + bucket_ext_sz;
	key_stack_offset = key_offset + key_sz;
	bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
	data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;

	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key stack */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* Bucket ext stack */
	for (i = 0; i < t->n_buckets_ext; i++)
		t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
	t->bkt_ext_stack_tos = t->n_buckets_ext;

	return t;
}

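/*
 * Minimal creation sketch (illustrative only: my_hash stands for an
 * application-supplied rte_table_hash_op_hash function, and the field
 * names follow the parameter usage in the create path above):
 *
 *	struct rte_table_hash_ext_params params = {
 *		.key_size = 16,
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,
 *		.n_buckets_ext = 1 << 12,
 *		.f_hash = my_hash,
 *		.seed = 0,
 *		.signature_offset = 0,
 *		.key_offset = 0,
 *	};
 *	void *table = rte_table_hash_ext_ops.f_create(&params, 0, 8);
 *
 * key_size, n_keys and the 8-byte entry_size are all powers of 2, and
 * n_buckets = n_keys / KEYS_PER_BUCKET meets the minimum bucket count.
 */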
static int
rte_table_hash_ext_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}

static int
rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

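	/*
	 * The hash value is used twice: the low bits select a bucket, and
	 * bits 16..31 (with the least significant bit forced to 1) become
	 * the 16-bit stored signature, so a stored signature of 0 always
	 * means a free slot.
	 */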
	sig = t->f_hash(key, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key =
				&t->key_mem[bkt_key_index << t->key_size_shl];

			if ((sig == bkt_sig) && (memcmp(key, bkt_key,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				memcpy(data, entry, t->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Key is not present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];

			if (bkt_sig == 0) {
				uint32_t bkt_key_index;
				uint8_t *bkt_key, *data;

				/* Allocate new key */
				if (t->key_stack_tos == 0) /* No free keys */
					return -ENOSPC;

				bkt_key_index = t->key_stack[
					--t->key_stack_tos];

				/* Install new key */
				bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];
				data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				bkt->sig[i] = (uint16_t) sig;
				bkt->key_pos[i] = bkt_key_index;
				memcpy(bkt_key, key, t->key_size);
				memcpy(data, entry, t->entry_size);

				*key_found = 0;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
		uint32_t bkt_key_index;
		uint8_t *bkt_key, *data;

		/* Allocate new bucket ext */
		bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
		bkt = &t->buckets_ext[bkt_index];

		/* Chain the new bucket ext */
		BUCKET_NEXT_SET(bkt_prev, bkt);
		BUCKET_NEXT_SET_NULL(bkt);

		/* Allocate new key */
		bkt_key_index = t->key_stack[--t->key_stack_tos];
		bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];

		data = &t->data_mem[bkt_key_index << t->data_size_shl];

		/* Install new key into bucket */
		bkt->sig[0] = (uint16_t) sig;
		bkt->key_pos[0] = bkt_key_index;
		memcpy(bkt_key, key, t->key_size);
		memcpy(data, entry, t->entry_size);

		*key_found = 0;
		*entry_ptr = (void *) data;
		return 0;
	}

	return -ENOSPC;
}

static int
rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
	void *entry)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
				t->key_size_shl];

			if ((sig == bkt_sig) && (memcmp(key, bkt_key,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				/* Uninstall key from bucket */
				bkt->sig[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, data, t->entry_size);

				/* Free key */
				t->key_stack[t->key_stack_tos++] =
					bkt_key_index;

				/* Check if bucket is unused */
				if ((bkt_prev != NULL) &&
					(bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
					(bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
					/* Unchain bucket */
					BUCKET_NEXT_COPY(bkt_prev, bkt);

					/* Clear bucket */
					memset(bkt, 0, sizeof(struct bucket));

					/* Free bucket back to buckets ext */
					bkt_index = bkt - t->buckets_ext;
					t->bkt_ext_stack[t->bkt_ext_stack_tos++]
						= bkt_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}

static int rte_table_hash_ext_lookup_unoptimized(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries,
	int dosig)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	for ( ; pkts_mask; ) {
		struct bucket *bkt0, *bkt;
		struct rte_mbuf *pkt;
		uint8_t *key;
		uint64_t pkt_mask, sig;
		uint32_t pkt_index, bkt_index, i;

		pkt_index = __builtin_ctzll(pkts_mask);
		pkt_mask = 1LLU << pkt_index;
		pkts_mask &= ~pkt_mask;

		pkt = pkts[pkt_index];
		key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
		if (dosig)
			sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
		else
			sig = RTE_MBUF_METADATA_UINT32(pkt,
				t->signature_offset);

		bkt_index = sig & t->bucket_mask;
		bkt0 = &t->buckets[bkt_index];
		sig = (sig >> 16) | 1LLU;

		/* Key is present in the bucket */
		for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
			for (i = 0; i < KEYS_PER_BUCKET; i++) {
				uint64_t bkt_sig = (uint64_t) bkt->sig[i];
				uint32_t bkt_key_index = bkt->key_pos[i];
				uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];

				if ((sig == bkt_sig) && (memcmp(key, bkt_key,
					t->key_size) == 0)) {
					uint8_t *data = &t->data_mem[
						bkt_key_index << t->data_size_shl];

					pkts_mask_out |= pkt_mask;
					entries[pkt_index] = (void *) data;
					break;
				}
			}
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
}

/***
 *
 * mask = match bitmask
 * match = at least one match
 * match_many = more than one match
 * match_pos = position of first match
 *
 *----------------------------------------
 * mask		match	match_many	match_pos
 *----------------------------------------
 * 0000		0	0		00
 * 0001		1	0		00
 * 0010		1	0		01
 * 0011		1	1		00
 *----------------------------------------
 * 0100		1	0		10
 * 0101		1	1		00
 * 0110		1	1		01
 * 0111		1	1		00
 *----------------------------------------
 * 1000		1	0		11
 * 1001		1	1		00
 * 1010		1	1		01
 * 1011		1	1		00
 *----------------------------------------
 * 1100		1	1		10
 * 1101		1	1		00
 * 1110		1	1		01
 * 1111		1	1		00
 *----------------------------------------
 *
 * match = 1111_1111_1111_1110
 * match_many = 1111_1110_1110_1000
 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
 *
 * match = 0xFFFELLU
 * match_many = 0xFEE8LLU
 * match_pos = 0x12131210LLU
 *
 ***/

#define LUT_MATCH	0xFFFELLU
#define LUT_MATCH_MANY	0xFEE8LLU
#define LUT_MATCH_POS	0x12131210LLU

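/*
 * Worked example: if only slots 1 and 2 match, mask_all = 0110 = 6, so
 * match = (0xFFFE >> 6) & 1 = 1, match_many = (0xFEE8 >> 6) & 1 = 1, and
 * match_pos = (0x12131210 >> 12) & 3 = 1: the first match is reported at
 * slot 1 and match_many flags the duplicate for slow-path resolution.
 */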
#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)	\
{									\
	uint64_t bucket_sig[4], mask[4], mask_all;			\
									\
	bucket_sig[0] = bucket->sig[0];					\
	bucket_sig[1] = bucket->sig[1];					\
	bucket_sig[2] = bucket->sig[2];					\
	bucket_sig[3] = bucket->sig[3];					\
									\
	bucket_sig[0] ^= mbuf_sig;					\
	bucket_sig[1] ^= mbuf_sig;					\
	bucket_sig[2] ^= mbuf_sig;					\
	bucket_sig[3] ^= mbuf_sig;					\
									\
	mask[0] = 0;							\
	mask[1] = 0;							\
	mask[2] = 0;							\
	mask[3] = 0;							\
									\
	if (bucket_sig[0] == 0)						\
		mask[0] = 1;						\
	if (bucket_sig[1] == 0)						\
		mask[1] = 2;						\
	if (bucket_sig[2] == 0)						\
		mask[2] = 4;						\
	if (bucket_sig[3] == 0)						\
		mask[3] = 8;						\
									\
	mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]);		\
									\
	match = (LUT_MATCH >> mask_all) & 1;				\
	match_many = (LUT_MATCH_MANY >> mask_all) & 1;			\
	match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;		\
}

#define lookup_cmp_key(mbuf, key, match_key, f)				\
{									\
	uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
	uint64_t *bkt_key = (uint64_t *) key;				\
									\
	switch (f->key_size) {						\
	case 8:								\
	{								\
		uint64_t xor = pkt_key[0] ^ bkt_key[0];			\
		match_key = 0;						\
		if (xor == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 16:							\
	{								\
		uint64_t xor[2], or;					\
									\
		xor[0] = pkt_key[0] ^ bkt_key[0];			\
		xor[1] = pkt_key[1] ^ bkt_key[1];			\
		or = xor[0] | xor[1];					\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 32:							\
	{								\
		uint64_t xor[4], or;					\
									\
		xor[0] = pkt_key[0] ^ bkt_key[0];			\
		xor[1] = pkt_key[1] ^ bkt_key[1];			\
		xor[2] = pkt_key[2] ^ bkt_key[2];			\
		xor[3] = pkt_key[3] ^ bkt_key[3];			\
		or = xor[0] | xor[1] | xor[2] | xor[3];			\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 64:							\
	{								\
		uint64_t xor[8], or;					\
									\
		xor[0] = pkt_key[0] ^ bkt_key[0];			\
		xor[1] = pkt_key[1] ^ bkt_key[1];			\
		xor[2] = pkt_key[2] ^ bkt_key[2];			\
		xor[3] = pkt_key[3] ^ bkt_key[3];			\
		xor[4] = pkt_key[4] ^ bkt_key[4];			\
		xor[5] = pkt_key[5] ^ bkt_key[5];			\
		xor[6] = pkt_key[6] ^ bkt_key[6];			\
		xor[7] = pkt_key[7] ^ bkt_key[7];			\
		or = xor[0] | xor[1] | xor[2] | xor[3] |		\
			xor[4] | xor[5] | xor[6] | xor[7];		\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	default:							\
		match_key = 0;						\
		if (memcmp(pkt_key, bkt_key, f->key_size) == 0)		\
			match_key = 1;					\
	}								\
}

#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}

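/*
 * Variant of lookup2_stage0 for the tail of the burst: once a single
 * packet remains, the second lane re-issues the same packet (pkt01_index
 * falls back to pkt00_index), so both lanes of the pipeline stay busy
 * for an odd number of packets.
 */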
#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index,\
	pkt01_index)							\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	if (pkts_mask == 0)						\
		pkt01_index = pkt00_index;				\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}

#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)		\
{									\
	struct grinder *g10, *g11;					\
	uint64_t sig10, sig11, bkt10_index, bkt11_index;		\
	struct rte_mbuf *mbuf10, *mbuf11;				\
	struct bucket *bkt10, *bkt11, *buckets = t->buckets;		\
	uint64_t bucket_mask = t->bucket_mask;				\
	uint32_t signature_offset = t->signature_offset;		\
									\
	mbuf10 = pkts[pkt10_index];					\
	sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
	bkt10_index = sig10 & bucket_mask;				\
	bkt10 = &buckets[bkt10_index];					\
									\
	mbuf11 = pkts[pkt11_index];					\
	sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
	bkt11_index = sig11 & bucket_mask;				\
	bkt11 = &buckets[bkt11_index];					\
									\
	rte_prefetch0(bkt10);						\
	rte_prefetch0(bkt11);						\
									\
	g10 = &g[pkt10_index];						\
	g10->sig = sig10;						\
	g10->bkt = bkt10;						\
									\
	g11 = &g[pkt11_index];						\
	g11->sig = sig11;						\
	g11->bkt = bkt11;						\
}

#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index)	\
{									\
	struct grinder *g10, *g11;					\
	uint64_t sig10, sig11, bkt10_index, bkt11_index;		\
	struct rte_mbuf *mbuf10, *mbuf11;				\
	struct bucket *bkt10, *bkt11, *buckets = t->buckets;		\
	uint8_t *key10, *key11;						\
	uint64_t bucket_mask = t->bucket_mask;				\
	rte_table_hash_op_hash f_hash = t->f_hash;			\
	uint64_t seed = t->seed;					\
	uint32_t key_size = t->key_size;				\
	uint32_t key_offset = t->key_offset;				\
									\
	mbuf10 = pkts[pkt10_index];					\
	key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);	\
	sig10 = (uint64_t) f_hash(key10, key_size, seed);		\
	bkt10_index = sig10 & bucket_mask;				\
	bkt10 = &buckets[bkt10_index];					\
									\
	mbuf11 = pkts[pkt11_index];					\
	key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);	\
	sig11 = (uint64_t) f_hash(key11, key_size, seed);		\
	bkt11_index = sig11 & bucket_mask;				\
	bkt11 = &buckets[bkt11_index];					\
									\
	rte_prefetch0(bkt10);						\
	rte_prefetch0(bkt11);						\
									\
	g10 = &g[pkt10_index];						\
	g10->sig = sig10;						\
	g10->bkt = bkt10;						\
									\
	g11 = &g[pkt11_index];						\
	g11->sig = sig11;						\
	g11->bkt = bkt11;						\
}

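/*
 * Stage 2 compares a packet's 16-bit signature against the four slots of
 * its bucket and prefetches the candidate key. Buckets with more than one
 * signature match, or with a chained extension bucket, are recorded in
 * pkts_mask_match_many and re-examined later on the slow path.
 */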
#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
{									\
	struct grinder *g20, *g21;					\
	uint64_t sig20, sig21;						\
	struct bucket *bkt20, *bkt21;					\
	uint8_t *key20, *key21, *key_mem = t->key_mem;			\
	uint64_t match20, match21, match_many20, match_many21;		\
	uint64_t match_pos20, match_pos21;				\
	uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
									\
	g20 = &g[pkt20_index];						\
	sig20 = g20->sig;						\
	bkt20 = g20->bkt;						\
	sig20 = (sig20 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
	match20 <<= pkt20_index;					\
	match_many20 |= BUCKET_NEXT_VALID(bkt20);			\
	match_many20 <<= pkt20_index;					\
	key20_index = bkt20->key_pos[match_pos20];			\
	key20 = &key_mem[key20_index << key_size_shl];			\
									\
	g21 = &g[pkt21_index];						\
	sig21 = g21->sig;						\
	bkt21 = g21->bkt;						\
	sig21 = (sig21 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
	match21 <<= pkt21_index;					\
	match_many21 |= BUCKET_NEXT_VALID(bkt21);			\
	match_many21 <<= pkt21_index;					\
	key21_index = bkt21->key_pos[match_pos21];			\
	key21 = &key_mem[key21_index << key_size_shl];			\
									\
	rte_prefetch0(key20);						\
	rte_prefetch0(key21);						\
									\
	pkts_mask_match_many |= match_many20 | match_many21;		\
									\
	g20->match = match20;						\
	g20->key_index = key20_index;					\
									\
	g21->match = match21;						\
	g21->key_index = key21_index;					\
}

#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,\
	entries)							\
{									\
	struct grinder *g30, *g31;					\
	struct rte_mbuf *mbuf30, *mbuf31;				\
	uint8_t *key30, *key31, *key_mem = t->key_mem;			\
	uint8_t *data30, *data31, *data_mem = t->data_mem;		\
	uint64_t match30, match31, match_key30, match_key31, match_keys;\
	uint32_t key30_index, key31_index;				\
	uint32_t key_size_shl = t->key_size_shl;			\
	uint32_t data_size_shl = t->data_size_shl;			\
									\
	mbuf30 = pkts[pkt30_index];					\
	g30 = &g[pkt30_index];						\
	match30 = g30->match;						\
	key30_index = g30->key_index;					\
	key30 = &key_mem[key30_index << key_size_shl];			\
	lookup_cmp_key(mbuf30, key30, match_key30, t);			\
	match_key30 <<= pkt30_index;					\
	match_key30 &= match30;						\
	data30 = &data_mem[key30_index << data_size_shl];		\
	entries[pkt30_index] = data30;					\
									\
	mbuf31 = pkts[pkt31_index];					\
	g31 = &g[pkt31_index];						\
	match31 = g31->match;						\
	key31_index = g31->key_index;					\
	key31 = &key_mem[key31_index << key_size_shl];			\
	lookup_cmp_key(mbuf31, key31, match_key31, t);			\
	match_key31 <<= pkt31_index;					\
	match_key31 &= match31;						\
	data31 = &data_mem[key31_index << data_size_shl];		\
	entries[pkt31_index] = data31;					\
									\
	rte_prefetch0(data30);						\
	rte_prefetch0(data31);						\
									\
	match_keys = match_key30 | match_key31;				\
	pkts_mask_out |= match_keys;					\
}

/***
 * The lookup function implements a 4-stage pipeline, with each stage
 * processing two different packets. The purpose of the pipelined
 * implementation is to hide the latency of prefetching the data structures
 * and to loosen the data dependency between instructions.
 *
 *  p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *  p01            p11            p21            p31
 *
 * The naming convention is:
 *	pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
 *
 ***/
static int rte_table_hash_ext_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);

	/* Cannot run the pipeline with fewer than 7 packets */
	if (__builtin_popcountll(pkts_mask) < 7) {
		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries, 0);
		RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
			__builtin_popcountll(*lookup_hit_mask));
		return status;
	}

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

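	/*
	 * Pipeline flush: six packets are still in flight in stages 0..2,
	 * so run the tail stages three more times without feeding new
	 * packets.
	 */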
	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Slow path */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return status;
}

static int rte_table_hash_ext_lookup_dosig(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);

	/* Cannot run the pipeline with fewer than 7 packets */
	if (__builtin_popcountll(pkts_mask) < 7) {
		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries, 1);
		RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
			__builtin_popcountll(*lookup_hit_mask));
		return status;
	}

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

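	/*
	 * Pipeline flush: drain the six packets still in flight, as in
	 * rte_table_hash_ext_lookup() above.
	 */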
	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Slow path */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return status;
}

static int
rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

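/*
 * Two flavors of the same table: rte_table_hash_ext_ops expects a
 * precomputed 32-bit signature in the mbuf metadata at signature_offset,
 * while the "dosig" variant hashes the packet key during lookup instead.
 */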
struct rte_table_ops rte_table_hash_ext_ops = {
	.f_create = rte_table_hash_ext_create,
	.f_free = rte_table_hash_ext_free,
	.f_add = rte_table_hash_ext_entry_add,
	.f_delete = rte_table_hash_ext_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_ext_lookup,
	.f_stats = rte_table_hash_ext_stats_read,
};

struct rte_table_ops rte_table_hash_ext_dosig_ops = {
	.f_create = rte_table_hash_ext_create,
	.f_free = rte_table_hash_ext_free,
	.f_add = rte_table_hash_ext_entry_add,
	.f_delete = rte_table_hash_ext_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_ext_lookup_dosig,
	.f_stats = rte_table_hash_ext_stats_read,
};