/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#define KEY_SIZE 32

#define KEYS_PER_BUCKET 4

#define RTE_BUCKET_ENTRY_VALID 0x1LLU

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
    table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
    table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
struct rte_bucket_4_32 {
    /* Cache line 0 */
    uint64_t signature[4 + 1];
    uint64_t lru_list;
    struct rte_bucket_4_32 *next;
    uint64_t next_valid;

    /* Cache lines 1 and 2 */
    uint64_t key[4][4];

    uint8_t data[0];
} __rte_cache_aligned;
struct rte_table_hash {
    struct rte_table_stats stats;

    /* Input parameters */
    uint32_t n_buckets;
    uint32_t key_size;
    uint32_t entry_size;
    uint32_t bucket_size;
    uint32_t key_offset;
    uint64_t key_mask[4];
    rte_table_hash_op_hash f_hash;
    uint64_t seed;

    /* Extendible buckets */
    uint32_t n_buckets_ext;
    uint32_t stack_pos;
    uint32_t *stack;

    /* Lookup table */
    uint8_t memory[0] __rte_cache_aligned;
};
static inline uint64_t
keycmp(void *a, void *b, void *b_mask)
{
    uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;

    return (a64[0] != (b64[0] & b_mask64[0])) ||
        (a64[1] != (b64[1] & b_mask64[1])) ||
        (a64[2] != (b64[2] & b_mask64[2])) ||
        (a64[3] != (b64[3] & b_mask64[3]));
}
static inline void
keycpy(void *dst, void *src, void *src_mask)
{
    uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;

    dst64[0] = src64[0] & src_mask64[0];
    dst64[1] = src64[1] & src_mask64[1];
    dst64[2] = src64[2] & src_mask64[2];
    dst64[3] = src64[3] & src_mask64[3];
}
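
/*
 * keycmp()/keycpy() treat a key as four 64-bit words and apply the 32-byte
 * key mask word by word, so only the masked bits of a key ever take part in
 * comparisons or get stored in a bucket. With the default all-ones mask
 * (installed when no key_mask is supplied at create time) they reduce to a
 * plain 32-byte compare and copy.
 */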
static int
check_params_create(struct rte_table_hash_params *params)
{
    if (params->name == NULL) {
        RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
        return -EINVAL;
    }

    if (params->key_size != KEY_SIZE) {
        RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
        return -EINVAL;
    }

    if (params->n_keys == 0) {
        RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
        return -EINVAL;
    }

    if ((params->n_buckets == 0) ||
        (!rte_is_power_of_2(params->n_buckets))) {
        RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
        return -EINVAL;
    }

    if (params->f_hash == NULL) {
        RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
            __func__);
        return -EINVAL;
    }

    return 0;
}
static void *
rte_table_hash_create_key32_lru(void *params,
    int socket_id,
    uint32_t entry_size)
{
    struct rte_table_hash_params *p = params;
    struct rte_table_hash *f;
    uint64_t bucket_size, total_size;
    uint32_t n_buckets, i;

    /* Check input parameters */
    if ((check_params_create(p) != 0) ||
        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
        ((sizeof(struct rte_bucket_4_32) % 64) != 0))
        return NULL;
    /*
     * Objective: Pick the number of buckets (n_buckets) so that there is a
     * chance to store n_keys keys in the table.
     *
     * Note: Since the buckets do not get extended, it is not possible to
     * guarantee that n_keys keys can be stored in the table at any time. In
     * the worst case scenario, when all the n_keys keys fall into the same
     * bucket, only a maximum of KEYS_PER_BUCKET keys will be stored in the
     * table. This case defeats the purpose of the hash table. It indicates
     * an unsuitable f_hash or n_keys to n_buckets ratio.
     *
     * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
     */
    n_buckets = rte_align32pow2(
        (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
    n_buckets = RTE_MAX(n_buckets, p->n_buckets);
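    /*
     * For example, with n_keys = 1000 and KEYS_PER_BUCKET = 4, the minimum
     * is (1000 + 3) / 4 = 250 buckets, which rte_align32pow2() rounds up to
     * 256; the larger of that value and the caller-supplied n_buckets is
     * then used.
     */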
    /* Memory allocation */
    bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
        KEYS_PER_BUCKET * entry_size);
    total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
    if (total_size > SIZE_MAX) {
        RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
            "for hash table %s\n",
            __func__, total_size, p->name);
        return NULL;
    }

    f = rte_zmalloc_socket(p->name,
        (size_t)total_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
            "for hash table %s\n",
            __func__, total_size, p->name);
        return NULL;
    }
    RTE_LOG(INFO, TABLE,
        "%s: Hash table %s memory footprint "
        "is %" PRIu64 " bytes\n",
        __func__, p->name, total_size);
    /* Memory initialization */
    f->n_buckets = n_buckets;
    f->key_size = KEY_SIZE;
    f->entry_size = entry_size;
    f->bucket_size = bucket_size;
    f->key_offset = p->key_offset;
    f->f_hash = p->f_hash;
    f->seed = p->seed;

    if (p->key_mask != NULL) {
        f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
        f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
        f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
        f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
    } else {
        f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
    }
    for (i = 0; i < n_buckets; i++) {
        struct rte_bucket_4_32 *bucket;

        bucket = (struct rte_bucket_4_32 *) &f->memory[i *
            f->bucket_size];
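        /*
         * Per-bucket LRU state: lru_list packs the four key positions into
         * four 16-bit fields ordered from most to least recently used
         * (assuming the lru_pos()/lru_update() helpers from rte_lru.h, which
         * read the LRU position from the lowest 16 bits). The value below
         * encodes the initial order 0, 1, 2, 3, so position 3 is the first
         * eviction candidate.
         */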
        bucket->lru_list = 0x0000000100020003LLU;
    }

    return f;
}
static int
rte_table_hash_free_key32_lru(void *table)
{
    struct rte_table_hash *f = table;

    /* Check input parameters */
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
        return -EINVAL;
    }

    rte_free(f);
    return 0;
}
static int
rte_table_hash_entry_add_key32_lru(
    void *table,
    void *key,
    void *entry,
    int *key_found,
    void **entry_ptr)
{
    struct rte_table_hash *f = table;
    struct rte_bucket_4_32 *bucket;
    uint64_t signature, pos;
    uint32_t bucket_index, i;

    signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
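    /*
     * OR-ing in the valid bit makes every stored signature non-zero, so a
     * zero signature[] slot unambiguously marks a free entry.
     */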
    /* Key is present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

        if ((bucket_signature == signature) &&
            (keycmp(bucket_key, key, f->key_mask) == 0)) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            memcpy(bucket_data, entry, f->entry_size);
            lru_update(bucket, i);
            *key_found = 1;
            *entry_ptr = (void *) bucket_data;

            return 0;
        }
    }

    /* Key is not present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

        if (bucket_signature == 0) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            bucket->signature[i] = signature;
            keycpy(bucket_key, key, f->key_mask);
            memcpy(bucket_data, entry, f->entry_size);
            lru_update(bucket, i);
            *key_found = 0;
            *entry_ptr = (void *) bucket_data;

            return 0;
        }
    }

    /* Bucket full: replace LRU entry */
    pos = lru_pos(bucket);
    bucket->signature[pos] = signature;
    keycpy(&bucket->key[pos], key, f->key_mask);
    memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
    lru_update(bucket, pos);
    *key_found = 0;
    *entry_ptr = (void *) &bucket->data[pos * f->entry_size];

    return 0;
}
static int
rte_table_hash_entry_delete_key32_lru(
    void *table,
    void *key,
    int *key_found,
    void *entry)
{
    struct rte_table_hash *f = table;
    struct rte_bucket_4_32 *bucket;
    uint64_t signature;
    uint32_t bucket_index, i;

    signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;

    /* Key is present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

        if ((bucket_signature == signature) &&
            (keycmp(bucket_key, key, f->key_mask) == 0)) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            bucket->signature[i] = 0;
            *key_found = 1;
            if (entry)
                memcpy(entry, bucket_data, f->entry_size);

            return 0;
        }
    }

    /* Key is not present in the bucket */
    *key_found = 0;
    return 0;
}
static void *
rte_table_hash_create_key32_ext(void *params,
    int socket_id,
    uint32_t entry_size)
{
    struct rte_table_hash_params *p = params;
    struct rte_table_hash *f;
    uint64_t bucket_size, stack_size, total_size;
    uint32_t n_buckets_ext, i;

    /* Check input parameters */
    if ((check_params_create(p) != 0) ||
        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
        ((sizeof(struct rte_bucket_4_32) % 64) != 0))
        return NULL;
    /*
     * Objective: Pick the number of bucket extensions (n_buckets_ext) so
     * that it is guaranteed that n_keys keys can be stored in the table at
     * any time.
     *
     * The worst case scenario takes place when all the n_keys keys fall
     * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
     * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
     * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
     * keys each fall into a different bucket. This case defeats the purpose
     * of the hash table. It indicates an unsuitable f_hash or n_keys to
     * n_buckets ratio.
     *
     * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
     */
    n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
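    /*
     * For example, with n_keys = 1000 and KEYS_PER_BUCKET = 4 this reserves
     * 1000 / 4 + 3 = 253 extension buckets, which is enough even for the
     * worst case described above (997 keys chained off one bucket and the
     * remaining 3 keys in distinct buckets).
     */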
    /* Memory allocation */
    bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
        KEYS_PER_BUCKET * entry_size);
    stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
    total_size = sizeof(struct rte_table_hash) +
        (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
    if (total_size > SIZE_MAX) {
        RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
            "for hash table %s\n",
            __func__, total_size, p->name);
        return NULL;
    }

    f = rte_zmalloc_socket(p->name,
        (size_t)total_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
            "for hash table %s\n",
            __func__, total_size, p->name);
        return NULL;
    }
    RTE_LOG(INFO, TABLE,
        "%s: Hash table %s memory footprint "
        "is %" PRIu64 " bytes\n",
        __func__, p->name, total_size);
    /* Memory initialization */
    f->n_buckets = p->n_buckets;
    f->key_size = KEY_SIZE;
    f->entry_size = entry_size;
    f->bucket_size = bucket_size;
    f->key_offset = p->key_offset;
    f->f_hash = p->f_hash;
    f->seed = p->seed;

    f->n_buckets_ext = n_buckets_ext;
    f->stack_pos = n_buckets_ext;
    f->stack = (uint32_t *)
        &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
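    /*
     * Memory layout behind the header: p->n_buckets regular buckets, then
     * n_buckets_ext extension buckets, then the stack of free extension
     * bucket indices. The stack starts full (stack_pos == n_buckets_ext),
     * i.e. every extension bucket is initially free.
     */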
    if (p->key_mask != NULL) {
        f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
        f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
        f->key_mask[2] = (((uint64_t *)p->key_mask)[2]);
        f->key_mask[3] = (((uint64_t *)p->key_mask)[3]);
    } else {
        f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
        f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
    }

    for (i = 0; i < n_buckets_ext; i++)
        f->stack[i] = i;

    return f;
}
static int
rte_table_hash_free_key32_ext(void *table)
{
    struct rte_table_hash *f = table;

    /* Check input parameters */
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
        return -EINVAL;
    }

    rte_free(f);
    return 0;
}
static int
rte_table_hash_entry_add_key32_ext(
    void *table,
    void *key,
    void *entry,
    int *key_found,
    void **entry_ptr)
{
    struct rte_table_hash *f = table;
    struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
    uint64_t signature;
    uint32_t bucket_index, i;

    signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket0 = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
    /* Key is present in the bucket */
    for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

            if ((bucket_signature == signature) &&
                (keycmp(bucket_key, key, f->key_mask) == 0)) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                memcpy(bucket_data, entry, f->entry_size);
                *key_found = 1;
                *entry_ptr = (void *) bucket_data;

                return 0;
            }
        }
    }

    /* Key is not present in the bucket */
    for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
        bucket_prev = bucket, bucket = bucket->next)
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

            if (bucket_signature == 0) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                bucket->signature[i] = signature;
                keycpy(bucket_key, key, f->key_mask);
                memcpy(bucket_data, entry, f->entry_size);
                *key_found = 0;
                *entry_ptr = (void *) bucket_data;

                return 0;
            }
        }
    /* Bucket full: extend bucket */
    if (f->stack_pos > 0) {
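        /*
         * f->stack holds the indices of the currently unused extension
         * buckets; popping one and linking it behind the last bucket of the
         * chain extends the bucket in constant time.
         */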
        bucket_index = f->stack[--f->stack_pos];

        bucket = (struct rte_bucket_4_32 *)
            &f->memory[(f->n_buckets + bucket_index) *
            f->bucket_size];
        bucket_prev->next = bucket;
        bucket_prev->next_valid = 1;

        bucket->signature[0] = signature;
        keycpy(&bucket->key[0], key, f->key_mask);
        memcpy(&bucket->data[0], entry, f->entry_size);
        *key_found = 0;
        *entry_ptr = (void *) &bucket->data[0];

        return 0;
    }

    return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key32_ext(
    void *table,
    void *key,
    int *key_found,
    void *entry)
{
    struct rte_table_hash *f = table;
    struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
    uint64_t signature;
    uint32_t bucket_index, i;

    signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket0 = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;

    /* Key is present in the bucket */
    for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
        bucket_prev = bucket, bucket = bucket->next)
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

            if ((bucket_signature == signature) &&
                (keycmp(bucket_key, key, f->key_mask) == 0)) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                bucket->signature[i] = 0;
                *key_found = 1;
                if (entry)
                    memcpy(entry, bucket_data, f->entry_size);

                if ((bucket->signature[0] == 0) &&
                    (bucket->signature[1] == 0) &&
                    (bucket->signature[2] == 0) &&
                    (bucket->signature[3] == 0) &&
                    (bucket_prev != NULL)) {
                    bucket_prev->next = bucket->next;
                    bucket_prev->next_valid =
                        bucket->next_valid;

                    memset(bucket, 0,
                        sizeof(struct rte_bucket_4_32));
                    bucket_index = (((uint8_t *)bucket -
                        (uint8_t *)f->memory) / f->bucket_size) -
                        f->n_buckets;
                    f->stack[f->stack_pos++] = bucket_index;
                }

                return 0;
            }
        }

    /* Key is not present in the bucket */
    *key_found = 0;
    return 0;
}
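
/*
 * lookup_key32_cmp() compares one packet key against all four entries of a
 * bucket without an early-exit branch per entry: it XORs the masked key with
 * each stored key, ORs the four 64-bit differences of an entry together with
 * the inverted valid bit of that entry's signature, and a per-entry result of
 * zero therefore means "valid entry with an exact 32-byte match", from which
 * the matching position pos is derived (pos stays 4, pointing at the
 * always-zero fifth signature slot, when nothing matches).
 */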
#define lookup_key32_cmp(key_in, bucket, pos, f) \
{ \
    uint64_t xor[4][4], or[4], signature[4], k[4]; \
 \
    k[0] = key_in[0] & f->key_mask[0]; \
    k[1] = key_in[1] & f->key_mask[1]; \
    k[2] = key_in[2] & f->key_mask[2]; \
    k[3] = key_in[3] & f->key_mask[3]; \
 \
    signature[0] = ((~bucket->signature[0]) & 1); \
    signature[1] = ((~bucket->signature[1]) & 1); \
    signature[2] = ((~bucket->signature[2]) & 1); \
    signature[3] = ((~bucket->signature[3]) & 1); \
 \
    xor[0][0] = k[0] ^ bucket->key[0][0]; \
    xor[0][1] = k[1] ^ bucket->key[0][1]; \
    xor[0][2] = k[2] ^ bucket->key[0][2]; \
    xor[0][3] = k[3] ^ bucket->key[0][3]; \
 \
    xor[1][0] = k[0] ^ bucket->key[1][0]; \
    xor[1][1] = k[1] ^ bucket->key[1][1]; \
    xor[1][2] = k[2] ^ bucket->key[1][2]; \
    xor[1][3] = k[3] ^ bucket->key[1][3]; \
 \
    xor[2][0] = k[0] ^ bucket->key[2][0]; \
    xor[2][1] = k[1] ^ bucket->key[2][1]; \
    xor[2][2] = k[2] ^ bucket->key[2][2]; \
    xor[2][3] = k[3] ^ bucket->key[2][3]; \
 \
    xor[3][0] = k[0] ^ bucket->key[3][0]; \
    xor[3][1] = k[1] ^ bucket->key[3][1]; \
    xor[3][2] = k[2] ^ bucket->key[3][2]; \
    xor[3][3] = k[3] ^ bucket->key[3][3]; \
 \
    or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0]; \
    or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1]; \
    or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2]; \
    or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3]; \
 \
    pos = 4; \
    if (or[0] == 0) \
        pos = 0; \
    if (or[1] == 0) \
        pos = 1; \
    if (or[2] == 0) \
        pos = 2; \
    if (or[3] == 0) \
        pos = 3; \
}
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
{ \
    uint64_t pkt_mask; \
    uint32_t key_offset = f->key_offset; \
    pkt0_index = __builtin_ctzll(pkts_mask); \
    pkt_mask = 1LLU << pkt0_index; \
    pkts_mask &= ~pkt_mask; \
    mbuf0 = pkts[pkt0_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset)); \
}
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
    uint64_t *key; \
    uint64_t signature; \
    uint32_t bucket_index; \
    key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
    signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
    bucket_index = signature & (f->n_buckets - 1); \
    bucket1 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket_index * f->bucket_size]; \
    rte_prefetch0(bucket1); \
    rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE)); \
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
    pkts_mask_out, entries, f) \
{ \
    void *a; \
    uint64_t pkt_mask; \
    uint64_t *key; \
    uint32_t pos; \
    key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
    lookup_key32_cmp(key, bucket2, pos, f); \
    pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
    pkts_mask_out |= pkt_mask; \
    a = (void *) &bucket2->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt2_index] = a; \
    lru_update(bucket2, pos); \
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, \
    entries, buckets_mask, buckets, keys, f) \
{ \
    struct rte_bucket_4_32 *bucket_next; \
    void *a; \
    uint64_t pkt_mask, bucket_mask; \
    uint64_t *key; \
    uint32_t pos; \
    key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
    lookup_key32_cmp(key, bucket2, pos, f); \
    pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
    pkts_mask_out |= pkt_mask; \
    a = (void *) &bucket2->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt2_index] = a; \
    bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index); \
    buckets_mask |= bucket_mask; \
    bucket_next = bucket2->next; \
    buckets[pkt2_index] = bucket_next; \
    keys[pkt2_index] = key; \
}
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
    entries, buckets_mask, f) \
{ \
    struct rte_bucket_4_32 *bucket, *bucket_next; \
    void *a; \
    uint64_t pkt_mask, bucket_mask; \
    uint64_t *key; \
    uint32_t pos; \
    bucket = buckets[pkt_index]; \
    key = keys[pkt_index]; \
    lookup_key32_cmp(key, bucket, pos, f); \
    pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index; \
    pkts_mask_out |= pkt_mask; \
    a = (void *) &bucket->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt_index] = a; \
    bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index); \
    buckets_mask |= bucket_mask; \
    bucket_next = bucket->next; \
    rte_prefetch0(bucket_next); \
    rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
        2 * RTE_CACHE_LINE_SIZE)); \
    buckets[pkt_index] = bucket_next; \
    keys[pkt_index] = key; \
}
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, \
    pkts, pkts_mask, f) \
{ \
    uint64_t pkt00_mask, pkt01_mask; \
    uint32_t key_offset = f->key_offset; \
    pkt00_index = __builtin_ctzll(pkts_mask); \
    pkt00_mask = 1LLU << pkt00_index; \
    pkts_mask &= ~pkt00_mask; \
    mbuf00 = pkts[pkt00_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
    pkt01_index = __builtin_ctzll(pkts_mask); \
    pkt01_mask = 1LLU << pkt01_index; \
    pkts_mask &= ~pkt01_mask; \
    mbuf01 = pkts[pkt01_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index, \
    mbuf00, mbuf01, pkts, pkts_mask, f) \
{ \
    uint64_t pkt00_mask, pkt01_mask; \
    uint32_t key_offset = f->key_offset; \
    pkt00_index = __builtin_ctzll(pkts_mask); \
    pkt00_mask = 1LLU << pkt00_index; \
    pkts_mask &= ~pkt00_mask; \
    mbuf00 = pkts[pkt00_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
    pkt01_index = __builtin_ctzll(pkts_mask); \
    if (pkts_mask == 0) \
        pkt01_index = pkt00_index; \
    pkt01_mask = 1LLU << pkt01_index; \
    pkts_mask &= ~pkt01_mask; \
    mbuf01 = pkts[pkt01_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
    uint64_t *key10, *key11; \
    uint64_t signature10, signature11; \
    uint32_t bucket10_index, bucket11_index; \
    key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
    signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
    bucket10_index = signature10 & (f->n_buckets - 1); \
    bucket10 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket10_index * f->bucket_size]; \
    rte_prefetch0(bucket10); \
    rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE)); \
    key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
    signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed); \
    bucket11_index = signature11 & (f->n_buckets - 1); \
    bucket11 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket11_index * f->bucket_size]; \
    rte_prefetch0(bucket11); \
    rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE)); \
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21, \
    bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
    void *a20, *a21; \
    uint64_t pkt20_mask, pkt21_mask; \
    uint64_t *key20, *key21; \
    uint32_t pos20, pos21; \
    key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
    key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
    lookup_key32_cmp(key20, bucket20, pos20, f); \
    lookup_key32_cmp(key21, bucket21, pos21, f); \
    pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
    pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
    pkts_mask_out |= pkt20_mask | pkt21_mask; \
    a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
    a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
    rte_prefetch0(a20); \
    rte_prefetch0(a21); \
    entries[pkt20_index] = a20; \
    entries[pkt21_index] = a21; \
    lru_update(bucket20, pos20); \
    lru_update(bucket21, pos21); \
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
    bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
{ \
    struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
    void *a20, *a21; \
    uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask; \
    uint64_t *key20, *key21; \
    uint32_t pos20, pos21; \
    key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
    key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
    lookup_key32_cmp(key20, bucket20, pos20, f); \
    lookup_key32_cmp(key21, bucket21, pos21, f); \
    pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
    pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
    pkts_mask_out |= pkt20_mask | pkt21_mask; \
    a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
    a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
    rte_prefetch0(a20); \
    rte_prefetch0(a21); \
    entries[pkt20_index] = a20; \
    entries[pkt21_index] = a21; \
    bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index); \
    bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index); \
    buckets_mask |= bucket20_mask | bucket21_mask; \
    bucket20_next = bucket20->next; \
    bucket21_next = bucket21->next; \
    buckets[pkt20_index] = bucket20_next; \
    buckets[pkt21_index] = bucket21_next; \
    keys[pkt20_index] = key20; \
    keys[pkt21_index] = key21; \
}
static int
rte_table_hash_lookup_key32_lru(
    void *table,
    struct rte_mbuf **pkts,
    uint64_t pkts_mask,
    uint64_t *lookup_hit_mask,
    void **entries)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
    struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
    uint32_t pkt00_index, pkt01_index, pkt10_index;
    uint32_t pkt11_index, pkt20_index, pkt21_index;
    uint64_t pkts_mask_out = 0;
    __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
    RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
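    /*
     * The bulk path below is a software pipeline: stage 0 prefetches the
     * packet key, stage 1 hashes the key and prefetches the bucket, and
     * stage 2 compares the key against the bucket and emits the entry
     * pointer. Two packets are processed per stage in each iteration, so the
     * pipelined path is only worth entering with at least 5 packets; smaller
     * bursts take the one-packet-at-a-time loop below.
     */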
    /* Cannot run the pipeline with less than 5 packets */
    if (__builtin_popcountll(pkts_mask) < 5) {
        for ( ; pkts_mask; ) {
            struct rte_bucket_4_32 *bucket;
            struct rte_mbuf *mbuf;
            uint32_t pkt_index;

            lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
            lookup1_stage1(mbuf, bucket, f);
            lookup1_stage2_lru(pkt_index, mbuf, bucket,
                pkts_mask_out, entries, f);
        }

        *lookup_hit_mask = pkts_mask_out;
        RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
            n_pkts_in - __builtin_popcountll(pkts_mask_out));
        return 0;
    }
    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask, f);

    /* Pipeline feed */
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask, f);

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline run */
    for ( ; pkts_mask; ) {
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
            mbuf00, mbuf01, pkts, pkts_mask, f);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_lru(pkt20_index, pkt21_index,
            mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
            entries, f);
    }

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline stage 2 */
    lookup2_stage2_lru(pkt20_index, pkt21_index,
        mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;

    /* Pipeline stage 2 */
    lookup2_stage2_lru(pkt20_index, pkt21_index,
        mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

    *lookup_hit_mask = pkts_mask_out;
    RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
        n_pkts_in - __builtin_popcountll(pkts_mask_out));
    return 0;
} /* rte_table_hash_lookup_key32_lru() */
static int
rte_table_hash_lookup_key32_ext(
    void *table,
    struct rte_mbuf **pkts,
    uint64_t pkts_mask,
    uint64_t *lookup_hit_mask,
    void **entries)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
    struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
    uint32_t pkt00_index, pkt01_index, pkt10_index;
    uint32_t pkt11_index, pkt20_index, pkt21_index;
    uint64_t pkts_mask_out = 0, buckets_mask = 0;
    struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
    uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

    __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
    RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
    /* Cannot run the pipeline with less than 5 packets */
    if (__builtin_popcountll(pkts_mask) < 5) {
        for ( ; pkts_mask; ) {
            struct rte_bucket_4_32 *bucket;
            struct rte_mbuf *mbuf;
            uint32_t pkt_index;

            lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
            lookup1_stage1(mbuf, bucket, f);
            lookup1_stage2_ext(pkt_index, mbuf, bucket,
                pkts_mask_out, entries, buckets_mask, buckets,
                keys, f);
        }

        goto grind_next_buckets;
    }
    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask, f);

    /* Pipeline feed */
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask, f);

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline run */
    for ( ; pkts_mask; ) {
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
            mbuf00, mbuf01, pkts, pkts_mask, f);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
            bucket20, bucket21, pkts_mask_out, entries,
            buckets_mask, buckets, keys, f);
    }

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline stage 2 */
    lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
        bucket20, bucket21, pkts_mask_out, entries,
        buckets_mask, buckets, keys, f);

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;

    /* Pipeline stage 2 */
    lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
        bucket20, bucket21, pkts_mask_out, entries,
        buckets_mask, buckets, keys, f);
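    /*
     * Packets whose bucket chain continues (no hit in the first bucket and
     * next_valid set) were recorded in buckets[]/keys[] and flagged in
     * buckets_mask by the stage 2 macros; they are resolved below by walking
     * the remaining buckets of each chain.
     */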
grind_next_buckets:
    /* Grind next buckets */
    for ( ; buckets_mask; ) {
        uint64_t buckets_mask_next = 0;

        for ( ; buckets_mask; ) {
            uint64_t pkt_mask;
            uint32_t pkt_index;

            pkt_index = __builtin_ctzll(buckets_mask);
            pkt_mask = 1LLU << pkt_index;
            buckets_mask &= ~pkt_mask;

            lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
                entries, buckets_mask_next, f);
        }

        buckets_mask = buckets_mask_next;
    }

    *lookup_hit_mask = pkts_mask_out;
    RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
        n_pkts_in - __builtin_popcountll(pkts_mask_out));
    return 0;
} /* rte_table_hash_lookup_key32_ext() */
static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats,
    int clear)
{
    struct rte_table_hash *t = table;

    if (stats != NULL)
        memcpy(stats, &t->stats, sizeof(t->stats));

    if (clear)
        memset(&t->stats, 0, sizeof(t->stats));

    return 0;
}
struct rte_table_ops rte_table_hash_key32_lru_ops = {
    .f_create = rte_table_hash_create_key32_lru,
    .f_free = rte_table_hash_free_key32_lru,
    .f_add = rte_table_hash_entry_add_key32_lru,
    .f_delete = rte_table_hash_entry_delete_key32_lru,
    .f_add_bulk = NULL,
    .f_delete_bulk = NULL,
    .f_lookup = rte_table_hash_lookup_key32_lru,
    .f_stats = rte_table_hash_key32_stats_read,
};
struct rte_table_ops rte_table_hash_key32_ext_ops = {
    .f_create = rte_table_hash_create_key32_ext,
    .f_free = rte_table_hash_free_key32_ext,
    .f_add = rte_table_hash_entry_add_key32_ext,
    .f_delete = rte_table_hash_entry_delete_key32_ext,
    .f_add_bulk = NULL,
    .f_delete_bulk = NULL,
    .f_lookup = rte_table_hash_lookup_key32_ext,
    .f_stats = rte_table_hash_key32_stats_read,
};