1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4 #include <string.h>
5 #include <stdio.h>
6
7 #include <rte_common.h>
8 #include <rte_mbuf.h>
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
11 #include <rte_log.h>
12
13 #include "rte_table_hash.h"
14 #include "rte_lru.h"
15
16 #define KEY_SIZE 32
17
18 #define KEYS_PER_BUCKET 4
19
20 #define RTE_BUCKET_ENTRY_VALID 0x1LLU
21
22 #ifdef RTE_TABLE_STATS_COLLECT
23
24 #define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
25 table->stats.n_pkts_in += val
26 #define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
27 table->stats.n_pkts_lookup_miss += val
28
29 #else
30
31 #define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
32 #define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)
33
34 #endif
35
36 struct rte_bucket_4_32 {
37 /* Cache line 0 */
38 uint64_t signature[4 + 1];
39 uint64_t lru_list;
40 struct rte_bucket_4_32 *next;
41 uint64_t next_valid;
42
43 /* Cache lines 1 and 2 */
44 uint64_t key[4][4];
45
46 /* Cache line 3 */
47 uint8_t data[0];
48 };
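/*
 * Editor's note: the low bit of each signature word is the
 * RTE_BUCKET_ENTRY_VALID flag, so a stored signature of 0 marks a free slot.
 * The fifth signature word appears to act as a miss sentinel for the lookup
 * macros further below (pos == 4 when no slot matches), and data[] holds
 * KEYS_PER_BUCKET entries of entry_size bytes each.
 */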
49
50 struct rte_table_hash {
51 struct rte_table_stats stats;
52
53 /* Input parameters */
54 uint32_t n_buckets;
55 uint32_t key_size;
56 uint32_t entry_size;
57 uint32_t bucket_size;
58 uint32_t key_offset;
59 uint64_t key_mask[4];
60 rte_table_hash_op_hash f_hash;
61 uint64_t seed;
62
63 /* Extendible buckets */
64 uint32_t n_buckets_ext;
65 uint32_t stack_pos;
66 uint32_t *stack;
67
68 /* Lookup table */
69 uint8_t memory[0] __rte_cache_aligned;
70 };
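/*
 * Editor's note: memory[] holds the n_buckets primary buckets, followed (in
 * the extendible variant) by n_buckets_ext spare buckets and then the stack
 * of free spare-bucket indices, exactly as laid out by the create functions
 * below.
 */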
71
72 static int
73 keycmp(void *a, void *b, void *b_mask)
74 {
75 uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
76
77 return (a64[0] != (b64[0] & b_mask64[0])) ||
78 (a64[1] != (b64[1] & b_mask64[1])) ||
79 (a64[2] != (b64[2] & b_mask64[2])) ||
80 (a64[3] != (b64[3] & b_mask64[3]));
81 }
82
83 static void
84 keycpy(void *dst, void *src, void *src_mask)
85 {
86 uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
87
88 dst64[0] = src64[0] & src_mask64[0];
89 dst64[1] = src64[1] & src_mask64[1];
90 dst64[2] = src64[2] & src_mask64[2];
91 dst64[3] = src64[3] & src_mask64[3];
92 }
93
94 static int
95 check_params_create(struct rte_table_hash_params *params)
96 {
97 /* name */
98 if (params->name == NULL) {
99 RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
100 return -EINVAL;
101 }
102
103 /* key_size */
104 if (params->key_size != KEY_SIZE) {
105 RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
106 return -EINVAL;
107 }
108
109 /* n_keys */
110 if (params->n_keys == 0) {
111 RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
112 return -EINVAL;
113 }
114
115 /* n_buckets */
116 if ((params->n_buckets == 0) ||
117 (!rte_is_power_of_2(params->n_buckets))) {
118 RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
119 return -EINVAL;
120 }
121
122 /* f_hash */
123 if (params->f_hash == NULL) {
124 RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
125 __func__);
126 return -EINVAL;
127 }
128
129 return 0;
130 }
131
132 static void *
133 rte_table_hash_create_key32_lru(void *params,
134 int socket_id,
135 uint32_t entry_size)
136 {
137 struct rte_table_hash_params *p = params;
138 struct rte_table_hash *f;
139 uint64_t bucket_size, total_size;
140 uint32_t n_buckets, i;
141
142 /* Check input parameters */
143 if ((check_params_create(p) != 0) ||
144 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
145 ((sizeof(struct rte_bucket_4_32) % 64) != 0))
146 return NULL;
147
148 /*
149 * Table dimensioning
150 *
151 * Objective: Pick the number of buckets (n_buckets) so that there is a chance
152 * to store n_keys keys in the table.
153 *
154 * Note: Since the buckets do not get extended, it is not possible to
155 * guarantee that n_keys keys can be stored in the table at any time. In the
156 * worst case scenario when all the n_keys fall into the same bucket, only
157 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
158 * defeats the purpose of the hash table. It indicates an unsuitable f_hash
159 * function or n_keys to n_buckets ratio.
160 *
161 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
162 */
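/*
 * Illustrative example (editor's addition): with n_keys = 1000 and
 * KEYS_PER_BUCKET = 4, MIN(n_buckets) = 250, which rte_align32pow2() rounds
 * up to 256; the larger of this value and p->n_buckets is then used.
 */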
163 n_buckets = rte_align32pow2(
164 (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
165 n_buckets = RTE_MAX(n_buckets, p->n_buckets);
166
167 /* Memory allocation */
168 bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
169 KEYS_PER_BUCKET * entry_size);
170 total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
171 if (total_size > SIZE_MAX) {
172 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
173 "for hash table %s\n",
174 __func__, total_size, p->name);
175 return NULL;
176 }
177
178 f = rte_zmalloc_socket(p->name,
179 (size_t)total_size,
180 RTE_CACHE_LINE_SIZE,
181 socket_id);
182 if (f == NULL) {
183 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
184 "for hash table %s\n",
185 __func__, total_size, p->name);
186 return NULL;
187 }
188 RTE_LOG(INFO, TABLE,
189 "%s: Hash table %s memory footprint "
190 "is %" PRIu64 " bytes\n",
191 __func__, p->name, total_size);
192
193 /* Memory initialization */
194 f->n_buckets = n_buckets;
195 f->key_size = KEY_SIZE;
196 f->entry_size = entry_size;
197 f->bucket_size = bucket_size;
198 f->key_offset = p->key_offset;
199 f->f_hash = p->f_hash;
200 f->seed = p->seed;
201
202 if (p->key_mask != NULL) {
203 f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
204 f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
205 f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
206 f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
207 } else {
208 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
209 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
210 f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
211 f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
212 }
213
214 for (i = 0; i < n_buckets; i++) {
215 struct rte_bucket_4_32 *bucket;
216
217 bucket = (struct rte_bucket_4_32 *) &f->memory[i *
218 f->bucket_size];
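/*
 * Editor's note, per rte_lru.h: lru_list packs the four slot indices into
 * 16-bit fields, with the least recently used slot in the low bits, so
 * slot 3 is the first eviction candidate.
 */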
219 bucket->lru_list = 0x0000000100020003LLU;
220 }
221
222 return f;
223 }
224
225 static int
226 rte_table_hash_free_key32_lru(void *table)
227 {
228 struct rte_table_hash *f = table;
229
230 /* Check input parameters */
231 if (f == NULL) {
232 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
233 return -EINVAL;
234 }
235
236 rte_free(f);
237 return 0;
238 }
239
240 static int
241 rte_table_hash_entry_add_key32_lru(
242 void *table,
243 void *key,
244 void *entry,
245 int *key_found,
246 void **entry_ptr)
247 {
248 struct rte_table_hash *f = table;
249 struct rte_bucket_4_32 *bucket;
250 uint64_t signature, pos;
251 uint32_t bucket_index, i;
252
253 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
254 bucket_index = signature & (f->n_buckets - 1);
255 bucket = (struct rte_bucket_4_32 *)
256 &f->memory[bucket_index * f->bucket_size];
257 signature |= RTE_BUCKET_ENTRY_VALID;
258
259 /* Key is present in the bucket */
260 for (i = 0; i < 4; i++) {
261 uint64_t bucket_signature = bucket->signature[i];
262 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
263
264 if ((bucket_signature == signature) &&
265 (keycmp(bucket_key, key, f->key_mask) == 0)) {
266 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
267
268 memcpy(bucket_data, entry, f->entry_size);
269 lru_update(bucket, i);
270 *key_found = 1;
271 *entry_ptr = (void *) bucket_data;
272 return 0;
273 }
274 }
275
276 /* Key is not present in the bucket */
277 for (i = 0; i < 4; i++) {
278 uint64_t bucket_signature = bucket->signature[i];
279 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
280
281 if (bucket_signature == 0) {
282 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
283
284 bucket->signature[i] = signature;
285 keycpy(bucket_key, key, f->key_mask);
286 memcpy(bucket_data, entry, f->entry_size);
287 lru_update(bucket, i);
288 *key_found = 0;
289 *entry_ptr = (void *) bucket_data;
290
291 return 0;
292 }
293 }
294
295 /* Bucket full: replace LRU entry */
296 pos = lru_pos(bucket);
297 bucket->signature[pos] = signature;
298 keycpy(&bucket->key[pos], key, f->key_mask);
299 memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
300 lru_update(bucket, pos);
301 *key_found = 0;
302 *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
303
304 return 0;
305 }
306
307 static int
308 rte_table_hash_entry_delete_key32_lru(
309 void *table,
310 void *key,
311 int *key_found,
312 void *entry)
313 {
314 struct rte_table_hash *f = table;
315 struct rte_bucket_4_32 *bucket;
316 uint64_t signature;
317 uint32_t bucket_index, i;
318
319 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
320 bucket_index = signature & (f->n_buckets - 1);
321 bucket = (struct rte_bucket_4_32 *)
322 &f->memory[bucket_index * f->bucket_size];
323 signature |= RTE_BUCKET_ENTRY_VALID;
324
325 /* Key is present in the bucket */
326 for (i = 0; i < 4; i++) {
327 uint64_t bucket_signature = bucket->signature[i];
328 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
329
330 if ((bucket_signature == signature) &&
331 (keycmp(bucket_key, key, f->key_mask) == 0)) {
332 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
333
334 bucket->signature[i] = 0;
335 *key_found = 1;
336 if (entry)
337 memcpy(entry, bucket_data, f->entry_size);
338
339 return 0;
340 }
341 }
342
343 /* Key is not present in the bucket */
344 *key_found = 0;
345 return 0;
346 }
347
348 static void *
349 rte_table_hash_create_key32_ext(void *params,
350 int socket_id,
351 uint32_t entry_size)
352 {
353 struct rte_table_hash_params *p = params;
354 struct rte_table_hash *f;
355 uint64_t bucket_size, stack_size, total_size;
356 uint32_t n_buckets_ext, i;
357
358 /* Check input parameters */
359 if ((check_params_create(p) != 0) ||
360 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
361 ((sizeof(struct rte_bucket_4_32) % 64) != 0))
362 return NULL;
363
364 /*
365 * Table dimensioning
366 *
367 * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
368 * it is guaranteed that n_keys keys can be stored in the table at any time.
369 *
370 * The worst case scenario takes place when all the n_keys keys fall into
371 * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
372 * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
373 * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
374 * into a different bucket. This case defeats the purpose of the hash table.
375 * It indicates an unsuitable f_hash function or n_keys to n_buckets ratio.
376 *
377 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
378 */
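/*
 * Illustrative example (editor's addition): with n_keys = 1000,
 * n_buckets_ext = 1000 / 4 + 4 - 1 = 253 spare buckets are reserved, enough
 * to absorb the worst-case chaining described above.
 */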
379 n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
380
381 /* Memory allocation */
382 bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
383 KEYS_PER_BUCKET * entry_size);
384 stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
385 total_size = sizeof(struct rte_table_hash) +
386 (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
387 if (total_size > SIZE_MAX) {
388 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
389 "for hash table %s\n",
390 __func__, total_size, p->name);
391 return NULL;
392 }
393
394 f = rte_zmalloc_socket(p->name,
395 (size_t)total_size,
396 RTE_CACHE_LINE_SIZE,
397 socket_id);
398 if (f == NULL) {
399 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
400 "for hash table %s\n",
401 __func__, total_size, p->name);
402 return NULL;
403 }
404 RTE_LOG(INFO, TABLE,
405 "%s: Hash table %s memory footprint "
406 "is %" PRIu64" bytes\n",
407 __func__, p->name, total_size);
408
409 /* Memory initialization */
410 f->n_buckets = p->n_buckets;
411 f->key_size = KEY_SIZE;
412 f->entry_size = entry_size;
413 f->bucket_size = bucket_size;
414 f->key_offset = p->key_offset;
415 f->f_hash = p->f_hash;
416 f->seed = p->seed;
417
418 f->n_buckets_ext = n_buckets_ext;
419 f->stack_pos = n_buckets_ext;
420 f->stack = (uint32_t *)
421 &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
422
423 if (p->key_mask != NULL) {
424 f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
425 f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
426 f->key_mask[2] = (((uint64_t *)p->key_mask)[2]);
427 f->key_mask[3] = (((uint64_t *)p->key_mask)[3]);
428 } else {
429 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
430 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
431 f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
432 f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
433 }
434
435 for (i = 0; i < n_buckets_ext; i++)
436 f->stack[i] = i;
437
438 return f;
439 }
440
441 static int
442 rte_table_hash_free_key32_ext(void *table)
443 {
444 struct rte_table_hash *f = table;
445
446 /* Check input parameters */
447 if (f == NULL) {
448 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
449 return -EINVAL;
450 }
451
452 rte_free(f);
453 return 0;
454 }
455
456 static int
457 rte_table_hash_entry_add_key32_ext(
458 void *table,
459 void *key,
460 void *entry,
461 int *key_found,
462 void **entry_ptr)
463 {
464 struct rte_table_hash *f = table;
465 struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
466 uint64_t signature;
467 uint32_t bucket_index, i;
468
469 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
470 bucket_index = signature & (f->n_buckets - 1);
471 bucket0 = (struct rte_bucket_4_32 *)
472 &f->memory[bucket_index * f->bucket_size];
473 signature |= RTE_BUCKET_ENTRY_VALID;
474
475 /* Key is present in the bucket */
476 for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
477 for (i = 0; i < 4; i++) {
478 uint64_t bucket_signature = bucket->signature[i];
479 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
480
481 if ((bucket_signature == signature) &&
482 (keycmp(bucket_key, key, f->key_mask) == 0)) {
483 uint8_t *bucket_data = &bucket->data[i *
484 f->entry_size];
485
486 memcpy(bucket_data, entry, f->entry_size);
487 *key_found = 1;
488 *entry_ptr = (void *) bucket_data;
489
490 return 0;
491 }
492 }
493 }
494
495 /* Key is not present in the bucket */
496 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
497 bucket_prev = bucket, bucket = bucket->next)
498 for (i = 0; i < 4; i++) {
499 uint64_t bucket_signature = bucket->signature[i];
500 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
501
502 if (bucket_signature == 0) {
503 uint8_t *bucket_data = &bucket->data[i *
504 f->entry_size];
505
506 bucket->signature[i] = signature;
507 keycpy(bucket_key, key, f->key_mask);
508 memcpy(bucket_data, entry, f->entry_size);
509 *key_found = 0;
510 *entry_ptr = (void *) bucket_data;
511
512 return 0;
513 }
514 }
515
516 /* Bucket full: extend bucket */
517 if (f->stack_pos > 0) {
518 bucket_index = f->stack[--f->stack_pos];
519
520 bucket = (struct rte_bucket_4_32 *)
521 &f->memory[(f->n_buckets + bucket_index) *
522 f->bucket_size];
523 bucket_prev->next = bucket;
524 bucket_prev->next_valid = 1;
525
526 bucket->signature[0] = signature;
527 keycpy(&bucket->key[0], key, f->key_mask);
528 memcpy(&bucket->data[0], entry, f->entry_size);
529 *key_found = 0;
530 *entry_ptr = (void *) &bucket->data[0];
531 return 0;
532 }
533
534 return -ENOSPC;
535 }
536
537 static int
538 rte_table_hash_entry_delete_key32_ext(
539 void *table,
540 void *key,
541 int *key_found,
542 void *entry)
543 {
544 struct rte_table_hash *f = table;
545 struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
546 uint64_t signature;
547 uint32_t bucket_index, i;
548
549 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
550 bucket_index = signature & (f->n_buckets - 1);
551 bucket0 = (struct rte_bucket_4_32 *)
552 &f->memory[bucket_index * f->bucket_size];
553 signature |= RTE_BUCKET_ENTRY_VALID;
554
555 /* Key is present in the bucket */
556 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
557 bucket_prev = bucket, bucket = bucket->next)
558 for (i = 0; i < 4; i++) {
559 uint64_t bucket_signature = bucket->signature[i];
560 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
561
562 if ((bucket_signature == signature) &&
563 (keycmp(bucket_key, key, f->key_mask) == 0)) {
564 uint8_t *bucket_data = &bucket->data[i *
565 f->entry_size];
566
567 bucket->signature[i] = 0;
568 *key_found = 1;
569 if (entry)
570 memcpy(entry, bucket_data, f->entry_size);
571
572 if ((bucket->signature[0] == 0) &&
573 (bucket->signature[1] == 0) &&
574 (bucket->signature[2] == 0) &&
575 (bucket->signature[3] == 0) &&
576 (bucket_prev != NULL)) {
577 bucket_prev->next = bucket->next;
578 bucket_prev->next_valid =
579 bucket->next_valid;
580
581 memset(bucket, 0,
582 sizeof(struct rte_bucket_4_32));
583 bucket_index = (((uint8_t *)bucket -
584 (uint8_t *)f->memory)/f->bucket_size) - f->n_buckets;
585 f->stack[f->stack_pos++] = bucket_index;
586 }
587
588 return 0;
589 }
590 }
591
592 /* Key is not present in the bucket */
593 *key_found = 0;
594 return 0;
595 }
596
597 #define lookup_key32_cmp(key_in, bucket, pos, f) \
598 { \
599 uint64_t xor[4][4], or[4], signature[4], k[4]; \
600 \
601 k[0] = key_in[0] & f->key_mask[0]; \
602 k[1] = key_in[1] & f->key_mask[1]; \
603 k[2] = key_in[2] & f->key_mask[2]; \
604 k[3] = key_in[3] & f->key_mask[3]; \
605 \
606 signature[0] = ((~bucket->signature[0]) & 1); \
607 signature[1] = ((~bucket->signature[1]) & 1); \
608 signature[2] = ((~bucket->signature[2]) & 1); \
609 signature[3] = ((~bucket->signature[3]) & 1); \
610 \
611 xor[0][0] = k[0] ^ bucket->key[0][0]; \
612 xor[0][1] = k[1] ^ bucket->key[0][1]; \
613 xor[0][2] = k[2] ^ bucket->key[0][2]; \
614 xor[0][3] = k[3] ^ bucket->key[0][3]; \
615 \
616 xor[1][0] = k[0] ^ bucket->key[1][0]; \
617 xor[1][1] = k[1] ^ bucket->key[1][1]; \
618 xor[1][2] = k[2] ^ bucket->key[1][2]; \
619 xor[1][3] = k[3] ^ bucket->key[1][3]; \
620 \
621 xor[2][0] = k[0] ^ bucket->key[2][0]; \
622 xor[2][1] = k[1] ^ bucket->key[2][1]; \
623 xor[2][2] = k[2] ^ bucket->key[2][2]; \
624 xor[2][3] = k[3] ^ bucket->key[2][3]; \
625 \
626 xor[3][0] = k[0] ^ bucket->key[3][0]; \
627 xor[3][1] = k[1] ^ bucket->key[3][1]; \
628 xor[3][2] = k[2] ^ bucket->key[3][2]; \
629 xor[3][3] = k[3] ^ bucket->key[3][3]; \
630 \
631 or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
632 or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
633 or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2];\
634 or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3];\
635 \
636 pos = 4; \
637 if (or[0] == 0) \
638 pos = 0; \
639 if (or[1] == 0) \
640 pos = 1; \
641 if (or[2] == 0) \
642 pos = 2; \
643 if (or[3] == 0) \
644 pos = 3; \
645 }
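/*
 * Editor's note: the macro above is a branchless compare of the masked input
 * key against all four bucket slots. or[i] is zero only when slot i is valid
 * and all four 64-bit key words match; pos ends up as the matching slot, or
 * stays 4 on a miss so that the callers read the spare signature[4] word
 * (never set, hence valid bit clear) and leave the packet's hit bit unset.
 */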
646
647 #define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
648 { \
649 uint64_t pkt_mask; \
650 uint32_t key_offset = f->key_offset; \
651 \
652 pkt0_index = __builtin_ctzll(pkts_mask); \
653 pkt_mask = 1LLU << pkt0_index; \
654 pkts_mask &= ~pkt_mask; \
655 \
656 mbuf0 = pkts[pkt0_index]; \
657 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset));\
658 }
659
660 #define lookup1_stage1(mbuf1, bucket1, f) \
661 { \
662 uint64_t *key; \
663 uint64_t signature; \
664 uint32_t bucket_index; \
665 \
666 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
667 signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
668 \
669 bucket_index = signature & (f->n_buckets - 1); \
670 bucket1 = (struct rte_bucket_4_32 *) \
671 &f->memory[bucket_index * f->bucket_size]; \
672 rte_prefetch0(bucket1); \
673 rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
674 rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
675 }
676
677 #define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
678 pkts_mask_out, entries, f) \
679 { \
680 void *a; \
681 uint64_t pkt_mask; \
682 uint64_t *key; \
683 uint32_t pos; \
684 \
685 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
686 lookup_key32_cmp(key, bucket2, pos, f); \
687 \
688 pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
689 pkts_mask_out |= pkt_mask; \
690 \
691 a = (void *) &bucket2->data[pos * f->entry_size]; \
692 rte_prefetch0(a); \
693 entries[pkt2_index] = a; \
694 lru_update(bucket2, pos); \
695 }
696
697 #define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
698 entries, buckets_mask, buckets, keys, f) \
699 { \
700 struct rte_bucket_4_32 *bucket_next; \
701 void *a; \
702 uint64_t pkt_mask, bucket_mask; \
703 uint64_t *key; \
704 uint32_t pos; \
705 \
706 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
707 lookup_key32_cmp(key, bucket2, pos, f); \
708 \
709 pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
710 pkts_mask_out |= pkt_mask; \
711 \
712 a = (void *) &bucket2->data[pos * f->entry_size]; \
713 rte_prefetch0(a); \
714 entries[pkt2_index] = a; \
715 \
716 bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
717 buckets_mask |= bucket_mask; \
718 bucket_next = bucket2->next; \
719 buckets[pkt2_index] = bucket_next; \
720 keys[pkt2_index] = key; \
721 }
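/*
 * Editor's note: in the _ext lookup macros, buckets_mask collects the packets
 * that missed in the current bucket but have a chained extension bucket
 * (next_valid set); the saved bucket/key pointers are then re-examined by the
 * lookup_grinder() pass defined below until every chain is exhausted.
 */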
722
723 #define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
724 entries, buckets_mask, f) \
725 { \
726 struct rte_bucket_4_32 *bucket, *bucket_next; \
727 void *a; \
728 uint64_t pkt_mask, bucket_mask; \
729 uint64_t *key; \
730 uint32_t pos; \
731 \
732 bucket = buckets[pkt_index]; \
733 key = keys[pkt_index]; \
734 \
735 lookup_key32_cmp(key, bucket, pos, f); \
736 \
737 pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
738 pkts_mask_out |= pkt_mask; \
739 \
740 a = (void *) &bucket->data[pos * f->entry_size]; \
741 rte_prefetch0(a); \
742 entries[pkt_index] = a; \
743 \
744 bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
745 buckets_mask |= bucket_mask; \
746 bucket_next = bucket->next; \
747 rte_prefetch0(bucket_next); \
748 rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
749 rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
750 2 * RTE_CACHE_LINE_SIZE)); \
751 buckets[pkt_index] = bucket_next; \
752 keys[pkt_index] = key; \
753 }
754
755 #define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
756 pkts, pkts_mask, f) \
757 { \
758 uint64_t pkt00_mask, pkt01_mask; \
759 uint32_t key_offset = f->key_offset; \
760 \
761 pkt00_index = __builtin_ctzll(pkts_mask); \
762 pkt00_mask = 1LLU << pkt00_index; \
763 pkts_mask &= ~pkt00_mask; \
764 \
765 mbuf00 = pkts[pkt00_index]; \
766 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
767 \
768 pkt01_index = __builtin_ctzll(pkts_mask); \
769 pkt01_mask = 1LLU << pkt01_index; \
770 pkts_mask &= ~pkt01_mask; \
771 \
772 mbuf01 = pkts[pkt01_index]; \
773 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
774 }
775
776 #define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
777 mbuf00, mbuf01, pkts, pkts_mask, f) \
778 { \
779 uint64_t pkt00_mask, pkt01_mask; \
780 uint32_t key_offset = f->key_offset; \
781 \
782 pkt00_index = __builtin_ctzll(pkts_mask); \
783 pkt00_mask = 1LLU << pkt00_index; \
784 pkts_mask &= ~pkt00_mask; \
785 \
786 mbuf00 = pkts[pkt00_index]; \
787 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
788 \
789 pkt01_index = __builtin_ctzll(pkts_mask); \
790 if (pkts_mask == 0) \
791 pkt01_index = pkt00_index; \
792 \
793 pkt01_mask = 1LLU << pkt01_index; \
794 pkts_mask &= ~pkt01_mask; \
795 \
796 mbuf01 = pkts[pkt01_index]; \
797 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
798 }
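/*
 * Editor's note: when only one packet is left, pkt01_index is clamped to
 * pkt00_index so the dual-packet pipeline can still run; processing the same
 * packet twice is benign because pkts_mask_out and entries[] are simply
 * rewritten with identical values.
 */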
799
800 #define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
801 { \
802 uint64_t *key10, *key11; \
803 uint64_t signature10, signature11; \
804 uint32_t bucket10_index, bucket11_index; \
805 \
806 key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
807 signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
808 \
809 bucket10_index = signature10 & (f->n_buckets - 1); \
810 bucket10 = (struct rte_bucket_4_32 *) \
811 &f->memory[bucket10_index * f->bucket_size]; \
812 rte_prefetch0(bucket10); \
813 rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
814 rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
815 \
816 key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
817 signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
818 \
819 bucket11_index = signature11 & (f->n_buckets - 1); \
820 bucket11 = (struct rte_bucket_4_32 *) \
821 &f->memory[bucket11_index * f->bucket_size]; \
822 rte_prefetch0(bucket11); \
823 rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
824 rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
825 }
826
827 #define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
828 bucket20, bucket21, pkts_mask_out, entries, f) \
829 { \
830 void *a20, *a21; \
831 uint64_t pkt20_mask, pkt21_mask; \
832 uint64_t *key20, *key21; \
833 uint32_t pos20, pos21; \
834 \
835 key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
836 key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
837 \
838 lookup_key32_cmp(key20, bucket20, pos20, f); \
839 lookup_key32_cmp(key21, bucket21, pos21, f); \
840 \
841 pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
842 pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
843 pkts_mask_out |= pkt20_mask | pkt21_mask; \
844 \
845 a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
846 a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
847 rte_prefetch0(a20); \
848 rte_prefetch0(a21); \
849 entries[pkt20_index] = a20; \
850 entries[pkt21_index] = a21; \
851 lru_update(bucket20, pos20); \
852 lru_update(bucket21, pos21); \
853 }
854
855 #define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
856 bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
857 { \
858 struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
859 void *a20, *a21; \
860 uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
861 uint64_t *key20, *key21; \
862 uint32_t pos20, pos21; \
863 \
864 key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
865 key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
866 \
867 lookup_key32_cmp(key20, bucket20, pos20, f); \
868 lookup_key32_cmp(key21, bucket21, pos21, f); \
869 \
870 pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
871 pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
872 pkts_mask_out |= pkt20_mask | pkt21_mask; \
873 \
874 a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
875 a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
876 rte_prefetch0(a20); \
877 rte_prefetch0(a21); \
878 entries[pkt20_index] = a20; \
879 entries[pkt21_index] = a21; \
880 \
881 bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
882 bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
883 buckets_mask |= bucket20_mask | bucket21_mask; \
884 bucket20_next = bucket20->next; \
885 bucket21_next = bucket21->next; \
886 buckets[pkt20_index] = bucket20_next; \
887 buckets[pkt21_index] = bucket21_next; \
888 keys[pkt20_index] = key20; \
889 keys[pkt21_index] = key21; \
890 }
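/*
 * Editor's note: the lookup functions below use the macros above to build a
 * two-packet-wide, three-stage software pipeline: stage 0 picks the next two
 * packets and prefetches their keys, stage 1 hashes the keys and prefetches
 * the candidate buckets, stage 2 performs the key compare and records the
 * entry pointers. Bursts of fewer than 5 packets take the single-packet path.
 */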
891
892 static int
893 rte_table_hash_lookup_key32_lru(
894 void *table,
895 struct rte_mbuf **pkts,
896 uint64_t pkts_mask,
897 uint64_t *lookup_hit_mask,
898 void **entries)
899 {
900 struct rte_table_hash *f = (struct rte_table_hash *) table;
901 struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
902 struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
903 uint32_t pkt00_index, pkt01_index, pkt10_index;
904 uint32_t pkt11_index, pkt20_index, pkt21_index;
905 uint64_t pkts_mask_out = 0;
906
907 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
908 RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
909
910 /* Cannot run the pipeline with less than 5 packets */
911 if (__builtin_popcountll(pkts_mask) < 5) {
912 for ( ; pkts_mask; ) {
913 struct rte_bucket_4_32 *bucket;
914 struct rte_mbuf *mbuf;
915 uint32_t pkt_index;
916
917 lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
918 lookup1_stage1(mbuf, bucket, f);
919 lookup1_stage2_lru(pkt_index, mbuf, bucket,
920 pkts_mask_out, entries, f);
921 }
922
923 *lookup_hit_mask = pkts_mask_out;
924 RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
925 return 0;
926 }
927
928 /*
929 * Pipeline fill
930 *
931 */
932 /* Pipeline stage 0 */
933 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
934 pkts_mask, f);
935
936 /* Pipeline feed */
937 mbuf10 = mbuf00;
938 mbuf11 = mbuf01;
939 pkt10_index = pkt00_index;
940 pkt11_index = pkt01_index;
941
942 /* Pipeline stage 0 */
943 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
944 pkts_mask, f);
945
946 /* Pipeline stage 1 */
947 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
948
949 /*
950 * Pipeline run
951 *
952 */
953 for ( ; pkts_mask; ) {
954 /* Pipeline feed */
955 bucket20 = bucket10;
956 bucket21 = bucket11;
957 mbuf20 = mbuf10;
958 mbuf21 = mbuf11;
959 mbuf10 = mbuf00;
960 mbuf11 = mbuf01;
961 pkt20_index = pkt10_index;
962 pkt21_index = pkt11_index;
963 pkt10_index = pkt00_index;
964 pkt11_index = pkt01_index;
965
966 /* Pipeline stage 0 */
967 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
968 mbuf00, mbuf01, pkts, pkts_mask, f);
969
970 /* Pipeline stage 1 */
971 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
972
973 /* Pipeline stage 2 */
974 lookup2_stage2_lru(pkt20_index, pkt21_index,
975 mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
976 entries, f);
977 }
978
979 /*
980 * Pipeline flush
981 *
982 */
983 /* Pipeline feed */
984 bucket20 = bucket10;
985 bucket21 = bucket11;
986 mbuf20 = mbuf10;
987 mbuf21 = mbuf11;
988 mbuf10 = mbuf00;
989 mbuf11 = mbuf01;
990 pkt20_index = pkt10_index;
991 pkt21_index = pkt11_index;
992 pkt10_index = pkt00_index;
993 pkt11_index = pkt01_index;
994
995 /* Pipeline stage 1 */
996 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
997
998 /* Pipeline stage 2 */
999 lookup2_stage2_lru(pkt20_index, pkt21_index,
1000 mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
1001
1002 /* Pipeline feed */
1003 bucket20 = bucket10;
1004 bucket21 = bucket11;
1005 mbuf20 = mbuf10;
1006 mbuf21 = mbuf11;
1007 pkt20_index = pkt10_index;
1008 pkt21_index = pkt11_index;
1009
1010 /* Pipeline stage 2 */
1011 lookup2_stage2_lru(pkt20_index, pkt21_index,
1012 mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
1013
1014 *lookup_hit_mask = pkts_mask_out;
1015 RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
1016 return 0;
1017 } /* rte_table_hash_lookup_key32_lru() */
1018
1019 static int
1020 rte_table_hash_lookup_key32_ext(
1021 void *table,
1022 struct rte_mbuf **pkts,
1023 uint64_t pkts_mask,
1024 uint64_t *lookup_hit_mask,
1025 void **entries)
1026 {
1027 struct rte_table_hash *f = (struct rte_table_hash *) table;
1028 struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
1029 struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
1030 uint32_t pkt00_index, pkt01_index, pkt10_index;
1031 uint32_t pkt11_index, pkt20_index, pkt21_index;
1032 uint64_t pkts_mask_out = 0, buckets_mask = 0;
1033 struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
1034 uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
1035
1036 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
1037 RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
1038
1039 /* Cannot run the pipeline with less than 5 packets */
1040 if (__builtin_popcountll(pkts_mask) < 5) {
1041 for ( ; pkts_mask; ) {
1042 struct rte_bucket_4_32 *bucket;
1043 struct rte_mbuf *mbuf;
1044 uint32_t pkt_index;
1045
1046 lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
1047 lookup1_stage1(mbuf, bucket, f);
1048 lookup1_stage2_ext(pkt_index, mbuf, bucket,
1049 pkts_mask_out, entries, buckets_mask, buckets,
1050 keys, f);
1051 }
1052
1053 goto grind_next_buckets;
1054 }
1055
1056 /*
1057 * Pipeline fill
1058 *
1059 */
1060 /* Pipeline stage 0 */
1061 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1062 pkts_mask, f);
1063
1064 /* Pipeline feed */
1065 mbuf10 = mbuf00;
1066 mbuf11 = mbuf01;
1067 pkt10_index = pkt00_index;
1068 pkt11_index = pkt01_index;
1069
1070 /* Pipeline stage 0 */
1071 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1072 pkts_mask, f);
1073
1074 /* Pipeline stage 1 */
1075 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1076
1077 /*
1078 * Pipeline run
1079 *
1080 */
1081 for ( ; pkts_mask; ) {
1082 /* Pipeline feed */
1083 bucket20 = bucket10;
1084 bucket21 = bucket11;
1085 mbuf20 = mbuf10;
1086 mbuf21 = mbuf11;
1087 mbuf10 = mbuf00;
1088 mbuf11 = mbuf01;
1089 pkt20_index = pkt10_index;
1090 pkt21_index = pkt11_index;
1091 pkt10_index = pkt00_index;
1092 pkt11_index = pkt01_index;
1093
1094 /* Pipeline stage 0 */
1095 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
1096 mbuf00, mbuf01, pkts, pkts_mask, f);
1097
1098 /* Pipeline stage 1 */
1099 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1100
1101 /* Pipeline stage 2 */
1102 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1103 bucket20, bucket21, pkts_mask_out, entries,
1104 buckets_mask, buckets, keys, f);
1105 }
1106
1107 /*
1108 * Pipeline flush
1109 *
1110 */
1111 /* Pipeline feed */
1112 bucket20 = bucket10;
1113 bucket21 = bucket11;
1114 mbuf20 = mbuf10;
1115 mbuf21 = mbuf11;
1116 mbuf10 = mbuf00;
1117 mbuf11 = mbuf01;
1118 pkt20_index = pkt10_index;
1119 pkt21_index = pkt11_index;
1120 pkt10_index = pkt00_index;
1121 pkt11_index = pkt01_index;
1122
1123 /* Pipeline stage 1 */
1124 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1125
1126 /* Pipeline stage 2 */
1127 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1128 bucket20, bucket21, pkts_mask_out, entries,
1129 buckets_mask, buckets, keys, f);
1130
1131 /* Pipeline feed */
1132 bucket20 = bucket10;
1133 bucket21 = bucket11;
1134 mbuf20 = mbuf10;
1135 mbuf21 = mbuf11;
1136 pkt20_index = pkt10_index;
1137 pkt21_index = pkt11_index;
1138
1139 /* Pipeline stage 2 */
1140 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1141 bucket20, bucket21, pkts_mask_out, entries,
1142 buckets_mask, buckets, keys, f);
1143
1144 grind_next_buckets:
1145 /* Grind next buckets */
1146 for ( ; buckets_mask; ) {
1147 uint64_t buckets_mask_next = 0;
1148
1149 for ( ; buckets_mask; ) {
1150 uint64_t pkt_mask;
1151 uint32_t pkt_index;
1152
1153 pkt_index = __builtin_ctzll(buckets_mask);
1154 pkt_mask = 1LLU << pkt_index;
1155 buckets_mask &= ~pkt_mask;
1156
1157 lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
1158 entries, buckets_mask_next, f);
1159 }
1160
1161 buckets_mask = buckets_mask_next;
1162 }
1163
1164 *lookup_hit_mask = pkts_mask_out;
1165 RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
1166 return 0;
1167 } /* rte_table_hash_lookup_key32_ext() */
1168
1169 static int
1170 rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats, int clear)
1171 {
1172 struct rte_table_hash *t = table;
1173
1174 if (stats != NULL)
1175 memcpy(stats, &t->stats, sizeof(t->stats));
1176
1177 if (clear)
1178 memset(&t->stats, 0, sizeof(t->stats));
1179
1180 return 0;
1181 }
1182
1183 struct rte_table_ops rte_table_hash_key32_lru_ops = {
1184 .f_create = rte_table_hash_create_key32_lru,
1185 .f_free = rte_table_hash_free_key32_lru,
1186 .f_add = rte_table_hash_entry_add_key32_lru,
1187 .f_delete = rte_table_hash_entry_delete_key32_lru,
1188 .f_add_bulk = NULL,
1189 .f_delete_bulk = NULL,
1190 .f_lookup = rte_table_hash_lookup_key32_lru,
1191 .f_stats = rte_table_hash_key32_stats_read,
1192 };
1193
1194 struct rte_table_ops rte_table_hash_key32_ext_ops = {
1195 .f_create = rte_table_hash_create_key32_ext,
1196 .f_free = rte_table_hash_free_key32_ext,
1197 .f_add = rte_table_hash_entry_add_key32_ext,
1198 .f_delete = rte_table_hash_entry_delete_key32_ext,
1199 .f_add_bulk = NULL,
1200 .f_delete_bulk = NULL,
1201 .f_lookup = rte_table_hash_lookup_key32_ext,
1202 .f_stats = rte_table_hash_key32_stats_read,
1203 };
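/*
 * Editor's addition: a minimal, illustrative usage sketch, not part of the
 * DPDK source above. It shows how the exported rte_table_hash_key32_lru_ops
 * table is typically driven through the generic rte_table_ops interface
 * (f_create, then f_add). my_hash(), APP_KEY_OFFSET, the entry layout and
 * the sizing values are assumptions made for this example; error handling
 * and the mbuf-based f_lookup path are omitted.
 */
#include <stddef.h>
#include <stdint.h>

#include <rte_table_hash.h>

#define APP_KEY_OFFSET 128 /* assumed offset of the 32-byte key in mbuf metadata */

/* Assumed application hash callback; a real application would typically plug
 * in a CRC- or jhash-based function with this same rte_table_hash_op_hash
 * signature. This toy version just XOR-folds the masked key words. */
static uint64_t
my_hash(void *key, void *key_mask, uint32_t key_size, uint64_t seed)
{
	uint64_t *k = key, *m = key_mask, h = seed;
	uint32_t i;

	for (i = 0; i < key_size / sizeof(uint64_t); i++)
		h ^= k[i] & m[i];

	return h;
}

static void *
example_create_and_add(int socket_id)
{
	struct rte_table_hash_params params = {
		.name = "key32_example",
		.key_size = 32,               /* must equal KEY_SIZE above */
		.key_offset = APP_KEY_OFFSET,
		.key_mask = NULL,             /* NULL => all 32 key bytes significant */
		.n_keys = 1 << 16,
		.n_buckets = 1 << 14,         /* must be a power of 2 */
		.f_hash = my_hash,
		.seed = 0,
	};
	uint8_t key[32] = {0};                /* example key */
	uint64_t entry = 0xab;                /* example 8-byte entry */
	void *table, *entry_ptr;
	int key_found;

	table = rte_table_hash_key32_lru_ops.f_create(&params, socket_id,
		sizeof(entry));
	if (table == NULL)
		return NULL;

	rte_table_hash_key32_lru_ops.f_add(table, key, &entry, &key_found,
		&entry_ptr);

	return table;
}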