/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_flow.h"

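/*
 * Free every MCAM entry, MCAM counter and flow object owned by this
 * port. Used by the flush operation and at driver teardown.
 */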
int
otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
{
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct otx2_mcam_ents_info *info;
	struct rte_bitmap *bmap;
	struct rte_flow *flow;
	int entry_count = 0;
	int rc, idx;

	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		info = &npc->flow_entry_info[idx];
		entry_count += info->live_ent;
	}

	if (entry_count == 0)
		return 0;

	/* Free all MCAM entries allocated */
	rc = otx2_flow_mcam_free_all_entries(mbox);

	/* Free any MCAM counters and delete flow list */
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
			if (flow->ctr_id != NPC_COUNTER_NONE)
				rc |= otx2_flow_mcam_free_counter(mbox,
								  flow->ctr_id);

			TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
			/* Clear the live-entry bit before freeing the flow;
			 * flow must not be dereferenced after rte_free().
			 */
			bmap = npc->live_entries[flow->priority];
			rte_bitmap_clear(bmap, flow->mcam_id);
			rte_free(flow);
		}
		info = &npc->flow_entry_info[idx];
		info->free_ent = 0;
		info->live_ent = 0;
	}
	return rc;
}
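/*
 * Build the MCAM search key for a parsed flow and hand it to the
 * allocator. Only the non-LDATA nibbles (per-layer flags and layer
 * type) are packed here; LDATA fields were filled in by the parse
 * stages.
 */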
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
		 struct otx2_npc_flow_info *flow_info)
{
	/* This is non-LDATA part in search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int intf = pst->flow->nix_intf;
	int key_len, bit = 0, index;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	uint16_t layer_info;
	uint64_t lt, flags;

	/* Skip till Layer A data start */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S */
		layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

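		/*
		 * Each of the three layer_info bits enables one nibble:
		 * bit 0 - low flags nibble, bit 1 - high flags nibble,
		 * bit 2 - layer type (LT) nibble.
		 */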
		if (layer_info) {
			for (idx = 0; idx <= 2; idx++) {
				if (layer_info & (1 << idx)) {
					if (idx == 2)
						data = lt;
					else if (idx == 1)
						data = ((flags >> 4) & 0xf);
					else
						data = (flags & 0xf);

					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |= ((uint64_t)data <<
							    data_off);
					mask = 0xf;
					if (lt == 0)
						mask = 0;
					key_mask[index] |= ((uint64_t)mask <<
							    data_off);
					data_off += 4;
				}
			}
		}
	}

	otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
		     key_data[0], key_data[1]);

	/* Copy this into mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	otx2_npc_dbg("Key_len = %d", key_len);
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	otx2_npc_dbg("Final flow data");
	for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
		otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
			     idx, pst->flow->mcam_data[idx],
			     idx, pst->flow->mcam_mask[idx]);
	}

	/*
	 * Now we have mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * hole is present if key_len is an odd number of nibbles.
	 * mcam data must be split into 64 bits + 48 bits segments
	 * for each bank W0, W1.
	 */

	return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}

static int
flow_parse_attr(struct rte_eth_dev *eth_dev,
		const struct rte_flow_attr *attr,
		struct rte_flow_error *error,
		struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	const char *errmsg = NULL;

	if (attr == NULL)
		errmsg = "Attribute can't be empty";
	else if (attr->group)
		errmsg = "Groups are not supported";
	else if (attr->priority >= dev->npc_flow.flow_max_priority)
		errmsg = "Priority should be within the specified range";
	else if ((!attr->egress && !attr->ingress) ||
		 (attr->egress && attr->ingress))
		errmsg = "Exactly one of ingress or egress must be set";

	if (errmsg != NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
				   attr, errmsg);
		return -ENOTSUP;
	}

	if (attr->ingress)
		flow->nix_intf = OTX2_INTF_RX;
	else
		flow->nix_intf = OTX2_INTF_TX;

	flow->priority = attr->priority;
	return 0;
}

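/*
 * Scan the RSS group bitmap for the first free (clear) slot.
 * On success *pos holds the group index; returns -1 if all groups
 * are taken.
 */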
static inline int
flow_get_free_rss_grp(struct rte_bitmap *bmap,
		      uint32_t size, uint32_t *pos)
{
	for (*pos = 0; *pos < size; ++*pos) {
		if (!rte_bitmap_get(bmap, *pos))
			break;
	}

	return *pos < size ? 0 : -1;
}

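/*
 * Reserve a free RSS group (group 0 is owned by the default config),
 * program the key and the replicated indirection table, and pick a
 * hash algorithm index for the MCAM rule.
 */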
static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
			  const struct rte_flow_action_rss *rss,
			  uint8_t *alg_idx, uint32_t *rss_grp,
			  int mcam_index)
{
	struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
	uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
	uint32_t flowkey_cfg, grp_aval, i;
	uint16_t *ind_tbl = NULL;
	uint8_t flowkey_algx;
	int rc;

	rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
				   flow_info->rss_grps, &grp_aval);
	/* RSS group 0 is not usable for the flow RSS action */
	if (rc < 0 || grp_aval == 0)
		return -ENOSPC;

	*rss_grp = grp_aval;

	otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
			     rss->key_len);

	/* If the queue count passed in the RSS action is less than the
	 * HW-configured RETA size, replicate the RSS action's RETA
	 * across the HW RETA table.
	 */
	if (dev->rss_info.rss_size > rss->queue_num) {
		ind_tbl = reta;

		for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
			memcpy(reta + i * rss->queue_num, rss->queue,
			       sizeof(uint16_t) * rss->queue_num);

		i = dev->rss_info.rss_size % rss->queue_num;
		if (i)
			memcpy(&reta[dev->rss_info.rss_size] - i,
			       rss->queue, i * sizeof(uint16_t));
	} else {
		ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
	}

	rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
	if (rc) {
		otx2_err("Failed to init rss table rc = %d", rc);
		return rc;
	}

	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

	rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
			     *rss_grp, mcam_index);
	if (rc) {
		otx2_err("Failed to set rss hash function rc = %d", rc);
		return rc;
	}

	*alg_idx = flowkey_algx;

	rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

	return 0;
}

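/*
 * Walk the action list and, for each RSS action, set up an RSS group
 * and fold the chosen algorithm index and group number into the
 * flow's NPC action word.
 */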
static int
flow_program_rss_action(struct rte_eth_dev *eth_dev,
			const struct rte_flow_action actions[],
			struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	const struct rte_flow_action_rss *rss;
	uint32_t rss_grp;
	uint8_t alg_idx;
	int rc;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			rss = (const struct rte_flow_action_rss *)actions->conf;

			rc = flow_configure_rss_action(dev,
						       rss, &alg_idx, &rss_grp,
						       flow->mcam_id);
			if (rc)
				return rc;

			flow->npc_action |=
				((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
				 NIX_RSS_ACT_ALG_OFFSET) |
				((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
				 NIX_RSS_ACT_GRP_OFFSET);
		}
	}
	return 0;
}

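/*
 * If the flow carried an RSS action, return its RSS group to the
 * free pool. Group 0 belongs to the default RSS configuration and is
 * never released here.
 */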
static int
flow_free_rss_action(struct rte_eth_dev *eth_dev,
		     struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	uint32_t rss_grp;

	if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
		rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
			  NIX_RSS_ACT_GRP_MASK;
		if (rss_grp == 0 || rss_grp >= npc->rss_grps)
			return -EINVAL;

		rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
	}

	return 0;
}

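/*
 * When a SECURITY action is present in the list, let the
 * inline-security code refresh the tag type it uses for this port.
 */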
static int
flow_update_sec_tt(struct rte_eth_dev *eth_dev,
		   const struct rte_flow_action actions[])
{
	int rc = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
			rc = otx2_eth_sec_update_tag_type(eth_dev);
			break;
		}
	}

	return rc;
}

static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
	otx2_npc_dbg("Meta Item");
	return 0;
}

/*
 * Parse function of each layer:
 * - Consume one or more patterns that are relevant.
 * - Update parse_state
 * - Set parse_state.pattern = last item consumed
 * - Set appropriate error code/message when returning error.
 */
typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);

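/*
 * Run the pattern through the parse stages in layer order (meta,
 * HiGig2 header, then LA through LH). Each stage consumes the items
 * it recognizes and leaves pst->pattern pointing past them.
 */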
static int
flow_parse_pattern(struct rte_eth_dev *dev,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error,
		   struct rte_flow *flow,
		   struct otx2_parse_state *pst)
{
	flow_parse_stage_func_t parse_stage_funcs[] = {
		flow_parse_meta_items,
		otx2_flow_parse_higig2_hdr,
		otx2_flow_parse_la,
		otx2_flow_parse_lb,
		otx2_flow_parse_lc,
		otx2_flow_parse_ld,
		otx2_flow_parse_le,
		otx2_flow_parse_lf,
		otx2_flow_parse_lg,
		otx2_flow_parse_lh,
	};
	struct otx2_eth_dev *hw = dev->data->dev_private;
	uint8_t layer = 0;
	int key_offset;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "pattern is NULL");
		return -EINVAL;
	}

	memset(pst, 0, sizeof(*pst));
	pst->npc = &hw->npc_flow;
	pst->error = error;
	pst->flow = flow;

	/* Use integral byte offset */
	key_offset = pst->npc->keyx_len[flow->nix_intf];
	key_offset = (key_offset + 7) / 8;

	/* Location where LDATA would begin */
	pst->mcam_data = (uint8_t *)flow->mcam_data;
	pst->mcam_mask = (uint8_t *)flow->mcam_mask;

	while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
	       layer < RTE_DIM(parse_stage_funcs)) {
		otx2_npc_dbg("Pattern type = %d", pattern->type);

		/* Skip place-holders */
		pattern = otx2_flow_skip_void_and_any_items(pattern);

		pst->pattern = pattern;
		otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
		rc = parse_stage_funcs[layer](pst);
		if (rc != 0)
			return -rte_errno;

		layer++;

		/*
		 * Parse stage function sets pst->pattern to
		 * 1 past the last item it consumed.
		 */
		pattern = pst->pattern;

		if (pst->terminate)
			break;
	}

	/* Skip trailing place-holders */
	pattern = otx2_flow_skip_void_and_any_items(pattern);

	/* Are there more items than what we can handle? */
	if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "unsupported item in the sequence");
		return -ENOTSUP;
	}

	return 0;
}

static int
flow_parse_rule(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct rte_flow *flow,
		struct otx2_parse_state *pst)
{
	int err;

	/* Check attributes */
	err = flow_parse_attr(dev, attr, error, flow);
	if (err)
		return err;

	/* Check actions */
	err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
	if (err)
		return err;

	/* Check pattern */
	err = flow_parse_pattern(dev, pattern, error, flow, pst);
	if (err)
		return err;

	/* Check for overlaps? */
	return 0;
}

static int
otx2_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct otx2_parse_state parse_state;
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));
	return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
			       &parse_state);
}

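/*
 * Create path: allocate the flow, parse and program it into the NPC
 * MCAM, apply RSS and inline-security side effects, then link it
 * into the per-priority list in ascending mcam_id order.
 */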
static struct rte_flow *
otx2_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_parse_state parse_state;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_flow *flow, *flow_iter;
	struct otx2_flow_list *list;
	int rc;

	flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Memory allocation failed");
		return NULL;
	}
	memset(flow, 0, sizeof(*flow));

	rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
			     &parse_state);
	if (rc != 0)
		goto err_exit;

	rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to insert filter");
		goto err_exit;
	}

	rc = flow_program_rss_action(dev, actions, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to program rss action");
		goto err_exit;
	}

	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		rc = flow_update_sec_tt(dev, actions);
		if (rc != 0) {
			rte_flow_error_set(error, EIO,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Failed to update tt with sec act");
			goto err_exit;
		}
	}

	list = &hw->npc_flow.flow_list[flow->priority];
	/* List in ascending order of mcam entries */
	TAILQ_FOREACH(flow_iter, list, next) {
		if (flow_iter->mcam_id > flow->mcam_id) {
			TAILQ_INSERT_BEFORE(flow_iter, flow, next);
			return flow;
		}
	}

	TAILQ_INSERT_TAIL(list, flow, next);
	return flow;

err_exit:
	rte_free(flow);
	return NULL;
}

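/*
 * Destroy path: undo mark-action accounting, free the RSS group and
 * the MCAM entry, unlink the flow from its priority list and clear
 * its live-entry bit.
 */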
static int
otx2_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_bitmap *bmap;
	uint16_t match_id;
	int rc;

	match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
		   NIX_RX_ACT_MATCH_MASK;

	if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
		if (rte_atomic32_read(&npc->mark_actions) == 0)
			return -EINVAL;

		/* Clear mark offload flag if there are no more mark actions */
		if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
			hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
			otx2_eth_set_rx_function(dev);
		}
	}

	rc = flow_free_rss_action(dev, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to free rss action");
	}

	rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to destroy filter");
	}

	TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);

	bmap = npc->live_entries[flow->priority];
	rte_bitmap_clear(bmap, flow->mcam_id);

	rte_free(flow);
	return 0;
}

static int
otx2_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	int rc;

	rc = otx2_flow_free_all_resources(hw);
	if (rc) {
		otx2_err("Error when deleting NPC MCAM entries, counters");
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to flush filter");
		return -rte_errno;
	}

	return 0;
}

static int
otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
		  int enable __rte_unused,
		  struct rte_flow_error *error)
{
	/*
	 * If isolation were supported, the default MCAM entry for
	 * this port would have to be uninstalled here.
	 */

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "Flow isolation not supported");

	return -rte_errno;
}

static int
otx2_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *action,
		void *data,
		struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct rte_flow_query_count *query = data;
	struct otx2_mbox *mbox = hw->mbox;
	const char *errmsg = NULL;
	int errcode = ENOTSUP;
	int rc;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
		errmsg = "Only COUNT is supported in query";
		goto err_exit;
	}

	if (flow->ctr_id == NPC_COUNTER_NONE) {
		errmsg = "Counter is not available";
		goto err_exit;
	}

	rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
	if (rc != 0) {
		errcode = EIO;
		errmsg = "Error reading flow counter";
		goto err_exit;
	}
	query->hits_set = 1;
	query->bytes_set = 0;

	if (query->reset) {
		rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
		if (rc != 0) {
			errcode = EIO;
			errmsg = "Error clearing flow counter";
			goto err_exit;
		}
	}

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   errmsg);
	return -rte_errno;
}

const struct rte_flow_ops otx2_flow_ops = {
	.validate = otx2_flow_validate,
	.create = otx2_flow_create,
	.destroy = otx2_flow_destroy,
	.flush = otx2_flow_flush,
	.query = otx2_flow_query,
	.isolate = otx2_flow_isolate,
};

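/*
 * Count the set bits in the supported-nibble mask (Kernighan's
 * bit-clearing loop). Each set bit stands for one 4-bit nibble of
 * the search key, so the result is the key length in bits.
 */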
static int
flow_supp_key_len(uint32_t supp_mask)
{
	int nib_count = 0;

	while (supp_mask) {
		nib_count++;
		supp_mask &= (supp_mask - 1);
	}
	return nib_count * 4;
}

/* Refer HRM register:
 * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
 * and
 * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
 */
#define BYTESM1_SHIFT	16
#define HDR_OFF_SHIFT	8
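/*
 * Field layout decoded below: bits [19:16] hold BYTESM1 (extract
 * length minus one), [15:8] the header offset, bit 7 the enable
 * flag, bit 6 the flags-enable flag and [5:0] the key offset.
 */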
static void
flow_update_kex_info(struct npc_xtract_info *xtract_info,
		     uint64_t val)
{
	xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
	xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
	xtract_info->key_off = val & 0x3f;
	xtract_info->enable = ((val >> 7) & 0x1);
	xtract_info->flags_enable = ((val >> 6) & 0x1);
}

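/*
 * Unpack the kex-config mailbox response into the per-interface
 * nibble masks, key lengths, key widths and extraction tables kept
 * in the driver's NPC flow info.
 */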
static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
		      struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
		[NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	otx2_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info =
				    &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				flow_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
		(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					flow_update_kex_info(x_info, val);
				}
			}
		}
	}

	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}

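/*
 * The KEX config is shared by all ports of a device through a named
 * memzone: the first caller reserves it, later callers simply look
 * it up.
 */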
static struct otx2_idev_kex_cfg *
flow_intra_dev_kex_cfg(void)
{
	static const char name[] = "octeontx2_intra_device_kex_conf";
	struct otx2_idev_kex_cfg *idev;
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz->addr;

	/* Request for the first time */
	mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
					 SOCKET_ID_ANY, 0, OTX2_ALIGN);
	if (mz) {
		idev = mz->addr;
		rte_atomic16_set(&idev->kex_refcnt, 0);
		return idev;
	}
	return NULL;
}

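/*
 * Fetch the NPC key-extraction (MKEX) profile via mailbox once per
 * device (guarded by the refcount) and derive the parsing tables
 * from it.
 */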
static int
flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
{
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	struct npc_get_kex_cfg_rsp *kex_rsp;
	struct otx2_mbox *mbox = dev->mbox;
	char mkex_pfl_name[MKEX_NAME_LEN];
	struct otx2_idev_kex_cfg *idev;
	int rc = 0;

	idev = flow_intra_dev_kex_cfg();
	if (!idev)
		return -ENOMEM;

	/* Was kex_cfg already read by another driver? */
	if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
		/* Call mailbox to get key & data size */
		(void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
		otx2_mbox_msg_send(mbox, 0);
		rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
		if (rc) {
			otx2_err("Failed to fetch NPC keyx config");
			goto done;
		}
		memcpy(&idev->kex_cfg, kex_rsp,
		       sizeof(struct npc_get_kex_cfg_rsp));
	}

	otx2_mbox_memcpy(mkex_pfl_name,
			 idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);

	strlcpy((char *)dev->mkex_pfl_name,
		mkex_pfl_name, sizeof(dev->mkex_pfl_name));

	flow_process_mkex_cfg(npc, &idev->kex_cfg);

done:
	return rc;
}

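/*
 * Set up flow bookkeeping for the port: fetch the KEX config, then
 * allocate per-priority free/free_rev/live/live_rev MCAM bitmaps,
 * entry-info arrays, flow lists and the RSS group bitmap.
 */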
int
otx2_flow_init(struct otx2_eth_dev *hw)
{
	uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	uint32_t bmap_sz;
	int rc = 0, idx;

	rc = flow_fetch_kex_cfg(hw);
	if (rc) {
		otx2_err("Failed to fetch NPC keyx config from idev");
		return rc;
	}

	rte_atomic32_init(&npc->mark_actions);

	npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
	/* Free, free_rev, live and live_rev entries */
	bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
	mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
			  RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		return rc;
	}

	npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
					   * sizeof(struct otx2_mcam_ents_info),
					   0);
	if (npc->flow_entry_info == NULL) {
		otx2_err("flow_entry_info alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->free_entries == NULL) {
		otx2_err("free_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->free_entries_rev == NULL) {
		otx2_err("free_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->live_entries == NULL) {
		otx2_err("live_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->live_entries_rev == NULL) {
		otx2_err("live_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
				     * sizeof(struct otx2_flow_list),
				     0);
	if (npc->flow_list == NULL) {
		otx2_err("flow_list alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc_mem = mem;
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		TAILQ_INIT(&npc->flow_list[idx]);

		npc->free_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->free_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->flow_entry_info[idx].free_ent = 0;
		npc->flow_entry_info[idx].live_ent = 0;
		npc->flow_entry_info[idx].max_id = 0;
		npc->flow_entry_info[idx].min_id = ~(0);
	}

	npc->rss_grps = NIX_RSS_GRPS;

	bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
	nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
	if (nix_mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);

	/* Group 0 will be used for RSS,
	 * 1-7 will be used for rte_flow RSS action
	 */
	rte_bitmap_set(npc->rss_grp_entries, 0);

	return 0;

err:
	if (npc->flow_list)
		rte_free(npc->flow_list);
	if (npc->live_entries_rev)
		rte_free(npc->live_entries_rev);
	if (npc->live_entries)
		rte_free(npc->live_entries);
	if (npc->free_entries_rev)
		rte_free(npc->free_entries_rev);
	if (npc->free_entries)
		rte_free(npc->free_entries);
	if (npc->flow_entry_info)
		rte_free(npc->flow_entry_info);
	if (npc_mem)
		rte_free(npc_mem);
	return rc;
}

int
otx2_flow_fini(struct otx2_eth_dev *hw)
{
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	int rc;

	rc = otx2_flow_free_all_resources(hw);
	if (rc) {
		otx2_err("Error when deleting NPC MCAM entries, counters");
		return rc;
	}

	if (npc->flow_list)
		rte_free(npc->flow_list);
	if (npc->live_entries_rev)
		rte_free(npc->live_entries_rev);
	if (npc->live_entries)
		rte_free(npc->live_entries);
	if (npc->free_entries_rev)
		rte_free(npc->free_entries_rev);
	if (npc->free_entries)
		rte_free(npc->free_entries);
	if (npc->flow_entry_info)
		rte_free(npc->flow_entry_info);

	return 0;
}