/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID		0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (dev != pf_bp->dev)
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}

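/* Parse a TC mirred egress redirect action: look up the destination
 * netdev by ifindex and record it as the flow's forwarding target.
 */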
static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	int ifindex = tcf_mirred_ifindex(tc_act);
	struct net_device *dev;

	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
	if (!dev) {
		netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}

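/* Parse a TC vlan action into push/pop flags; for a push, also record
 * the VLAN TCI and TPID to be used by the L2 header rewrite.
 */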
static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}

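/* Parse a TC tunnel_key set action: only IPv4 encap keys are supported.
 * The key is saved and used later to allocate the encap record.
 */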
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

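/* Walk the flow's TC action list and translate each supported action
 * (drop, redirect, vlan push/pop, tunnel encap/decap) into
 * BNXT_TC_ACTION_FLAG_* bits and related fields in 'actions'.
 */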
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc = 0;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	if (rc)
		return rc;

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return rc;
}

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)

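/* Translate a flower classifier's dissector keys/masks and its action
 * list into the driver's bnxt_tc_flow representation.
 */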
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd,
				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

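/* Free a FW flow via HWRM_CFA_FLOW_FREE */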
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);
	return rc;
}

static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

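/* Build and send a HWRM_CFA_FLOW_ALLOC request for the parsed flow;
 * on success the FW-assigned flow_handle is returned to the caller.
 */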
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

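/* Allocate a VXLAN decap filter in FW for the flow's tunnel key and the
 * resolved outer L2 header; returns the decap filter handle.
 */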
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

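/* Allocate a VXLAN encap record in FW from the encap key and the resolved
 * outer L2 header (dmac/smac/vlan); returns the encap record handle.
 */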
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
			(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

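/* Drop a flow's reference on its shared L2 node and free the node when the
 * last flow using that L2 key goes away.
 */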
static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

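/* Resolve the outer headers for a tunnel: route the tunnel destination,
 * verify the egress device maps to this PF (directly or via a VLAN upper),
 * and fill in the source IP, TTL and neighbour MAC addresses.
 */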
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
#else
	return -EOPNOTSUPP;
#endif
}

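/* Get (or allocate) the FW decap filter handle for a flow's tunnel key,
 * sharing an existing handle when another flow already uses the same
 * tunnel and outer L2 headers.
 */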
static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap
	 * Resolve the route for remote vtep (saddr) of the decap key
	 * Find its next-hop mac addrs
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
	if (rc)
		goto put_decap;

	decap_l2_info = &decap_node->l2_info;
	/* decap smac is wildcarded */
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	return __bnxt_tc_del_flow(bp, flow_node);
}

static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -1;
	}

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

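/* Query FW for the hardware counters of a batch of flows via
 * HWRM_CFA_FLOW_STATS and copy the results into stats_batch[].
 */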
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and will wrap-around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)	((x) & (mask))
#define high_bits(x, mask)	((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}

/* The HW counters' width is much less than 64bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rc = rhashtable_walk_start(iter);
	if (rc && rc != -EAGAIN) {
		i = 0;
		goto done;
	}

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

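/* Entry point for TC flower offload requests (add/delete/stats)
 * dispatched from the driver's setup_tc handling.
 */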
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}
	return rc;
}

static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)	((u64)~0 >> (64 - (width)))

int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}

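/* Tear down TC flower offload state: destroy all hash tables and free
 * the per-device tc_info.
 */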
void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}