/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
#include "l2t.h"

/**
 * Initialize Hash Filters
 */
int init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))

        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}
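
/*
 * Note on sizing: the firmware's NTID count caps the hash-filter TID space,
 * and half of it (bounded by MAX_ATIDS) is set aside as "active" TIDs, which
 * this driver uses to track filters while their ACT_OPEN requests are in
 * flight. With user_filter_perc fixed at 100, all of nftids stays with the
 * user, e.g. mult_frac(496, 100, 100) == 496.
 */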

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
{
        u32 fconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = adapter->params.tp.vlan_pri_map;

#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))

        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
                return -EOPNOTSUPP;

#undef S
#undef U

        /*
         * If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
         */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        return 0;
}
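
/*
 * Background for the checks above: the TP compressed filter tuple only
 * carries the match fields that were enabled in TP_VLAN_PRI_MAP when the
 * adapter was configured. The S()/U() macros therefore reject any
 * specification that sets a value or mask for a field whose bit is absent
 * from the active fconf; e.g. matching on ethtype while F_ETHERTYPE is
 * unconfigured cannot be expressed in hardware.
 */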

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is greater than or equal to the number of
                 * qsets, then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}
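
/*
 * Example: with dirsteer set and fs->iq == 2 on a port whose first qset is 8,
 * the filter steers to adapter->sge.ethrxq[10].rspq.abs_id; an fs->iq value
 * at or beyond n_rx_qsets is passed through unchanged as an absolute qid.
 */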

/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
                          u16 word, u64 mask, u64 val, int no_reply)
{
        struct rte_mbuf *mbuf;
        struct cpl_set_tcb_field *req;
        struct sge_ctrl_txq *ctrlq;

        ctrlq = &adapter->sge.ctrlq[0];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        WARN_ON(!mbuf);

        mbuf->data_len = sizeof(*req);
        mbuf->pkt_len = mbuf->data_len;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
        memset(req, 0, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                      V_NO_REPLY(no_reply));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        t4_mgmt_tx(ctrlq, mbuf);
}
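
/*
 * A CPL_SET_TCB_FIELD updates one region of a connection's TCB: "word"
 * addresses the region, "mask" selects the bits to change and "val" supplies
 * their new contents. When a reply is requested it is steered to the
 * firmware event queue, with the ftid echoed back in the cookie so the reply
 * handler can locate the filter.
 */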

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
                          unsigned int bit_pos, unsigned int val, int no_reply)
{
        set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
                      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word,
                                        u64 mask, u64 val, u8 cookie,
                                        int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                      V_QUEUENO(0));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}
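
/*
 * Resulting layout of the composite built above (a sketch):
 *
 *   struct ulp_txpkt       - ULP_TX_PKT header, overlaid on the CPL's WR slot
 *   struct ulptx_idata     - ULP_TX_SC_IMM: immediate data follows
 *   CPL_SET_TCB_FIELD body - the actual TCB update
 *   struct ulptx_idata     - ULP_TX_SC_NOOP: pads to a 16-byte boundary
 *
 * Packaging the CPL this way lets several of them ride in one ULPTX work
 * request; cxgbe_del_hash_filter() chains this with an abort request/reply
 * pair.
 */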

/**
 * Check if an entry is already filled.
 */
bool is_filter_set(struct tid_info *t, int fidx, int family)
{
        bool result = FALSE;
        int i, max;

        /* IPv6 requires four slots and IPv4 requires only one slot.
         * Ensure there are enough slots available.
         */
        max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i <= max; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Allocate an available free entry
 */
int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

        t4_os_lock(&t->ftid_lock);
        if (family == FILTER_TYPE_IPV6)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}
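
/*
 * An IPv6 filter consumes a naturally aligned run of four ftid slots, hence
 * the 4-slot free-region search; IPv4 needs any single free bit. A return
 * value of -1 means the LE-TCAM filter region is exhausted.
 */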

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;
        u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

        if (tp->port_shift >= 0 && f->fs.mask.iport)
                ntuple |= (u64)f->fs.val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!f->fs.val.proto)
                        ntuple |= (u64)tcp_proto << tp->protocol_shift;
                else
                        ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
        }

        if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
                ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
        if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
                ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;

        return ntuple;
}
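
/*
 * The ntuple packs each enabled compressed-filter field at the shift TP
 * assigned it when TP_VLAN_PRI_MAP was configured (a shift of -1 marks a
 * disabled field). For example, with hypothetical shifts protocol_shift == 8
 * and ethertype_shift == 18, a TCP-over-Ethernet filter would contribute
 * (6 << 8) | (0x0800 << 18). An unspecified protocol defaults to TCP,
 * presumably because the hash filter lives in the TCP connection lookup path.
 */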

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_req) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = cpu_to_be32(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = cpu_to_be32(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_rpl + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
                                 unsigned int filter_id,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        if (filter_id > adapter->tids.ntids)
                return -E2BIG;

        f = lookup_tid(t, filter_id);
        if (!f) {
                dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
                        __func__, filter_id);
                return -EINVAL;
        }

        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                unsigned int wrlen;
                struct rte_mbuf *mbuf;
                struct work_request_hdr *wr;
                struct ulptx_idata *aligner;
                struct cpl_set_tcb_field *req;
                struct cpl_abort_req *abort_req;
                struct cpl_abort_rpl *abort_rpl;

                f->ctx = ctx;
                f->pending = 1;

                wrlen = cxgbe_roundup(sizeof(*wr) +
                                      (sizeof(*req) + sizeof(*aligner)) +
                                      sizeof(*abort_req) + sizeof(*abort_rpl),
                                      16);

                ctrlq = &adapter->sge.ctrlq[port_id];
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        dev_err(adapter, "%s: could not allocate mbuf ..\n",
                                __func__);
                        goto out_err;
                }

                mbuf->data_len = wrlen;
                mbuf->pkt_len = mbuf->data_len;

                req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
                INIT_ULPTX_WR(req, wrlen, 0, 0);
                wr = (struct work_request_hdr *)req;
                wr++;
                req = (struct cpl_set_tcb_field *)wr;
                mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
                                     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
                                     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
                                     0, 1);
                aligner = (struct ulptx_idata *)(req + 1);
                abort_req = (struct cpl_abort_req *)(aligner + 1);
                mk_abort_req_ulp(abort_req, f->tid);
                abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
                mk_abort_rpl_ulp(abort_rpl, f->tid);
                t4_mgmt_tx(ctrlq, mbuf);
        }
        return 0;

out_err:
        return -ENOMEM;
}
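
/*
 * Hash-filter deletion thus bundles three CPLs into one ULPTX work request:
 * a CPL_SET_TCB_FIELD that points the TCB's RSS_INFO at the firmware event
 * queue (so the abort completion is delivered there), then a CPL_ABORT_REQ /
 * CPL_ABORT_RPL pair that tears down the TID. The eventual reply is picked
 * up by hash_del_filter_rpl() below.
 */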

/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *req = NULL;
        u64 local_lo, local_hi, peer_lo, peer_hi;
        u32 *lip = (u32 *)f->fs.val.lip;
        u32 *fip = (u32 *)f->fs.val.fip;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        local_hi = ((u64)lip[1]) << 32 | lip[0];
        local_lo = ((u64)lip[3]) << 32 | lip[2];
        peer_hi = ((u64)fip[1]) << 32 | fip[0];
        peer_lo = ((u64)fip[3]) << 32 | fip[2];

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = local_hi;
        req->local_ip_lo = local_lo;
        req->peer_ip_hi = peer_hi;
        req->peer_ip_lo = peer_lo;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                                V_RSS_QUEUE(f->fs.iq) |
                                F_T5_OPT_2_VALID |
                                F_RX_CHANNEL |
                                V_SACK_EN(f->fs.swapmac) |
                                V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                             (f->fs.dirsteer << 1)) |
                                V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *req = NULL;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
                        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
        req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
                       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                                V_RSS_QUEUE(f->fs.iq) |
                                F_T5_OPT_2_VALID |
                                F_RX_CHANNEL |
                                V_SACK_EN(f->fs.swapmac) |
                                V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                             (f->fs.dirsteer << 1)) |
                                V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
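
/*
 * Hash ("maskless") filters reuse the connection setup path: an
 * ACT_OPEN_REQ/REQ6 with F_TCAM_BYPASS and F_NON_OFFLOAD creates a TCB-backed
 * match entry instead of an offloaded connection. Filter semantics ride in
 * overloaded connection fields: V_SACK_EN() carries swapmac, V_CONG_CNTRL()
 * encodes the drop/steer bits and V_CCTRL_ECN() the switch action.
 */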

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct rte_mbuf *mbuf;
        struct sge_ctrl_txq *ctrlq;
        unsigned int iq;
        int atid, size;
        int ret = 0;

        ret = validate_filter(adapter, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);

        ctrlq = &adapter->sge.ctrlq[pi->port_id];

        f = t4_os_alloc(sizeof(*f));
        if (!f) {
                ret = -ENOMEM;
                goto out_err;
        }

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan == VLAN_INSERT ||
            f->fs.newvlan == VLAN_REWRITE) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        atid = cxgbe_alloc_atid(t, f);
        if (atid < 0) {
                ret = -ENOMEM;
                goto out_err;
        }

        if (f->fs.type) {
                /* IPv6 hash filter */
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                size = sizeof(struct cpl_t6_act_open_req6);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_clip;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req6(f, mbuf,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                /* IPv4 hash filter */
                size = sizeof(struct cpl_t6_act_open_req);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req(f, mbuf,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

free_clip:
        cxgbe_clip_release(f->dev, f->clipt);
free_atid:
        cxgbe_free_atid(t, atid);

out_err:
        t4_os_free(f);
        return ret;
}
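
/*
 * The qid_filterid value passed to the request builders above packs the
 * firmware event queue id into the upper bits and the atid into the lower
 * 14 bits; hash_filter_rpl() recovers the atid from the CPL_ACT_OPEN_RPL
 * to find this pending filter_entry again.
 */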

/**
 * Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct filter_entry *f)
{
        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        /*
         * The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
                            struct fw_filter2_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        if (adap->params.filter2_wr_support)
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                        cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

/**
 * Send a filter work request to program the LE-TCAM filter at index fidx.
 */
int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t)
                        return -ENOMEM;
        }

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf) {
                ret = -ENOMEM;
                goto out;
        }

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_INSVLAN
                                (f->fs.newvlan == VLAN_INSERT ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_RMVLAN
                                (f->fs.newvlan == VLAN_REMOVE ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio) |
                            V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                            V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                            V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                            V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

        if (adapter->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                        V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                        V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = cpu_to_be16(f->fs.nat_lport);
                fwr->newfport = cpu_to_be16(f->fs.nat_fport);
        }

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

out:
        return ret;
}
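
/*
 * With FW_FILTER2_WR support, the same work request can also program NAT:
 * nat_mode selects which of the rewrite fields (newlip/newfip and
 * newlport/newfport) the hardware applies to matching packets. The
 * ULP_MODE_TCPDDP selection above appears to be required for the NAT edits
 * to take effect whenever a nat_mode is set.
 */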

/**
 * Set the corresponding entry in the bitmap. 4 slots are
 * marked for IPv6, whereas only 1 slot is marked for IPv4.
 */
static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
{
        t4_os_lock(&t->ftid_lock);
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        if (family == FILTER_TYPE_IPV4) {
                rte_bitmap_set(t->ftid_bmap, fidx);
        } else {
                rte_bitmap_set(t->ftid_bmap, fidx);
                rte_bitmap_set(t->ftid_bmap, fidx + 1);
                rte_bitmap_set(t->ftid_bmap, fidx + 2);
                rte_bitmap_set(t->ftid_bmap, fidx + 3);
        }
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entry in the bitmap. 4 slots are
 * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
 */
static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
{
        t4_os_lock(&t->ftid_lock);
        if (family == FILTER_TYPE_IPV4) {
                rte_bitmap_clear(t->ftid_bmap, fidx);
        } else {
                rte_bitmap_clear(t->ftid_bmap, fidx);
                rte_bitmap_clear(t->ftid_bmap, fidx + 1);
                rte_bitmap_clear(t->ftid_bmap, fidx + 2);
                rte_bitmap_clear(t->ftid_bmap, fidx + 3);
        }
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        unsigned int chip_ver;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_del_hash_filter(dev, filter_id, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = is_filter_set(&adapter->tids, filter_id, fs->type);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        /*
         * Ensure filter id is aligned on the 2 slot boundary for T6,
         * and 4 slot boundary for cards below T6.
         */
        if (fs->type) {
                if (chip_ver < CHELSIO_T6)
                        filter_id &= ~(0x3);
                else
                        filter_id &= ~(0x1);
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 f->fs.type ? FILTER_TYPE_IPV6 :
                                              FILTER_TYPE_IPV4);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int fidx, iq, fid_bit = 0;
        struct filter_entry *f;
        unsigned int chip_ver;
        uint8_t bitoff[16] = {0};
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * Ensure filter id is aligned on the 4 slot boundary for IPv6
         * maskfull filters.
         */
        if (fs->type)
                filter_id &= ~(0x3);

        ret = is_filter_set(&adapter->tids, filter_id, fs->type);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries for T5. On T6, IPv6 filters occupy two slots and
         * must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements but writing a new IPv4 filter into the middle
         * of an existing IPv6 filter requires clearing the old IPv6
         * filter.
         */
        if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
                /*
                 * For T6, if our IPv4 filter isn't being written to a
                 * multiple of two filter index and there's an IPv6
                 * filter at the multiple of 2 base slot, then we need
                 * to delete that IPv6 filter ...
                 * For adapters below T6, an IPv6 filter occupies 4 entries.
                 */
                if (chip_ver < CHELSIO_T6)
                        fidx = filter_id & ~0x3;
                else
                        fidx = filter_id & ~0x1;

                if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        } else { /* IPv6 */
                unsigned int max_filter_id;

                if (chip_ver < CHELSIO_T6) {
                        /*
                         * Ensure that the IPv6 filter is aligned on a
                         * multiple of 4 boundary.
                         */
                        if (filter_id & 0x3)
                                return -EINVAL;

                        max_filter_id = filter_id + 4;
                } else {
                        /*
                         * For T6, with CLIP enabled, an IPv6 filter
                         * occupies 2 entries.
                         */
                        if (filter_id & 0x1)
                                return -EINVAL;

                        max_filter_id = filter_id + 2;
                }

                /*
                 * Check all except the base overlapping IPv4 filter
                 * slots.
                 */
                for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        }

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        fidx = adapter->tids.ftid_base + filter_id;
        fid_bit = filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
                             fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, fid_bit,
                                 fs->type ? FILTER_TYPE_IPV6 :
                                            FILTER_TYPE_IPV4);
                return ret;
        }

        /*
         * Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        /*
         * Allocate a clip table entry only if we have a non-zero IPv6 address
         */
        if (chip_ver > CHELSIO_T5 && fs->type &&
            memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        /*
         * Attempt to set the filter. If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret) {
                fid_bit = f->tid - adapter->tids.ftid_base;
                goto free_tid;
        }

        return ret;

free_tid:
        cxgbe_clear_ftid(&adapter->tids, fid_bit,
                         fs->type ? FILTER_TYPE_IPV6 :
                                    FILTER_TYPE_IPV4);
        clear_filter(f);
        return ret;
}
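
/*
 * Typical caller flow (a sketch; the rte_flow glue in cxgbe_flow.c follows
 * this pattern, though the names here are illustrative): fill in a
 * ch_filter_specification, initialise a filter_ctx completion, submit, then
 * poll the firmware event queue until the reply handler posts a result:
 *
 *      struct filter_ctx ctx;
 *
 *      t4_init_completion(&ctx.completion);
 *      err = cxgbe_set_filter(dev, fidx, &fs, &ctx);
 *      if (!err)
 *              err = poll_for_completion(&adap->sge.fw_evtq, delay_us,
 *                                        max_tries, &ctx.completion);
 *      if (!err)
 *              err = ctx.result;  -- set by filter_rpl()/hash_filter_rpl()
 */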

/**
 * Handle a Hash filter write reply.
 */
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %d\n",
                         __func__, ftid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0; /* asynchronous setup completed */
                f->valid = 1;

                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                cxgbe_free_atid(t, ftid);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx < 0 || idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0; /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong. Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, int hash, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;

                        if (is_t5(adapter->params.chip)) {
                                *c = 0;
                                return 0;
                        }
                        tcbaddr = tcb_base + (fidx * TCB_SIZE);
                        goto get_count;
                } else {
                        return -ERANGE;
                }
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;

                tcbaddr = tcb_base + f->tid * TCB_SIZE;
        }

get_count:
        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5, the Filter Packet Hit Count is maintained as a
                 * 32-bit Big Endian value in the TCB field {timestamp}.
                 * Instead of the filter hit count showing up at offset 20
                 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
                 * up at offset 24. Whacky.
                 */
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}
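
/*
 * Worked example of the offsets used above: the TCB for tid N lives at
 * tcb_base + N * TCB_SIZE in adapter memory. With hitcnts enabled, the
 * 32-bit packet count is read at byte offset 6 * 4 == 24 (the repurposed
 * {timestamp} field), and the 64-bit byte count at offset 4 * 4 == 16
 * (the {t_rtt_ts_recent_age}/{t_rtseq_recent} pair, going by what
 * cxgbe_clear_filter_count() clears), both big-endian.
 */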

/*
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
                             int hash, bool clear_byte)
{
        u64 tcb_mask = 0, tcb_val = 0;
        struct filter_entry *f = NULL;
        u16 tcb_word = 0;

        if (is_hashfilter(adapter) && hash) {
                if (fidx >= adapter->tids.ntids)
                        return -ERANGE;

                /* No hitcounts supported for T5 hashfilters */
                if (is_t5(adapter->params.chip))
                        return 0;

                f = adapter->tids.tid_tab[fidx];
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
        }

        if (!f || !f->valid)
                return -EINVAL;

        tcb_word = W_TCB_TIMESTAMP;
        tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
        tcb_val = V_TCB_TIMESTAMP(0ULL);

        set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

        if (clear_byte) {
                tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
                tcb_mask =
                        V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
                        V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
                tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
                          V_TCB_T_RTSEQ_RECENT(0ULL);

                set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
        }

        return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void hash_del_filter_rpl(struct adapter *adap,
                         const struct cpl_abort_rpl_rss *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);

        f = lookup_tid(t, tid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, tid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        f->valid = 0;

        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        cxgbe_remove_tid(t, 0, tid, 0);
        t4_os_free(f);

        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }
}