]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/dsa/bcm_sf2_cfp.c
net: dsa: bcm_sf2: Move IPv4 CFP processing to specific functions
[mirror_ubuntu-bionic-kernel.git] / drivers / net / dsa / bcm_sf2_cfp.c
CommitLineData
7318166c
FF
1/*
2 * Broadcom Starfighter 2 DSA switch CFP support
3 *
4 * Copyright (C) 2016, Broadcom
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/list.h>
7318166c
FF
13#include <linux/ethtool.h>
14#include <linux/if_ether.h>
15#include <linux/in.h>
c6e970a0
AL
16#include <linux/netdevice.h>
17#include <net/dsa.h>
7318166c
FF
18#include <linux/bitmap.h>
19
20#include "bcm_sf2.h"
21#include "bcm_sf2_regs.h"
22
/* Describes one way of programming the UDF (User Defined Field) extraction
 * slices: the per-slice packet offsets to extract, plus the matching mask
 * that must be programmed into CORE_CFP_MASK_PORT(6) for that layout.
 */
struct cfp_udf_layout {
	u8 slices[UDF_NUM_SLICES];	/* CFG_UDF_* | word offset; 0 = slice unused */
	u32 mask_value;			/* framing/proto/frag mask for this layout */

};
28
/* UDF slices layout for a TCPv4/UDPv4 specification: four slices extract the
 * IPv4 source/destination addresses (16 bits each, offsets relative to the
 * end of the L2 header) and two slices extract the L4 source/destination
 * ports (offsets relative to the end of the L3 header).
 */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.slices = {
		/* End of L2, byte offset 12, src IP[0:15] */
		CFG_UDF_EOL2 | 6,
		/* End of L2, byte offset 14, src IP[16:31] */
		CFG_UDF_EOL2 | 7,
		/* End of L2, byte offset 16, dst IP[0:15] */
		CFG_UDF_EOL2 | 8,
		/* End of L2, byte offset 18, dst IP[16:31] */
		CFG_UDF_EOL2 | 9,
		/* End of L3, byte offset 0, src port */
		CFG_UDF_EOL3 | 0,
		/* End of L3, byte offset 2, dst port */
		CFG_UDF_EOL3 | 1,
		/* Remaining slices are unused */
		0, 0, 0
	},
	.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
};
48
49static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
50{
51 unsigned int i, count = 0;
52
53 for (i = 0; i < UDF_NUM_SLICES; i++) {
54 if (layout[i] != 0)
55 count++;
56 }
57
58 return count;
59}
60
61static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
62 unsigned int slice_num,
63 const u8 *layout)
64{
65 u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
66 unsigned int i;
67
68 for (i = 0; i < UDF_NUM_SLICES; i++)
69 core_writel(priv, layout[i], offset + i * 4);
70}
71
72static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
73{
74 unsigned int timeout = 1000;
75 u32 reg;
76
77 reg = core_readl(priv, CORE_CFP_ACC);
78 reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
79 reg |= OP_STR_DONE | op;
80 core_writel(priv, reg, CORE_CFP_ACC);
81
82 do {
83 reg = core_readl(priv, CORE_CFP_ACC);
84 if (!(reg & OP_STR_DONE))
85 break;
86
87 cpu_relax();
88 } while (timeout--);
89
90 if (!timeout)
91 return -ETIMEDOUT;
92
93 return 0;
94}
95
/* Select TCAM/RAM entry @addr for the next CFP access issued through
 * bcm_sf2_cfp_op(). @addr must be below the number of CFP rules.
 */
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	/* Catch callers passing a non-existent entry address */
	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}
108
/* Number of CFP rules usable by user-space: the full TCAM size minus the
 * default entry #0, which is reserved and never handed out.
 */
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
114
33061458
FF
/* Program the Action and Policer RAM entries for rule @rule_index: forward
 * matching packets to @port_num/@queue_num, tag them with a classification
 * ID equal to the rule index, and disable rate metering for the rule.
 *
 * Returns 0 on success or the error from the CFP RAM write operations.
 */
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
	      CHANGE_TC | queue_num << NEW_TC_SHIFT;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT,
		    CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}
156
157static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
158 unsigned int port_num,
159 unsigned int queue_num,
160 struct ethtool_rx_flow_spec *fs)
161{
162 const struct cfp_udf_layout *layout;
163 struct ethtool_tcpip4_spec *v4_spec;
164 unsigned int slice_num, rule_index;
165 u8 ip_proto, ip_frag;
166 u8 num_udf;
167 u32 reg;
168 int ret;
7318166c
FF
169
170 switch (fs->flow_type & ~FLOW_EXT) {
171 case TCP_V4_FLOW:
172 ip_proto = IPPROTO_TCP;
173 v4_spec = &fs->h_u.tcp_ip4_spec;
174 break;
175 case UDP_V4_FLOW:
176 ip_proto = IPPROTO_UDP;
177 v4_spec = &fs->h_u.udp_ip4_spec;
178 break;
179 default:
180 return -EINVAL;
181 }
182
33061458
FF
183 ip_frag = be32_to_cpu(fs->m_ext.data[0]);
184
185 /* Locate the first rule available */
186 if (fs->location == RX_CLS_LOC_ANY)
187 rule_index = find_first_zero_bit(priv->cfp.used,
188 bcm_sf2_cfp_rule_size(priv));
189 else
190 rule_index = fs->location;
191
7318166c
FF
192 /* We only use one UDF slice for now */
193 slice_num = 1;
194 layout = &udf_tcpip4_layout;
195 num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
196
197 /* Apply the UDF layout for this filter */
198 bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
199
200 /* Apply to all packets received through this port */
201 core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
202
33061458
FF
203 /* Source port map match */
204 core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
205
7318166c
FF
206 /* S-Tag status [31:30]
207 * C-Tag status [29:28]
208 * L2 framing [27:26]
209 * L3 framing [25:24]
210 * IP ToS [23:16]
211 * IP proto [15:08]
212 * IP Fragm [7]
213 * Non 1st frag [6]
214 * IP Authen [5]
215 * TTL range [4:3]
216 * PPPoE session [2]
217 * Reserved [1]
218 * UDF_Valid[8] [0]
219 */
39cdd349
FF
220 core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
221 ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT,
7318166c
FF
222 CORE_CFP_DATA_PORT(6));
223
224 /* UDF_Valid[7:0] [31:24]
225 * S-Tag [23:8]
226 * C-Tag [7:0]
227 */
228 core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
229
230 /* C-Tag [31:24]
231 * UDF_n_A8 [23:8]
232 * UDF_n_A7 [7:0]
233 */
234 core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
235
236 /* UDF_n_A7 [31:24]
237 * UDF_n_A6 [23:8]
238 * UDF_n_A5 [7:0]
239 */
240 core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
241 CORE_CFP_DATA_PORT(3));
242
243 /* UDF_n_A5 [31:24]
244 * UDF_n_A4 [23:8]
245 * UDF_n_A3 [7:0]
246 */
247 reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
248 (u32)be16_to_cpu(v4_spec->psrc) << 8 |
249 (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
250 core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
251
252 /* UDF_n_A3 [31:24]
253 * UDF_n_A2 [23:8]
254 * UDF_n_A1 [7:0]
255 */
256 reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
257 (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
258 (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
259 core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
260
261 /* UDF_n_A1 [31:24]
262 * UDF_n_A0 [23:8]
263 * Reserved [7:4]
264 * Slice ID [3:2]
265 * Slice valid [1:0]
266 */
267 reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
268 (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
269 SLICE_NUM(slice_num) | SLICE_VALID;
270 core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
271
7318166c
FF
272 /* Mask with the specific layout for IPv4 packets */
273 core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
274
275 /* Mask all but valid UDFs */
276 core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
277
278 /* Mask all */
279 core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
280
281 /* All other UDFs should be matched with the filter */
282 core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
283 core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
284 core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
285 core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
286
7318166c
FF
287 /* Insert into TCAM now */
288 bcm_sf2_cfp_rule_addr_set(priv, rule_index);
289
290 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
291 if (ret) {
292 pr_err("TCAM entry at addr %d failed\n", rule_index);
293 return ret;
294 }
295
33061458
FF
296 /* Insert into Action and policer RAMs now */
297 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num, queue_num);
298 if (ret)
7318166c 299 return ret;
7318166c
FF
300
301 /* Turn on CFP for this rule now */
302 reg = core_readl(priv, CORE_CFP_CTL_REG);
303 reg |= BIT(port);
304 core_writel(priv, reg, CORE_CFP_CTL_REG);
305
306 /* Flag the rule as being used and return it */
307 set_bit(rule_index, priv->cfp.used);
308 fs->location = rule_index;
309
310 return 0;
311}
312
33061458
FF
313static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
314 struct ethtool_rx_flow_spec *fs)
315{
316 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
317 unsigned int queue_num, port_num;
318 int ret;
319
320 /* Check for unsupported extensions */
321 if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
322 fs->m_ext.data[1]))
323 return -EINVAL;
324
325 if (fs->location != RX_CLS_LOC_ANY &&
326 test_bit(fs->location, priv->cfp.used))
327 return -EBUSY;
328
329 if (fs->location != RX_CLS_LOC_ANY &&
330 fs->location > bcm_sf2_cfp_rule_size(priv))
331 return -EINVAL;
332
333 /* We do not support discarding packets, check that the
334 * destination port is enabled and that we are within the
335 * number of ports supported by the switch
336 */
337 port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;
338
339 if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
340 !(BIT(port_num) & ds->enabled_port_mask) ||
341 port_num >= priv->hw_params.num_ports)
342 return -EINVAL;
343 /*
344 * We have a small oddity where Port 6 just does not have a
345 * valid bit here (so we substract by one).
346 */
347 queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
348 if (port_num >= 7)
349 port_num -= 1;
350
351 ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num, queue_num, fs);
352 if (ret)
353 return ret;
354
355 return 0;
356}
357
7318166c
FF
358static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
359 u32 loc)
360{
361 int ret;
362 u32 reg;
363
364 /* Refuse deletion of unused rules, and the default reserved rule */
365 if (!test_bit(loc, priv->cfp.used) || loc == 0)
366 return -EINVAL;
367
368 /* Indicate which rule we want to read */
369 bcm_sf2_cfp_rule_addr_set(priv, loc);
370
371 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
372 if (ret)
373 return ret;
374
375 /* Clear its valid bits */
376 reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
377 reg &= ~SLICE_VALID;
378 core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
379
380 /* Write back this entry into the TCAM now */
381 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
382 if (ret)
383 return ret;
384
385 clear_bit(loc, priv->cfp.used);
386
387 return 0;
388}
389
390static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
391{
392 unsigned int i;
393
394 for (i = 0; i < sizeof(flow->m_u); i++)
395 flow->m_u.hdata[i] ^= 0xff;
396
397 flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
398 flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
399 flow->m_ext.data[0] ^= cpu_to_be32(~0);
400 flow->m_ext.data[1] ^= cpu_to_be32(~0);
401}
402
33061458
FF
/* Reassemble the IPv4 source/destination addresses and L4 ports of the
 * currently-addressed TCAM entry from CORE_CFP_DATA_PORT(3..0) into
 * @v4_spec, setting the corresponding fields of @v4_m_spec to all-ones
 * (callers run bcm_sf2_invert_masks() afterwards).
 *
 * The fields are scattered across register boundaries, so each value is
 * stitched together from consecutive reads going from DATA_PORT(3) down to
 * DATA_PORT(0). Returns -EINVAL if the entry's SLICE_VALID bit is clear.
 */
static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_tcpip4_spec *v4_spec,
				     struct ethtool_tcpip4_spec *v4_m_spec)
{
	u16 src_dst_port;
	u32 reg, ipv4;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
	/* src port [15:8] */
	src_dst_port = reg << 8;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
	/* src port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_m_spec->pdst = cpu_to_be16(~0);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
	v4_m_spec->psrc = cpu_to_be16(~0);

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;
	reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);
	v4_m_spec->ip4dst = cpu_to_be32(~0);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));

	/* Bail out early on an invalid (e.g. deleted) entry */
	if (!(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);
	v4_m_spec->ip4src = cpu_to_be32(~0);

	return 0;
}
449
7318166c
FF
450static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
451 struct ethtool_rxnfc *nfc, bool search)
452{
33061458 453 struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec;
7318166c 454 unsigned int queue_num;
33061458 455 u32 reg;
7318166c
FF
456 int ret;
457
458 if (!search) {
459 bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
460
461 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
462 if (ret)
463 return ret;
464
465 reg = core_readl(priv, CORE_ACT_POL_DATA0);
466
467 ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
468 if (ret)
469 return ret;
470 } else {
471 reg = core_readl(priv, CORE_ACT_POL_DATA0);
472 }
473
474 /* Extract the destination port */
475 nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
476 DST_MAP_IB_MASK) - 1;
477
478 /* There is no Port 6, so we compensate for that here */
479 if (nfc->fs.ring_cookie >= 6)
480 nfc->fs.ring_cookie++;
152b6fd6 481 nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
7318166c
FF
482
483 /* Extract the destination queue */
484 queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
485 nfc->fs.ring_cookie += queue_num;
486
487 /* Extract the IP protocol */
488 reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
489 switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
490 case IPPROTO_TCP:
491 nfc->fs.flow_type = TCP_V4_FLOW;
492 v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
33061458 493 v4_m_spec = &nfc->fs.m_u.tcp_ip4_spec;
7318166c
FF
494 break;
495 case IPPROTO_UDP:
496 nfc->fs.flow_type = UDP_V4_FLOW;
497 v4_spec = &nfc->fs.h_u.udp_ip4_spec;
33061458 498 v4_m_spec = &nfc->fs.m_u.udp_ip4_spec;
7318166c
FF
499 break;
500 default:
501 /* Clear to exit the search process */
502 if (search)
503 core_readl(priv, CORE_CFP_DATA_PORT(7));
504 return -EINVAL;
505 }
506
39cdd349 507 nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
33061458
FF
508 if (v4_spec) {
509 v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
510 ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, v4_spec, v4_m_spec);
511 }
7318166c 512
33061458
FF
513 if (ret)
514 return ret;
7318166c
FF
515
516 /* Read last to avoid next entry clobbering the results during search
517 * operations
518 */
519 reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
520 if (!(reg & 1 << port))
521 return -EINVAL;
522
523 bcm_sf2_invert_masks(&nfc->fs);
524
525 /* Put the TCAM size here */
526 nfc->data = bcm_sf2_cfp_rule_size(priv);
527
528 return 0;
529}
530
/* We implement the search doing a TCAM search operation: walk every valid
 * TCAM entry starting at index 1 (entry #0 is reserved), decode each hit
 * into @nfc and record its address in @rule_locs.
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;
	int ret;
	u32 reg;

	/* Do not poll on OP_STR_DONE to be self-clearing for search
	 * operations, we cannot use bcm_sf2_cfp_op here because it completes
	 * on clearing OP_STR_DONE which won't clear until the entire search
	 * operation is over.
	 */
	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= index << XCESS_ADDR_SHIFT;
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		/* Wait for results to be ready */
		reg = core_readl(priv, CORE_CFP_ACC);

		/* Extract the address we are searching */
		index = reg >> XCESS_ADDR_SHIFT;
		index &= XCESS_ADDR_MASK;

		/* We have a valid search result, so flag it accordingly */
		if (reg & SEARCH_STS) {
			/* Entries we cannot decode (non IPv4 TCP/UDP, or not
			 * matching this port) are skipped, not fatal.
			 */
			ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
			if (ret)
				continue;

			rule_locs[rules_cnt] = index;
			rules_cnt++;
		}

		/* Search is over break out */
		if (!(reg & OP_STR_DONE))
			break;

	} while (index < priv->num_cfp_rules);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}
582
583int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
584 struct ethtool_rxnfc *nfc, u32 *rule_locs)
585{
586 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
587 int ret = 0;
588
589 mutex_lock(&priv->cfp.lock);
590
591 switch (nfc->cmd) {
592 case ETHTOOL_GRXCLSRLCNT:
593 /* Subtract the default, unusable rule */
594 nfc->rule_cnt = bitmap_weight(priv->cfp.used,
df191632 595 priv->num_cfp_rules) - 1;
7318166c
FF
596 /* We support specifying rule locations */
597 nfc->data |= RX_CLS_LOC_SPECIAL;
598 break;
599 case ETHTOOL_GRXCLSRULE:
600 ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
601 break;
602 case ETHTOOL_GRXCLSRLALL:
603 ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
604 break;
605 default:
606 ret = -EOPNOTSUPP;
607 break;
608 }
609
610 mutex_unlock(&priv->cfp.lock);
611
612 return ret;
613}
614
615int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
616 struct ethtool_rxnfc *nfc)
617{
618 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
619 int ret = 0;
620
621 mutex_lock(&priv->cfp.lock);
622
623 switch (nfc->cmd) {
624 case ETHTOOL_SRXCLSRLINS:
625 ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
626 break;
627
628 case ETHTOOL_SRXCLSRLDEL:
629 ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
630 break;
631 default:
632 ret = -EOPNOTSUPP;
633 break;
634 }
635
636 mutex_unlock(&priv->cfp.lock);
637
638 return ret;
639}
640
641int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
642{
643 unsigned int timeout = 1000;
644 u32 reg;
645
646 reg = core_readl(priv, CORE_CFP_ACC);
647 reg |= TCAM_RESET;
648 core_writel(priv, reg, CORE_CFP_ACC);
649
650 do {
651 reg = core_readl(priv, CORE_CFP_ACC);
652 if (!(reg & TCAM_RESET))
653 break;
654
655 cpu_relax();
656 } while (timeout--);
657
658 if (!timeout)
659 return -ETIMEDOUT;
660
661 return 0;
662}