/* Source: drivers/net/ethernet/aquantia/atlantic/aq_filters.c
 * (mirror_ubuntu-jammy-kernel.git, git.proxmox.com;
 *  branch context: "slip: Fix use-after-free Read in slip_open")
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2014-2017 aQuantia Corporation. */
3
4 /* File aq_filters.c: RX filters related functions. */
5
6 #include "aq_filters.h"
7
8 static bool __must_check
9 aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
10 {
11 if (fsp->flow_type & FLOW_MAC_EXT)
12 return false;
13
14 switch (fsp->flow_type & ~FLOW_EXT) {
15 case ETHER_FLOW:
16 case TCP_V4_FLOW:
17 case UDP_V4_FLOW:
18 case SCTP_V4_FLOW:
19 case TCP_V6_FLOW:
20 case UDP_V6_FLOW:
21 case SCTP_V6_FLOW:
22 case IPV4_FLOW:
23 case IPV6_FLOW:
24 return true;
25 case IP_USER_FLOW:
26 switch (fsp->h_u.usr_ip4_spec.proto) {
27 case IPPROTO_TCP:
28 case IPPROTO_UDP:
29 case IPPROTO_SCTP:
30 case IPPROTO_IP:
31 return true;
32 default:
33 return false;
34 }
35 case IPV6_USER_FLOW:
36 switch (fsp->h_u.usr_ip6_spec.l4_proto) {
37 case IPPROTO_TCP:
38 case IPPROTO_UDP:
39 case IPPROTO_SCTP:
40 case IPPROTO_IP:
41 return true;
42 default:
43 return false;
44 }
45 default:
46 return false;
47 }
48
49 return false;
50 }
51
52 static bool __must_check
53 aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
54 struct ethtool_rx_flow_spec *fsp2)
55 {
56 if (fsp1->flow_type != fsp2->flow_type ||
57 memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
58 memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
59 memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
60 memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
61 return false;
62
63 return true;
64 }
65
66 static bool __must_check
67 aq_rule_already_exists(struct aq_nic_s *aq_nic,
68 struct ethtool_rx_flow_spec *fsp)
69 {
70 struct aq_rx_filter *rule;
71 struct hlist_node *aq_node2;
72 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
73
74 hlist_for_each_entry_safe(rule, aq_node2,
75 &rx_fltrs->filter_list, aq_node) {
76 if (rule->aq_fsp.location == fsp->location)
77 continue;
78 if (aq_match_filter(&rule->aq_fsp, fsp)) {
79 netdev_err(aq_nic->ndev,
80 "ethtool: This filter is already set\n");
81 return true;
82 }
83 }
84
85 return false;
86 }
87
/* Validate the location and IPv4/IPv6 consistency of an L3/L4 filter.
 *
 * Mixing IPv4 and IPv6 L3/L4 filters is not allowed, and an IPv6 filter
 * may only start at the first slot of either half of the L3/L4 range
 * (IPv6 usage is tracked in blocks of four slots, see aq_set_data_fl3l4()).
 *
 * Returns 0 when the request is acceptable, -EINVAL otherwise.
 */
static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
				  struct aq_hw_rx_fltrs_s *rx_fltrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
	    fsp->location > AQ_RX_LAST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FL3L4,
			   AQ_RX_LAST_LOC_FL3L4);
		return -EINVAL;
	}
	/* On the mixed-family error paths below, is_ipv6 is flipped back to
	 * the currently active family before failing, undoing the tentative
	 * assignment made by the caller (aq_check_filter()).
	 */
	if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
		rx_fltrs->fl3l4.is_ipv6 = false;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
		rx_fltrs->fl3l4.is_ipv6 = true;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (rx_fltrs->fl3l4.is_ipv6 &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified location for ipv6 must be %d or %d",
			   AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
		return -EINVAL;
	}

	return 0;
}
121
122 static int __must_check
123 aq_check_approve_fl2(struct aq_nic_s *aq_nic,
124 struct aq_hw_rx_fltrs_s *rx_fltrs,
125 struct ethtool_rx_flow_spec *fsp)
126 {
127 if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
128 fsp->location > AQ_RX_LAST_LOC_FETHERT) {
129 netdev_err(aq_nic->ndev,
130 "ethtool: location must be in range [%d, %d]",
131 AQ_RX_FIRST_LOC_FETHERT,
132 AQ_RX_LAST_LOC_FETHERT);
133 return -EINVAL;
134 }
135
136 if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
137 fsp->m_u.ether_spec.h_proto == 0U) {
138 netdev_err(aq_nic->ndev,
139 "ethtool: proto (ether_type) parameter must be specified");
140 return -EINVAL;
141 }
142
143 return 0;
144 }
145
146 static int __must_check
147 aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
148 struct aq_hw_rx_fltrs_s *rx_fltrs,
149 struct ethtool_rx_flow_spec *fsp)
150 {
151 if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
152 fsp->location > AQ_RX_LAST_LOC_FVLANID) {
153 netdev_err(aq_nic->ndev,
154 "ethtool: location must be in range [%d, %d]",
155 AQ_RX_FIRST_LOC_FVLANID,
156 AQ_RX_LAST_LOC_FVLANID);
157 return -EINVAL;
158 }
159
160 if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
161 (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
162 aq_nic->active_vlans))) {
163 netdev_err(aq_nic->ndev,
164 "ethtool: unknown vlan-id specified");
165 return -EINVAL;
166 }
167
168 if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
169 netdev_err(aq_nic->ndev,
170 "ethtool: queue number must be in range [0, %d]",
171 aq_nic->aq_nic_cfg.num_rss_queues - 1);
172 return -EINVAL;
173 }
174 return 0;
175 }
176
/* Validate an ethtool flow spec and dispatch to the class-specific
 * checker (VLAN, ethertype/L2 or L3/L4).
 *
 * For L3/L4 flow types the address family of the request is recorded in
 * rx_fltrs->fl3l4.is_ipv6 before validation.
 *
 * Returns 0 when the rule is acceptable, -EINVAL otherwise.
 */
static int __must_check
aq_check_filter(struct aq_nic_s *aq_nic,
		struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	if (fsp->flow_type & FLOW_EXT) {
		/* The vlan_tci mask selects the filter class: a full VID
		 * mask means a VLAN filter, a priority-only mask means an
		 * L2/ethertype filter.
		 */
		if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
			err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
		} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
		} else {
			netdev_err(aq_nic->ndev,
				   "ethtool: invalid vlan mask 0x%x specified",
				   be16_to_cpu(fsp->m_ext.vlan_tci));
			err = -EINVAL;
		}
	} else {
		switch (fsp->flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IPV4_FLOW:
		case IP_USER_FLOW:
			/* Tentative: aq_check_approve_fl3l4() reverts this
			 * on a mixed-family error.
			 */
			rx_fltrs->fl3l4.is_ipv6 = false;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_FLOW:
		case IPV6_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = true;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		default:
			netdev_err(aq_nic->ndev,
				   "ethtool: unknown flow-type specified");
			err = -EINVAL;
		}
	}

	return err;
}
225
226 static bool __must_check
227 aq_rule_is_not_support(struct aq_nic_s *aq_nic,
228 struct ethtool_rx_flow_spec *fsp)
229 {
230 bool rule_is_not_support = false;
231
232 if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
233 netdev_err(aq_nic->ndev,
234 "ethtool: Please, to enable the RX flow control:\n"
235 "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
236 rule_is_not_support = true;
237 } else if (!aq_rule_is_approve(fsp)) {
238 netdev_err(aq_nic->ndev,
239 "ethtool: The specified flow type is not supported\n");
240 rule_is_not_support = true;
241 } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
242 (fsp->h_u.tcp_ip4_spec.tos ||
243 fsp->h_u.tcp_ip6_spec.tclass)) {
244 netdev_err(aq_nic->ndev,
245 "ethtool: The specified tos tclass are not supported\n");
246 rule_is_not_support = true;
247 } else if (fsp->flow_type & FLOW_MAC_EXT) {
248 netdev_err(aq_nic->ndev,
249 "ethtool: MAC_EXT is not supported");
250 rule_is_not_support = true;
251 }
252
253 return rule_is_not_support;
254 }
255
256 static bool __must_check
257 aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
258 struct ethtool_rx_flow_spec *fsp)
259 {
260 bool rule_is_not_correct = false;
261
262 if (!aq_nic) {
263 rule_is_not_correct = true;
264 } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
265 netdev_err(aq_nic->ndev,
266 "ethtool: The specified number %u rule is invalid\n",
267 fsp->location);
268 rule_is_not_correct = true;
269 } else if (aq_check_filter(aq_nic, fsp)) {
270 rule_is_not_correct = true;
271 } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
272 if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
273 netdev_err(aq_nic->ndev,
274 "ethtool: The specified action is invalid.\n"
275 "Maximum allowable value action is %u.\n",
276 aq_nic->aq_nic_cfg.num_rss_queues - 1);
277 rule_is_not_correct = true;
278 }
279 }
280
281 return rule_is_not_correct;
282 }
283
284 static int __must_check
285 aq_check_rule(struct aq_nic_s *aq_nic,
286 struct ethtool_rx_flow_spec *fsp)
287 {
288 int err = 0;
289
290 if (aq_rule_is_not_correct(aq_nic, fsp))
291 err = -EINVAL;
292 else if (aq_rule_is_not_support(aq_nic, fsp))
293 err = -EOPNOTSUPP;
294 else if (aq_rule_already_exists(aq_nic, fsp))
295 err = -EEXIST;
296
297 return err;
298 }
299
300 static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
301 struct aq_rx_filter *aq_rx_fltr,
302 struct aq_rx_filter_l2 *data, bool add)
303 {
304 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
305
306 memset(data, 0, sizeof(*data));
307
308 data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
309
310 if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
311 data->queue = fsp->ring_cookie;
312 else
313 data->queue = -1;
314
315 data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
316 data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
317 == VLAN_PRIO_MASK;
318 data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
319 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
320 }
321
322 static int aq_add_del_fether(struct aq_nic_s *aq_nic,
323 struct aq_rx_filter *aq_rx_fltr, bool add)
324 {
325 struct aq_rx_filter_l2 data;
326 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
327 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
328
329 aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
330
331 if (unlikely(!aq_hw_ops->hw_filter_l2_set))
332 return -EOPNOTSUPP;
333 if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
334 return -EOPNOTSUPP;
335
336 if (add)
337 return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
338 else
339 return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
340 }
341
342 static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
343 {
344 int i;
345
346 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
347 if (aq_vlans[i].enable &&
348 aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
349 aq_vlans[i].vlan_id == vlan) {
350 return true;
351 }
352 }
353
354 return false;
355 }
356
/* Function rebuilds array of vlan filters so that filters with assigned
 * queue have a precedence over just vlans on the interface: slots holding
 * a queue-assigned entry are kept, all other slots are refilled from the
 * active_vlans bitmap in ascending VID order.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
			     unsigned long *active_vlans,
			     struct aq_rx_filter_vlan *aq_vlans)
{
	bool vlan_busy = false;
	int vlan = -1;
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		/* Keep entries that were installed with an explicit queue. */
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
			continue;
		do {
			vlan = find_next_bit(active_vlans,
					     VLAN_N_VID,
					     vlan + 1);
			/* No more active vlans: clear this slot.  The
			 * `continue` re-evaluates the do/while condition,
			 * which is now false, terminating the inner loop.
			 */
			if (vlan == VLAN_N_VID) {
				aq_vlans[i].enable = 0U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = 0;
				continue;
			}

			/* Skip vlans already served by a queue-assigned
			 * entry; otherwise claim this slot for the vlan.
			 */
			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
			if (!vlan_busy) {
				aq_vlans[i].enable = 1U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = vlan;
			}
		} while (vlan_busy && vlan != VLAN_N_VID);
	}
}
392
/* Program one slot of the shadow VLAN filter table from an ethtool rule.
 *
 * @aq_nic is unused.  @add == false merely clears the slot; @add == true
 * additionally disables any plain-membership entry carrying the same VID
 * (the queue-assigned entry takes precedence) and fills in the slot.
 *
 * Always returns 0.
 */
static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_vlan *aq_vlans, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
	int i;

	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));

	if (!add)
		return 0;

	/* remove vlan if it was in table without queue assignment */
	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].vlan_id ==
		   (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
			aq_vlans[i].enable = false;
		}
	}

	aq_vlans[location].location = location;
	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
				     & VLAN_VID_MASK;
	/* Only the low 5 bits of the ring cookie are kept -- presumably the
	 * width of the hardware queue field; TODO confirm against HW docs.
	 */
	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
	aq_vlans[location].enable = 1U;

	return 0;
}
422
/* Remove the VLAN rx-flow rule whose stored vlan_tci equals @vlan_id.
 *
 * Returns the deletion status from aq_del_rxnfc_rule(), or -ENOENT when
 * no matching VLAN rule exists.
 *
 * NOTE(review): the stored h_ext.vlan_tci is compared to @vlan_id without
 * masking out PCP/DEI bits (VLAN_VID_MASK) -- presumably callers only
 * install rules with a bare VID here; verify against callers.
 */
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
			break;
	}
	if (rule && rule->type == aq_rx_filter_vlan &&
	    be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
		/* Only fs.location is initialized: it is the only field
		 * aq_del_rxnfc_rule() reads from @cmd.
		 */
		struct ethtool_rxnfc cmd;

		cmd.fs.location = rule->aq_fsp.location;
		return aq_del_rxnfc_rule(aq_nic, &cmd);
	}

	return -ENOENT;
}
444
445 static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
446 struct aq_rx_filter *aq_rx_fltr, bool add)
447 {
448 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
449
450 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
451 return -EOPNOTSUPP;
452
453 aq_set_data_fvlan(aq_nic,
454 aq_rx_fltr,
455 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
456 add);
457
458 return aq_filters_vlans_update(aq_nic);
459 }
460
/* Translate an ethtool L3/L4 flow spec into the hardware descriptor
 * @data and maintain the active_ipv4/active_ipv6 slot-usage masks.
 *
 * For @add == false only the usage mask is released; the mostly-zeroed
 * @data (cmd == 0, location set) then clears the hardware slot.
 *
 * Always returns 0.
 */
static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_l3l4 *data, bool add)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
	data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);

	if (!add) {
		/* IPv6 usage is tracked in blocks of four slots, hence the
		 * division by 4 in its mask.
		 */
		if (!data->is_ipv6)
			rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
		else
			rx_fltrs->fl3l4.active_ipv6 &=
				~BIT((data->location) / 4);

		return 0;
	}

	data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;

	/* Select the L4 protocol comparison; user flows (IP_USER_FLOW /
	 * IPV6_USER_FLOW) fall through to an L3-only match.
	 */
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		data->cmd |= HW_ATL_RX_UDP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		data->cmd |= HW_ATL_RX_SCTP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	default:
		break;
	}

	/* Copy the L3 addresses (host byte order) and mark slot(s) busy. */
	if (!data->is_ipv6) {
		data->ip_src[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
		data->ip_dst[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
		rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
	} else {
		int i;

		rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			data->ip_dst[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
			data->ip_src[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
		}
		data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
	}
	/* L4 ports apply to everything except plain user (L3-only) flows. */
	if (fsp->flow_type != IP_USER_FLOW &&
	    fsp->flow_type != IPV6_USER_FLOW) {
		if (!data->is_ipv6) {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip4_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip4_spec.psrc);
		} else {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip6_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip6_spec.psrc);
		}
	}
	/* Enable only the comparisons for which a value was supplied; note
	 * IPv4 address comparisons are gated on ip_src[0]/ip_dst[0] while
	 * IPv6 relies on HW_ATL_RX_ENABLE_L3_IPV6 set above.
	 */
	if (data->ip_src[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
	if (data->ip_dst[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
	if (data->p_dst)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
	if (data->p_src)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
	/* Route to a host queue, or drop on RX_CLS_FLOW_DISC. */
	if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
		data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
		data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
	} else {
		data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
	}

	return 0;
}
554
555 static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
556 const struct aq_hw_ops *aq_hw_ops,
557 struct aq_rx_filter_l3l4 *data)
558 {
559 if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
560 return -EOPNOTSUPP;
561
562 return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
563 }
564
565 static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
566 struct aq_rx_filter *aq_rx_fltr, bool add)
567 {
568 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
569 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
570 struct aq_rx_filter_l3l4 data;
571
572 if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
573 aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
574 aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
575 return -EINVAL;
576
577 return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
578 }
579
/* Dispatch installation (@add == true) or removal (@add == false) of a
 * rule to the class-specific handler, recording the filter class in
 * aq_rx_fltr->type.
 *
 * Returns the handler status, or -EINVAL for an unrecognized spec.
 */
static int aq_add_del_rule(struct aq_nic_s *aq_nic,
			   struct aq_rx_filter *aq_rx_fltr, bool add)
{
	int err = -EINVAL;

	if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
		/* Same classification as aq_check_filter(): full VID mask =>
		 * VLAN filter, priority-only mask => ethertype filter.
		 */
		if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
		    == VLAN_VID_MASK) {
			aq_rx_fltr->type = aq_rx_filter_vlan;
			err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
		} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
			   == VLAN_PRIO_MASK) {
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
		}
	} else {
		switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IP_USER_FLOW:
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_USER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_l3l4;
			err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

	return err;
}
620
/* Maintain the location-sorted rule list.
 *
 * Any existing rule at @index is unprogrammed from hardware, unlinked and
 * freed.  When @aq_rx_fltr is non-NULL it is then linked in location
 * order.  @cmd is unused.
 *
 * Returns 0 once a new rule is linked; otherwise the removal status
 * (-EINVAL when no rule occupied @index).
 */
static int aq_update_table_filters(struct aq_nic_s *aq_nic,
				   struct aq_rx_filter *aq_rx_fltr, u16 index,
				   struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL, *parent = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	/* Find the insertion point: parent ends up as the last rule whose
	 * location is below @index.
	 */
	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location >= index)
			break;
		parent = rule;
	}

	if (rule && rule->aq_fsp.location == index) {
		/* Replace in place: drop the old occupant first. */
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

	if (unlikely(!aq_rx_fltr))
		return err;

	INIT_HLIST_NODE(&aq_rx_fltr->aq_node);

	if (parent)
		hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
	else
		hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);

	++rx_fltrs->active_filters;

	return 0;
}
658
659 u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
660 {
661 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
662
663 return rx_fltrs->active_filters;
664 }
665
/* Accessor for the NIC's RX filter bookkeeping structure. */
struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
{
	return &aq_nic->aq_hw_rx_fltrs;
}
670
/* ethtool insert-rule (ETHTOOL_SRXCLSRLINS) entry point: validate @cmd,
 * allocate a copy of the flow spec, link it into the sorted rule list
 * and program it into hardware.
 *
 * Returns 0 on success, a negative errno otherwise (validation result,
 * -ENOMEM, or the hardware programming status); on failure the rule is
 * unlinked again and freed.
 */
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *aq_rx_fltr;
	int err = 0;

	err = aq_check_rule(aq_nic, fsp);
	if (err)
		goto err_exit;

	aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
	if (unlikely(!aq_rx_fltr)) {
		err = -ENOMEM;
		goto err_exit;
	}

	memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));

	/* Link first (replacing any rule at the same location) ... */
	err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
	if (unlikely(err))
		goto err_free;

	/* ... then program the hardware; roll back the link on failure. */
	err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
	if (unlikely(err)) {
		hlist_del(&aq_rx_fltr->aq_node);
		--rx_fltrs->active_filters;
		goto err_free;
	}

	return 0;

err_free:
	kfree(aq_rx_fltr);
err_exit:
	return err;
}
709
/* ethtool delete-rule (ETHTOOL_SRXCLSRLDEL) entry point: unprogram,
 * unlink and free the rule stored at cmd->fs.location.
 *
 * Returns the hardware removal status, or -EINVAL when no rule occupies
 * that location.
 */
int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == cmd->fs.location)
			break;
	}

	if (rule && rule->aq_fsp.location == cmd->fs.location) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}
	return err;
}
731
/* ethtool get-rule (ETHTOOL_GRXCLSRULE) entry point: copy the flow spec
 * stored at cmd->fs.location back into @cmd.
 *
 * The list is kept sorted by location (see aq_update_table_filters()),
 * so the first entry with location >= the requested one either matches
 * or proves the rule absent.
 *
 * Returns 0 on success, -EINVAL when no rule occupies that location.
 */
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node)
		if (fsp->location <= rule->aq_fsp.location)
			break;

	if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
		return -EINVAL;

	memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));

	return 0;
}
752
753 int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
754 u32 *rule_locs)
755 {
756 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
757 struct hlist_node *aq_node2;
758 struct aq_rx_filter *rule;
759 int count = 0;
760
761 cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
762
763 hlist_for_each_entry_safe(rule, aq_node2,
764 &rx_fltrs->filter_list, aq_node) {
765 if (unlikely(count == cmd->rule_cnt))
766 return -EMSGSIZE;
767
768 rule_locs[count++] = rule->aq_fsp.location;
769 }
770
771 cmd->rule_cnt = count;
772
773 return 0;
774 }
775
/* Unprogram, unlink and free every installed rule.
 *
 * Returns 0 on success; on a hardware error the list is left partially
 * cleared and the error is returned.
 */
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, false);
		if (err)
			goto err_exit;
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

err_exit:
	return err;
}
796
797 int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
798 {
799 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
800 struct hlist_node *aq_node2;
801 struct aq_rx_filter *rule;
802 int err = 0;
803
804 hlist_for_each_entry_safe(rule, aq_node2,
805 &rx_fltrs->filter_list, aq_node) {
806 err = aq_add_del_rule(aq_nic, rule, true);
807 if (err)
808 goto err_exit;
809 }
810
811 err_exit:
812 return err;
813 }
814
/* Rebuild and re-program the whole VLAN filter table, then (de)activate
 * hardware VLAN filtering depending on how many vlans are active: when
 * more vlans are active than there are filter slots (or none at all),
 * the device is kept in VLAN promiscuous mode instead.
 *
 * Returns 0 on success or a negative errno from the hardware ops.
 */
int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int hweight = 0;
	int err = 0;
	int i;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		/* Count active vlans to decide below whether hardware
		 * filtering can cover them all.
		 */
		for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
			hweight += hweight_long(aq_nic->active_vlans[i]);

		/* Disable filtering while the table is rewritten. */
		err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
		if (err)
			return err;
	}

	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
					   );
	if (err)
		return err;

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
				!(aq_nic->packet_filter & IFF_PROMISC));
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
		} else {
			/* otherwise stay in promiscuous mode */
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
		}
	}

	return err;
}
859
/* Disable hardware VLAN filtering: forget all tracked vlans, rebuild the
 * shadow table, force VLAN promiscuous mode and push the emptied table
 * to hardware.
 *
 * Returns 0 on success or a negative errno from the hardware ops.
 */
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int err = 0;

	/* NOTE(review): the shadow table is rebuilt before the hw ops are
	 * checked below; harmless, as the rebuild touches driver state only.
	 */
	memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
	if (err)
		return err;
	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
					   );
	return err;
}