mirror_ubuntu-bionic-kernel.git: drivers/net/ethernet/mellanox/mlx4/en_netdev.c
net/mlx4_en: Optimized single ring steering
1 /*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #include <linux/bpf.h>
35 #include <linux/etherdevice.h>
36 #include <linux/tcp.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/slab.h>
40 #include <linux/hash.h>
41 #include <net/ip.h>
42 #include <net/busy_poll.h>
43 #include <net/vxlan.h>
44 #include <net/devlink.h>
45
46 #include <linux/mlx4/driver.h>
47 #include <linux/mlx4/device.h>
48 #include <linux/mlx4/cmd.h>
49 #include <linux/mlx4/cq.h>
50
51 #include "mlx4_en.h"
52 #include "en_port.h"
53
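/* Largest MTU usable while an XDP program is attached: each frame must fit
 * in a single page together with XDP_PACKET_HEADROOM, the Ethernet header
 * and two VLAN tags.
 */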
54 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
55 XDP_PACKET_HEADROOM))
56
57 int mlx4_en_setup_tc(struct net_device *dev, u8 up)
58 {
59 struct mlx4_en_priv *priv = netdev_priv(dev);
60 int i;
61 unsigned int offset = 0;
62
63 if (up && up != MLX4_EN_NUM_UP)
64 return -EINVAL;
65
66 netdev_set_num_tc(dev, up);
67
68 /* Partition Tx queues evenly among UPs */
69 for (i = 0; i < up; i++) {
70 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
71 offset += priv->num_tx_rings_p_up;
72 }
73
74 #ifdef CONFIG_MLX4_EN_DCB
75 if (!mlx4_is_slave(priv->mdev->dev)) {
76 if (up) {
77 if (priv->dcbx_cap)
78 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
79 } else {
80 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
81 priv->cee_config.pfc_state = false;
82 }
83 }
84 #endif /* CONFIG_MLX4_EN_DCB */
85
86 return 0;
87 }
88
89 static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
90 u32 chain_index, __be16 proto,
91 struct tc_to_netdev *tc)
92 {
93 if (tc->type != TC_SETUP_MQPRIO)
94 return -EINVAL;
95
96 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
97
98 return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
99 }
100
101 #ifdef CONFIG_RFS_ACCEL
102
103 struct mlx4_en_filter {
104 struct list_head next;
105 struct work_struct work;
106
107 u8 ip_proto;
108 __be32 src_ip;
109 __be32 dst_ip;
110 __be16 src_port;
111 __be16 dst_port;
112
113 int rxq_index;
114 struct mlx4_en_priv *priv;
115 u32 flow_id; /* RFS infrastructure id */
116 int id; /* mlx4_en driver id */
117 u64 reg_id; /* Flow steering API id */
118 u8 activated; /* Used to prevent expiry before filter
119 * is attached
120 */
121 struct hlist_node filter_chain;
122 };
123
124 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
125
126 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
127 {
128 switch (ip_proto) {
129 case IPPROTO_UDP:
130 return MLX4_NET_TRANS_RULE_ID_UDP;
131 case IPPROTO_TCP:
132 return MLX4_NET_TRANS_RULE_ID_TCP;
133 default:
134 return MLX4_NET_TRANS_RULE_NUM;
135 }
136 };
137
138 /* Must not acquire state_lock, as its corresponding work_sync
139 * is done under it.
140 */
141 static void mlx4_en_filter_work(struct work_struct *work)
142 {
143 struct mlx4_en_filter *filter = container_of(work,
144 struct mlx4_en_filter,
145 work);
146 struct mlx4_en_priv *priv = filter->priv;
147 struct mlx4_spec_list spec_tcp_udp = {
148 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
149 {
150 .tcp_udp = {
151 .dst_port = filter->dst_port,
152 .dst_port_msk = (__force __be16)-1,
153 .src_port = filter->src_port,
154 .src_port_msk = (__force __be16)-1,
155 },
156 },
157 };
158 struct mlx4_spec_list spec_ip = {
159 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
160 {
161 .ipv4 = {
162 .dst_ip = filter->dst_ip,
163 .dst_ip_msk = (__force __be32)-1,
164 .src_ip = filter->src_ip,
165 .src_ip_msk = (__force __be32)-1,
166 },
167 },
168 };
169 struct mlx4_spec_list spec_eth = {
170 .id = MLX4_NET_TRANS_RULE_ID_ETH,
171 };
172 struct mlx4_net_trans_rule rule = {
173 .list = LIST_HEAD_INIT(rule.list),
174 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
175 .exclusive = 1,
176 .allow_loopback = 1,
177 .promisc_mode = MLX4_FS_REGULAR,
178 .port = priv->port,
179 .priority = MLX4_DOMAIN_RFS,
180 };
181 int rc;
182 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
183
184 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
185 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
186 filter->ip_proto);
187 goto ignore;
188 }
189 list_add_tail(&spec_eth.list, &rule.list);
190 list_add_tail(&spec_ip.list, &rule.list);
191 list_add_tail(&spec_tcp_udp.list, &rule.list);
192
193 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
194 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
195 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
196
197 filter->activated = 0;
198
199 if (filter->reg_id) {
200 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
201 if (rc && rc != -ENOENT)
202 en_err(priv, "Error detaching flow. rc = %d\n", rc);
203 }
204
205 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
206 if (rc)
207 en_err(priv, "Error attaching flow. err = %d\n", rc);
208
209 ignore:
210 mlx4_en_filter_rfs_expire(priv);
211
212 filter->activated = 1;
213 }
214
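/* Hash the flow 4-tuple (IP addresses and L4 ports) to pick the
 * priv->filter_hash bucket used for RFS filter lookups.
 */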
215 static inline struct hlist_head *
216 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
217 __be16 src_port, __be16 dst_port)
218 {
219 unsigned long l;
220 int bucket_idx;
221
222 l = (__force unsigned long)src_port |
223 ((__force unsigned long)dst_port << 2);
224 l ^= (__force unsigned long)(src_ip ^ dst_ip);
225
226 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
227
228 return &priv->filter_hash[bucket_idx];
229 }
230
231 static struct mlx4_en_filter *
232 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
233 __be32 dst_ip, u8 ip_proto, __be16 src_port,
234 __be16 dst_port, u32 flow_id)
235 {
236 struct mlx4_en_filter *filter = NULL;
237
238 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
239 if (!filter)
240 return NULL;
241
242 filter->priv = priv;
243 filter->rxq_index = rxq_index;
244 INIT_WORK(&filter->work, mlx4_en_filter_work);
245
246 filter->src_ip = src_ip;
247 filter->dst_ip = dst_ip;
248 filter->ip_proto = ip_proto;
249 filter->src_port = src_port;
250 filter->dst_port = dst_port;
251
252 filter->flow_id = flow_id;
253
254 filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
255
256 list_add_tail(&filter->next, &priv->filters);
257 hlist_add_head(&filter->filter_chain,
258 filter_hash_bucket(priv, src_ip, dst_ip, src_port,
259 dst_port));
260
261 return filter;
262 }
263
264 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
265 {
266 struct mlx4_en_priv *priv = filter->priv;
267 int rc;
268
269 list_del(&filter->next);
270
271 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
272 if (rc && rc != -ENOENT)
273 en_err(priv, "Error detaching flow. rc = %d\n", rc);
274
275 kfree(filter);
276 }
277
278 static inline struct mlx4_en_filter *
279 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
280 u8 ip_proto, __be16 src_port, __be16 dst_port)
281 {
282 struct mlx4_en_filter *filter;
283 struct mlx4_en_filter *ret = NULL;
284
285 hlist_for_each_entry(filter,
286 filter_hash_bucket(priv, src_ip, dst_ip,
287 src_port, dst_port),
288 filter_chain) {
289 if (filter->src_ip == src_ip &&
290 filter->dst_ip == dst_ip &&
291 filter->ip_proto == ip_proto &&
292 filter->src_port == src_port &&
293 filter->dst_port == dst_port) {
294 ret = filter;
295 break;
296 }
297 }
298
299 return ret;
300 }
301
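/* aRFS (ndo_rx_flow_steer) callback: steer an IPv4 TCP/UDP flow to the RX
 * queue of the CPU consuming it. Reuses a matching filter when one exists,
 * otherwise allocates a new one; the actual hardware attach is deferred to
 * the filter work item.
 */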
302 static int
303 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
304 u16 rxq_index, u32 flow_id)
305 {
306 struct mlx4_en_priv *priv = netdev_priv(net_dev);
307 struct mlx4_en_filter *filter;
308 const struct iphdr *ip;
309 const __be16 *ports;
310 u8 ip_proto;
311 __be32 src_ip;
312 __be32 dst_ip;
313 __be16 src_port;
314 __be16 dst_port;
315 int nhoff = skb_network_offset(skb);
316 int ret = 0;
317
318 if (skb->protocol != htons(ETH_P_IP))
319 return -EPROTONOSUPPORT;
320
321 ip = (const struct iphdr *)(skb->data + nhoff);
322 if (ip_is_fragment(ip))
323 return -EPROTONOSUPPORT;
324
325 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
326 return -EPROTONOSUPPORT;
327 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
328
329 ip_proto = ip->protocol;
330 src_ip = ip->saddr;
331 dst_ip = ip->daddr;
332 src_port = ports[0];
333 dst_port = ports[1];
334
335 spin_lock_bh(&priv->filters_lock);
336 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
337 src_port, dst_port);
338 if (filter) {
339 if (filter->rxq_index == rxq_index)
340 goto out;
341
342 filter->rxq_index = rxq_index;
343 } else {
344 filter = mlx4_en_filter_alloc(priv, rxq_index,
345 src_ip, dst_ip, ip_proto,
346 src_port, dst_port, flow_id);
347 if (!filter) {
348 ret = -ENOMEM;
349 goto err;
350 }
351 }
352
353 queue_work(priv->mdev->workqueue, &filter->work);
354
355 out:
356 ret = filter->id;
357 err:
358 spin_unlock_bh(&priv->filters_lock);
359
360 return ret;
361 }
362
363 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
364 {
365 struct mlx4_en_filter *filter, *tmp;
366 LIST_HEAD(del_list);
367
368 spin_lock_bh(&priv->filters_lock);
369 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
370 list_move(&filter->next, &del_list);
371 hlist_del(&filter->filter_chain);
372 }
373 spin_unlock_bh(&priv->filters_lock);
374
375 list_for_each_entry_safe(filter, tmp, &del_list, next) {
376 cancel_work_sync(&filter->work);
377 mlx4_en_filter_free(filter);
378 }
379 }
380
381 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
382 {
383 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
384 LIST_HEAD(del_list);
385 int i = 0;
386
387 spin_lock_bh(&priv->filters_lock);
388 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
389 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
390 break;
391
392 if (filter->activated &&
393 !work_pending(&filter->work) &&
394 rps_may_expire_flow(priv->dev,
395 filter->rxq_index, filter->flow_id,
396 filter->id)) {
397 list_move(&filter->next, &del_list);
398 hlist_del(&filter->filter_chain);
399 } else
400 last_filter = filter;
401
402 i++;
403 }
404
405 if (last_filter && (&last_filter->next != priv->filters.next))
406 list_move(&priv->filters, &last_filter->next);
407
408 spin_unlock_bh(&priv->filters_lock);
409
410 list_for_each_entry_safe(filter, tmp, &del_list, next)
411 mlx4_en_filter_free(filter);
412 }
413 #endif
414
415 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
416 __be16 proto, u16 vid)
417 {
418 struct mlx4_en_priv *priv = netdev_priv(dev);
419 struct mlx4_en_dev *mdev = priv->mdev;
420 int err;
421 int idx;
422
423 en_dbg(HW, priv, "adding VLAN:%d\n", vid);
424
425 set_bit(vid, priv->active_vlans);
426
427 /* Add VID to port VLAN filter */
428 mutex_lock(&mdev->state_lock);
429 if (mdev->device_up && priv->port_up) {
430 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
431 if (err) {
432 en_err(priv, "Failed configuring VLAN filter\n");
433 goto out;
434 }
435 }
436 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
437 if (err)
438 en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
439
440 out:
441 mutex_unlock(&mdev->state_lock);
442 return err;
443 }
444
445 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
446 __be16 proto, u16 vid)
447 {
448 struct mlx4_en_priv *priv = netdev_priv(dev);
449 struct mlx4_en_dev *mdev = priv->mdev;
450 int err = 0;
451
452 en_dbg(HW, priv, "Killing VID:%d\n", vid);
453
454 clear_bit(vid, priv->active_vlans);
455
456 /* Remove VID from port VLAN filter */
457 mutex_lock(&mdev->state_lock);
458 mlx4_unregister_vlan(mdev->dev, priv->port, vid);
459
460 if (mdev->device_up && priv->port_up) {
461 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
462 if (err)
463 en_err(priv, "Failed configuring VLAN filter\n");
464 }
465 mutex_unlock(&mdev->state_lock);
466
467 return err;
468 }
469
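/* Unpack a MAC address held in the low 48 bits of a u64 (most significant
 * byte first) into a byte array, zeroing the two trailing pad bytes.
 */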
470 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
471 {
472 int i;
473 for (i = ETH_ALEN - 1; i >= 0; --i) {
474 dst_mac[i] = src_mac & 0xff;
475 src_mac >>= 8;
476 }
477 memset(&dst_mac[ETH_ALEN], 0, 2);
478 }
479
480
481 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
482 int qpn, u64 *reg_id)
483 {
484 int err;
485
486 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
487 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
488 return 0; /* do nothing */
489
490 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
491 MLX4_DOMAIN_NIC, reg_id);
492 if (err) {
493 en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
494 return err;
495 }
496 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
497 return 0;
498 }
499
500
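/* Attach a unicast steering rule directing traffic for @mac to QP @qpn,
 * using either B0 (unicast GID attach) or device-managed (flow API)
 * steering, depending on the device's steering mode.
 */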
501 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
502 unsigned char *mac, int *qpn, u64 *reg_id)
503 {
504 struct mlx4_en_dev *mdev = priv->mdev;
505 struct mlx4_dev *dev = mdev->dev;
506 int err;
507
508 switch (dev->caps.steering_mode) {
509 case MLX4_STEERING_MODE_B0: {
510 struct mlx4_qp qp;
511 u8 gid[16] = {0};
512
513 qp.qpn = *qpn;
514 memcpy(&gid[10], mac, ETH_ALEN);
515 gid[5] = priv->port;
516
517 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
518 break;
519 }
520 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
521 struct mlx4_spec_list spec_eth = { {NULL} };
522 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
523
524 struct mlx4_net_trans_rule rule = {
525 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
526 .exclusive = 0,
527 .allow_loopback = 1,
528 .promisc_mode = MLX4_FS_REGULAR,
529 .priority = MLX4_DOMAIN_NIC,
530 };
531
532 rule.port = priv->port;
533 rule.qpn = *qpn;
534 INIT_LIST_HEAD(&rule.list);
535
536 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
537 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
538 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
539 list_add_tail(&spec_eth.list, &rule.list);
540
541 err = mlx4_flow_attach(dev, &rule, reg_id);
542 break;
543 }
544 default:
545 return -EINVAL;
546 }
547 if (err)
548 en_warn(priv, "Failed Attaching Unicast\n");
549
550 return err;
551 }
552
553 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
554 unsigned char *mac, int qpn, u64 reg_id)
555 {
556 struct mlx4_en_dev *mdev = priv->mdev;
557 struct mlx4_dev *dev = mdev->dev;
558
559 switch (dev->caps.steering_mode) {
560 case MLX4_STEERING_MODE_B0: {
561 struct mlx4_qp qp;
562 u8 gid[16] = {0};
563
564 qp.qpn = qpn;
565 memcpy(&gid[10], mac, ETH_ALEN);
566 gid[5] = priv->port;
567
568 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
569 break;
570 }
571 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
572 mlx4_flow_detach(dev, reg_id);
573 break;
574 }
575 default:
576 en_err(priv, "Invalid steering mode.\n");
577 }
578 }
579
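/* Register the port MAC address and obtain the base QP number: with A0
 * steering the QP follows from the MAC table index, otherwise a range of
 * one QP is reserved.
 */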
580 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
581 {
582 struct mlx4_en_dev *mdev = priv->mdev;
583 struct mlx4_dev *dev = mdev->dev;
584 int index = 0;
585 int err = 0;
586 int *qpn = &priv->base_qpn;
587 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
588
589 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
590 priv->dev->dev_addr);
591 index = mlx4_register_mac(dev, priv->port, mac);
592 if (index < 0) {
593 err = index;
594 en_err(priv, "Failed adding MAC: %pM\n",
595 priv->dev->dev_addr);
596 return err;
597 }
598
599 en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
600
601 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
602 int base_qpn = mlx4_get_base_qpn(dev, priv->port);
603 *qpn = base_qpn + index;
604 return 0;
605 }
606
607 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
608 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
609 if (err) {
610 en_err(priv, "Failed to reserve qp for mac registration\n");
611 mlx4_unregister_mac(dev, priv->port, mac);
612 return err;
613 }
614
615 return 0;
616 }
617
618 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
619 {
620 struct mlx4_en_dev *mdev = priv->mdev;
621 struct mlx4_dev *dev = mdev->dev;
622 int qpn = priv->base_qpn;
623
624 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
625 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
626 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
627 priv->dev->dev_addr);
628 mlx4_unregister_mac(dev, priv->port, mac);
629 } else {
630 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
631 priv->port, qpn);
632 mlx4_qp_release_range(dev, qpn, 1);
633 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
634 }
635 }
636
637 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
638 unsigned char *new_mac, unsigned char *prev_mac)
639 {
640 struct mlx4_en_dev *mdev = priv->mdev;
641 struct mlx4_dev *dev = mdev->dev;
642 int err = 0;
643 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
644
645 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
646 struct hlist_head *bucket;
647 unsigned int mac_hash;
648 struct mlx4_mac_entry *entry;
649 struct hlist_node *tmp;
650 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
651
652 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
653 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
654 if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
655 mlx4_en_uc_steer_release(priv, entry->mac,
656 qpn, entry->reg_id);
657 mlx4_unregister_mac(dev, priv->port,
658 prev_mac_u64);
659 hlist_del_rcu(&entry->hlist);
660 synchronize_rcu();
661 memcpy(entry->mac, new_mac, ETH_ALEN);
662 entry->reg_id = 0;
663 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
664 hlist_add_head_rcu(&entry->hlist,
665 &priv->mac_hash[mac_hash]);
666 mlx4_register_mac(dev, priv->port, new_mac_u64);
667 err = mlx4_en_uc_steer_add(priv, new_mac,
668 &qpn,
669 &entry->reg_id);
670 if (err)
671 return err;
672 if (priv->tunnel_reg_id) {
673 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
674 priv->tunnel_reg_id = 0;
675 }
676 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
677 &priv->tunnel_reg_id);
678 return err;
679 }
680 }
681 return -EINVAL;
682 }
683
684 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
685 }
686
687 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
688 unsigned char new_mac[ETH_ALEN + 2])
689 {
690 int err = 0;
691
692 if (priv->port_up) {
693 /* Remove old MAC and insert the new one */
694 err = mlx4_en_replace_mac(priv, priv->base_qpn,
695 new_mac, priv->current_mac);
696 if (err)
697 en_err(priv, "Failed changing HW MAC address\n");
698 } else
699 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
700
701 if (!err)
702 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
703
704 return err;
705 }
706
707 static int mlx4_en_set_mac(struct net_device *dev, void *addr)
708 {
709 struct mlx4_en_priv *priv = netdev_priv(dev);
710 struct mlx4_en_dev *mdev = priv->mdev;
711 struct sockaddr *saddr = addr;
712 unsigned char new_mac[ETH_ALEN + 2];
713 int err;
714
715 if (!is_valid_ether_addr(saddr->sa_data))
716 return -EADDRNOTAVAIL;
717
718 mutex_lock(&mdev->state_lock);
719 memcpy(new_mac, saddr->sa_data, ETH_ALEN);
720 err = mlx4_en_do_set_mac(priv, new_mac);
721 if (!err)
722 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
723 mutex_unlock(&mdev->state_lock);
724
725 return err;
726 }
727
728 static void mlx4_en_clear_list(struct net_device *dev)
729 {
730 struct mlx4_en_priv *priv = netdev_priv(dev);
731 struct mlx4_en_mc_list *tmp, *mc_to_del;
732
733 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
734 list_del(&mc_to_del->list);
735 kfree(mc_to_del);
736 }
737 }
738
739 static void mlx4_en_cache_mclist(struct net_device *dev)
740 {
741 struct mlx4_en_priv *priv = netdev_priv(dev);
742 struct netdev_hw_addr *ha;
743 struct mlx4_en_mc_list *tmp;
744
745 mlx4_en_clear_list(dev);
746 netdev_for_each_mc_addr(ha, dev) {
747 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
748 if (!tmp) {
749 mlx4_en_clear_list(dev);
750 return;
751 }
752 memcpy(tmp->addr, ha->addr, ETH_ALEN);
753 list_add_tail(&tmp->list, &priv->mc_list);
754 }
755 }
756
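/* Diff the cached multicast list (src) against the currently programmed one
 * (dst): mark entries missing from src as MCLIST_REM and append new entries
 * as MCLIST_ADD, so the caller only attaches/detaches what changed.
 */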
757 static void update_mclist_flags(struct mlx4_en_priv *priv,
758 struct list_head *dst,
759 struct list_head *src)
760 {
761 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
762 bool found;
763
764 /* Find all the entries that should be removed from dst;
765 * these are the entries that are not found in src.
766 */
767 list_for_each_entry(dst_tmp, dst, list) {
768 found = false;
769 list_for_each_entry(src_tmp, src, list) {
770 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
771 found = true;
772 break;
773 }
774 }
775 if (!found)
776 dst_tmp->action = MCLIST_REM;
777 }
778
779 /* Add entries that exist in src but not in dst,
780 * and mark them as needing to be added.
781 */
782 list_for_each_entry(src_tmp, src, list) {
783 found = false;
784 list_for_each_entry(dst_tmp, dst, list) {
785 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
786 dst_tmp->action = MCLIST_NONE;
787 found = true;
788 break;
789 }
790 }
791 if (!found) {
792 new_mc = kmemdup(src_tmp,
793 sizeof(struct mlx4_en_mc_list),
794 GFP_KERNEL);
795 if (!new_mc)
796 return;
797
798 new_mc->action = MCLIST_ADD;
799 list_add_tail(&new_mc->list, dst);
800 }
801 }
802 }
803
804 static void mlx4_en_set_rx_mode(struct net_device *dev)
805 {
806 struct mlx4_en_priv *priv = netdev_priv(dev);
807
808 if (!priv->port_up)
809 return;
810
811 queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
812 }
813
814 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
815 struct mlx4_en_dev *mdev)
816 {
817 int err = 0;
818
819 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
820 if (netif_msg_rx_status(priv))
821 en_warn(priv, "Entering promiscuous mode\n");
822 priv->flags |= MLX4_EN_FLAG_PROMISC;
823
824 /* Enable promiscuous mode */
825 switch (mdev->dev->caps.steering_mode) {
826 case MLX4_STEERING_MODE_DEVICE_MANAGED:
827 err = mlx4_flow_steer_promisc_add(mdev->dev,
828 priv->port,
829 priv->base_qpn,
830 MLX4_FS_ALL_DEFAULT);
831 if (err)
832 en_err(priv, "Failed enabling promiscuous mode\n");
833 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
834 break;
835
836 case MLX4_STEERING_MODE_B0:
837 err = mlx4_unicast_promisc_add(mdev->dev,
838 priv->base_qpn,
839 priv->port);
840 if (err)
841 en_err(priv, "Failed enabling unicast promiscuous mode\n");
842
843 /* Add the default qp number as multicast
844 * promisc
845 */
846 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
847 err = mlx4_multicast_promisc_add(mdev->dev,
848 priv->base_qpn,
849 priv->port);
850 if (err)
851 en_err(priv, "Failed enabling multicast promiscuous mode\n");
852 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
853 }
854 break;
855
856 case MLX4_STEERING_MODE_A0:
857 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
858 priv->port,
859 priv->base_qpn,
860 1);
861 if (err)
862 en_err(priv, "Failed enabling promiscuous mode\n");
863 break;
864 }
865
866 /* Disable port multicast filter (unconditionally) */
867 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
868 0, MLX4_MCAST_DISABLE);
869 if (err)
870 en_err(priv, "Failed disabling multicast filter\n");
871 }
872 }
873
874 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
875 struct mlx4_en_dev *mdev)
876 {
877 int err = 0;
878
879 if (netif_msg_rx_status(priv))
880 en_warn(priv, "Leaving promiscuous mode\n");
881 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
882
883 /* Disable promiscuous mode */
884 switch (mdev->dev->caps.steering_mode) {
885 case MLX4_STEERING_MODE_DEVICE_MANAGED:
886 err = mlx4_flow_steer_promisc_remove(mdev->dev,
887 priv->port,
888 MLX4_FS_ALL_DEFAULT);
889 if (err)
890 en_err(priv, "Failed disabling promiscuous mode\n");
891 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
892 break;
893
894 case MLX4_STEERING_MODE_B0:
895 err = mlx4_unicast_promisc_remove(mdev->dev,
896 priv->base_qpn,
897 priv->port);
898 if (err)
899 en_err(priv, "Failed disabling unicast promiscuous mode\n");
900 /* Disable Multicast promisc */
901 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
902 err = mlx4_multicast_promisc_remove(mdev->dev,
903 priv->base_qpn,
904 priv->port);
905 if (err)
906 en_err(priv, "Failed disabling multicast promiscuous mode\n");
907 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
908 }
909 break;
910
911 case MLX4_STEERING_MODE_A0:
912 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
913 priv->port,
914 priv->base_qpn, 0);
915 if (err)
916 en_err(priv, "Failed disabling promiscuous mode\n");
917 break;
918 }
919 }
920
921 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
922 struct net_device *dev,
923 struct mlx4_en_dev *mdev)
924 {
925 struct mlx4_en_mc_list *mclist, *tmp;
926 u64 mcast_addr = 0;
927 u8 mc_list[16] = {0};
928 int err = 0;
929
930 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
931 if (dev->flags & IFF_ALLMULTI) {
932 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
933 0, MLX4_MCAST_DISABLE);
934 if (err)
935 en_err(priv, "Failed disabling multicast filter\n");
936
937 /* Add the default qp number as multicast promisc */
938 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
939 switch (mdev->dev->caps.steering_mode) {
940 case MLX4_STEERING_MODE_DEVICE_MANAGED:
941 err = mlx4_flow_steer_promisc_add(mdev->dev,
942 priv->port,
943 priv->base_qpn,
944 MLX4_FS_MC_DEFAULT);
945 break;
946
947 case MLX4_STEERING_MODE_B0:
948 err = mlx4_multicast_promisc_add(mdev->dev,
949 priv->base_qpn,
950 priv->port);
951 break;
952
953 case MLX4_STEERING_MODE_A0:
954 break;
955 }
956 if (err)
957 en_err(priv, "Failed entering multicast promisc mode\n");
958 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
959 }
960 } else {
961 /* Disable Multicast promisc */
962 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
963 switch (mdev->dev->caps.steering_mode) {
964 case MLX4_STEERING_MODE_DEVICE_MANAGED:
965 err = mlx4_flow_steer_promisc_remove(mdev->dev,
966 priv->port,
967 MLX4_FS_MC_DEFAULT);
968 break;
969
970 case MLX4_STEERING_MODE_B0:
971 err = mlx4_multicast_promisc_remove(mdev->dev,
972 priv->base_qpn,
973 priv->port);
974 break;
975
976 case MLX4_STEERING_MODE_A0:
977 break;
978 }
979 if (err)
980 en_err(priv, "Failed disabling multicast promiscuous mode\n");
981 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
982 }
983
984 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
985 0, MLX4_MCAST_DISABLE);
986 if (err)
987 en_err(priv, "Failed disabling multicast filter\n");
988
989 /* Flush mcast filter and init it with broadcast address */
990 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
991 1, MLX4_MCAST_CONFIG);
992
993 /* Update multicast list - we cache all addresses so they won't
994 * change while the HW is updated holding the command semaphore */
995 netif_addr_lock_bh(dev);
996 mlx4_en_cache_mclist(dev);
997 netif_addr_unlock_bh(dev);
998 list_for_each_entry(mclist, &priv->mc_list, list) {
999 mcast_addr = mlx4_mac_to_u64(mclist->addr);
1000 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
1001 mcast_addr, 0, MLX4_MCAST_CONFIG);
1002 }
1003 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1004 0, MLX4_MCAST_ENABLE);
1005 if (err)
1006 en_err(priv, "Failed enabling multicast filter\n");
1007
1008 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
1009 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1010 if (mclist->action == MCLIST_REM) {
1011 /* detach this address and delete from list */
1012 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1013 mc_list[5] = priv->port;
1014 err = mlx4_multicast_detach(mdev->dev,
1015 priv->rss_map.indir_qp,
1016 mc_list,
1017 MLX4_PROT_ETH,
1018 mclist->reg_id);
1019 if (err)
1020 en_err(priv, "Failed to detach multicast address\n");
1021
1022 if (mclist->tunnel_reg_id) {
1023 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
1024 if (err)
1025 en_err(priv, "Failed to detach multicast address\n");
1026 }
1027
1028 /* remove from list */
1029 list_del(&mclist->list);
1030 kfree(mclist);
1031 } else if (mclist->action == MCLIST_ADD) {
1032 /* attach the address */
1033 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1034 /* needed for B0 steering support */
1035 mc_list[5] = priv->port;
1036 err = mlx4_multicast_attach(mdev->dev,
1037 priv->rss_map.indir_qp,
1038 mc_list,
1039 priv->port, 0,
1040 MLX4_PROT_ETH,
1041 &mclist->reg_id);
1042 if (err)
1043 en_err(priv, "Failed to attach multicast address\n");
1044
1045 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
1046 &mclist->tunnel_reg_id);
1047 if (err)
1048 en_err(priv, "Failed to attach multicast address\n");
1049 }
1050 }
1051 }
1052 }
1053
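/* Synchronize the hardware unicast filter with the netdev uc address list:
 * release steering rules for addresses that were removed, register and steer
 * newly added ones, and force promiscuous mode if an addition fails.
 */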
1054 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1055 struct net_device *dev,
1056 struct mlx4_en_dev *mdev)
1057 {
1058 struct netdev_hw_addr *ha;
1059 struct mlx4_mac_entry *entry;
1060 struct hlist_node *tmp;
1061 bool found;
1062 u64 mac;
1063 int err = 0;
1064 struct hlist_head *bucket;
1065 unsigned int i;
1066 int removed = 0;
1067 u32 prev_flags;
1068
1069 /* Note that we do not need to protect our mac_hash traversal with rcu,
1070 * since all modification code is protected by mdev->state_lock
1071 */
1072
1073 /* find what to remove */
1074 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1075 bucket = &priv->mac_hash[i];
1076 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
1077 found = false;
1078 netdev_for_each_uc_addr(ha, dev) {
1079 if (ether_addr_equal_64bits(entry->mac,
1080 ha->addr)) {
1081 found = true;
1082 break;
1083 }
1084 }
1085
1086 /* MAC address of the port is not in uc list */
1087 if (ether_addr_equal_64bits(entry->mac,
1088 priv->current_mac))
1089 found = true;
1090
1091 if (!found) {
1092 mac = mlx4_mac_to_u64(entry->mac);
1093 mlx4_en_uc_steer_release(priv, entry->mac,
1094 priv->base_qpn,
1095 entry->reg_id);
1096 mlx4_unregister_mac(mdev->dev, priv->port, mac);
1097
1098 hlist_del_rcu(&entry->hlist);
1099 kfree_rcu(entry, rcu);
1100 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
1101 entry->mac, priv->port);
1102 ++removed;
1103 }
1104 }
1105 }
1106
1107 /* if we didn't remove anything, there is no use in trying to add
1108 * again once we are in a forced promisc mode state
1109 */
1110 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
1111 return;
1112
1113 prev_flags = priv->flags;
1114 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
1115
1116 /* find what to add */
1117 netdev_for_each_uc_addr(ha, dev) {
1118 found = false;
1119 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
1120 hlist_for_each_entry(entry, bucket, hlist) {
1121 if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
1122 found = true;
1123 break;
1124 }
1125 }
1126
1127 if (!found) {
1128 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1129 if (!entry) {
1130 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
1131 ha->addr, priv->port);
1132 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1133 break;
1134 }
1135 mac = mlx4_mac_to_u64(ha->addr);
1136 memcpy(entry->mac, ha->addr, ETH_ALEN);
1137 err = mlx4_register_mac(mdev->dev, priv->port, mac);
1138 if (err < 0) {
1139 en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
1140 ha->addr, priv->port, err);
1141 kfree(entry);
1142 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1143 break;
1144 }
1145 err = mlx4_en_uc_steer_add(priv, ha->addr,
1146 &priv->base_qpn,
1147 &entry->reg_id);
1148 if (err) {
1149 en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
1150 ha->addr, priv->port, err);
1151 mlx4_unregister_mac(mdev->dev, priv->port, mac);
1152 kfree(entry);
1153 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1154 break;
1155 } else {
1156 unsigned int mac_hash;
1157 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
1158 ha->addr, priv->port);
1159 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
1160 bucket = &priv->mac_hash[mac_hash];
1161 hlist_add_head_rcu(&entry->hlist, bucket);
1162 }
1163 }
1164 }
1165
1166 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1167 en_warn(priv, "Forcing promiscuous mode on port:%d\n",
1168 priv->port);
1169 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1170 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
1171 priv->port);
1172 }
1173 }
1174
1175 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1176 {
1177 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1178 rx_mode_task);
1179 struct mlx4_en_dev *mdev = priv->mdev;
1180 struct net_device *dev = priv->dev;
1181
1182 mutex_lock(&mdev->state_lock);
1183 if (!mdev->device_up) {
1184 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1185 goto out;
1186 }
1187 if (!priv->port_up) {
1188 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1189 goto out;
1190 }
1191
1192 if (!netif_carrier_ok(dev)) {
1193 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1194 if (priv->port_state.link_state) {
1195 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1196 netif_carrier_on(dev);
1197 en_dbg(LINK, priv, "Link Up\n");
1198 }
1199 }
1200 }
1201
1202 if (dev->priv_flags & IFF_UNICAST_FLT)
1203 mlx4_en_do_uc_filter(priv, dev, mdev);
1204
1205 /* Promiscuous mode: disable all filters */
1206 if ((dev->flags & IFF_PROMISC) ||
1207 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1208 mlx4_en_set_promisc_mode(priv, mdev);
1209 goto out;
1210 }
1211
1212 /* Not in promiscuous mode */
1213 if (priv->flags & MLX4_EN_FLAG_PROMISC)
1214 mlx4_en_clear_promisc_mode(priv, mdev);
1215
1216 mlx4_en_do_multicast(priv, dev, mdev);
1217 out:
1218 mutex_unlock(&mdev->state_lock);
1219 }
1220
1221 #ifdef CONFIG_NET_POLL_CONTROLLER
1222 static void mlx4_en_netpoll(struct net_device *dev)
1223 {
1224 struct mlx4_en_priv *priv = netdev_priv(dev);
1225 struct mlx4_en_cq *cq;
1226 int i;
1227
1228 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1229 cq = priv->tx_cq[TX][i];
1230 napi_schedule(&cq->napi);
1231 }
1232 }
1233 #endif
1234
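/* Install the unicast and VXLAN steering rules that direct traffic for the
 * port MAC address to the base QP, and record the MAC in the driver's
 * hash table.
 */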
1235 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
1236 {
1237 u64 reg_id;
1238 int err = 0;
1239 int *qpn = &priv->base_qpn;
1240 struct mlx4_mac_entry *entry;
1241
1242 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
1243 if (err)
1244 return err;
1245
1246 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
1247 &priv->tunnel_reg_id);
1248 if (err)
1249 goto tunnel_err;
1250
1251 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1252 if (!entry) {
1253 err = -ENOMEM;
1254 goto alloc_err;
1255 }
1256
1257 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
1258 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
1259 entry->reg_id = reg_id;
1260 hlist_add_head_rcu(&entry->hlist,
1261 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
1262
1263 return 0;
1264
1265 alloc_err:
1266 if (priv->tunnel_reg_id)
1267 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
1268
1269 tunnel_err:
1270 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
1271 return err;
1272 }
1273
1274 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
1275 {
1276 u64 mac;
1277 unsigned int i;
1278 int qpn = priv->base_qpn;
1279 struct hlist_head *bucket;
1280 struct hlist_node *tmp;
1281 struct mlx4_mac_entry *entry;
1282
1283 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1284 bucket = &priv->mac_hash[i];
1285 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
1286 mac = mlx4_mac_to_u64(entry->mac);
1287 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
1288 entry->mac);
1289 mlx4_en_uc_steer_release(priv, entry->mac,
1290 qpn, entry->reg_id);
1291
1292 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
1293 hlist_del_rcu(&entry->hlist);
1294 kfree_rcu(entry, rcu);
1295 }
1296 }
1297
1298 if (priv->tunnel_reg_id) {
1299 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
1300 priv->tunnel_reg_id = 0;
1301 }
1302 }
1303
1304 static void mlx4_en_tx_timeout(struct net_device *dev)
1305 {
1306 struct mlx4_en_priv *priv = netdev_priv(dev);
1307 struct mlx4_en_dev *mdev = priv->mdev;
1308 int i;
1309
1310 if (netif_msg_timer(priv))
1311 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
1312
1313 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1314 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
1315
1316 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
1317 continue;
1318 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
1319 i, tx_ring->qpn, tx_ring->sp_cqn,
1320 tx_ring->cons, tx_ring->prod);
1321 }
1322
1323 priv->port_stats.tx_timeout++;
1324 en_dbg(DRV, priv, "Scheduling watchdog\n");
1325 queue_work(mdev->workqueue, &priv->watchdog_task);
1326 }
1327
1328
1329 static void
1330 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1331 {
1332 struct mlx4_en_priv *priv = netdev_priv(dev);
1333
1334 spin_lock_bh(&priv->stats_lock);
1335 mlx4_en_fold_software_stats(dev);
1336 netdev_stats_to_stats64(stats, &dev->stats);
1337 spin_unlock_bh(&priv->stats_lock);
1338 }
1339
1340 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1341 {
1342 struct mlx4_en_cq *cq;
1343 int i, t;
1344
1345 /* If we haven't received a specific coalescing setting
1346 * (module param), we set the moderation parameters as follows:
1347 * - moder_cnt is set to the number of mtu sized packets to
1348 * satisfy our coalescing target.
1349 * - moder_time is set to a fixed value.
1350 */
1351 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1352 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1353 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1354 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1355 en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1356 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
1357
1358 /* Setup cq moderation params */
1359 for (i = 0; i < priv->rx_ring_num; i++) {
1360 cq = priv->rx_cq[i];
1361 cq->moder_cnt = priv->rx_frames;
1362 cq->moder_time = priv->rx_usecs;
1363 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1364 priv->last_moder_packets[i] = 0;
1365 priv->last_moder_bytes[i] = 0;
1366 }
1367
1368 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
1369 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1370 cq = priv->tx_cq[t][i];
1371 cq->moder_cnt = priv->tx_frames;
1372 cq->moder_time = priv->tx_usecs;
1373 }
1374 }
1375
1376 /* Reset auto-moderation params */
1377 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1378 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1379 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1380 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1381 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1382 priv->adaptive_rx_coal = 1;
1383 priv->last_moder_jiffies = 0;
1384 priv->last_moder_tx_packets = 0;
1385 }
1386
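/* Adaptive RX coalescing: once per sampling interval, scale each ring's CQ
 * moderation time linearly between rx_usecs_low and rx_usecs_high according
 * to its packet rate; small packets or low rates get the low setting.
 */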
1387 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1388 {
1389 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1390 u32 pkt_rate_high, pkt_rate_low;
1391 struct mlx4_en_cq *cq;
1392 unsigned long packets;
1393 unsigned long rate;
1394 unsigned long avg_pkt_size;
1395 unsigned long rx_packets;
1396 unsigned long rx_bytes;
1397 unsigned long rx_pkt_diff;
1398 int moder_time;
1399 int ring, err;
1400
1401 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1402 return;
1403
1404 pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
1405 pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
1406
1407 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1408 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
1409 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
1410
1411 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
1412 packets = rx_pkt_diff;
1413 rate = packets * HZ / period;
1414 avg_pkt_size = packets ? (rx_bytes -
1415 priv->last_moder_bytes[ring]) / packets : 0;
1416
1417 /* Apply auto-moderation only when the packet rate
1418 * exceeds a rate at which it matters */
1419 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1420 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1421 if (rate <= pkt_rate_low)
1422 moder_time = priv->rx_usecs_low;
1423 else if (rate >= pkt_rate_high)
1424 moder_time = priv->rx_usecs_high;
1425 else
1426 moder_time = (rate - pkt_rate_low) *
1427 (priv->rx_usecs_high - priv->rx_usecs_low) /
1428 (pkt_rate_high - pkt_rate_low) +
1429 priv->rx_usecs_low;
1430 } else {
1431 moder_time = priv->rx_usecs_low;
1432 }
1433
1434 cq = priv->rx_cq[ring];
1435 if (moder_time != priv->last_moder_time[ring] ||
1436 cq->moder_cnt != priv->rx_frames) {
1437 priv->last_moder_time[ring] = moder_time;
1438 cq->moder_time = moder_time;
1439 cq->moder_cnt = priv->rx_frames;
1440 err = mlx4_en_set_cq_moder(priv, cq);
1441 if (err)
1442 en_err(priv, "Failed modifying moderation for cq:%d\n",
1443 ring);
1444 }
1445 priv->last_moder_packets[ring] = rx_packets;
1446 priv->last_moder_bytes[ring] = rx_bytes;
1447 }
1448
1449 priv->last_moder_jiffies = jiffies;
1450 }
1451
1452 static void mlx4_en_do_get_stats(struct work_struct *work)
1453 {
1454 struct delayed_work *delay = to_delayed_work(work);
1455 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1456 stats_task);
1457 struct mlx4_en_dev *mdev = priv->mdev;
1458 int err;
1459
1460 mutex_lock(&mdev->state_lock);
1461 if (mdev->device_up) {
1462 if (priv->port_up) {
1463 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1464 if (err)
1465 en_dbg(HW, priv, "Could not update stats\n");
1466
1467 mlx4_en_auto_moderation(priv);
1468 }
1469
1470 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1471 }
1472 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1473 mlx4_en_do_set_mac(priv, priv->current_mac);
1474 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1475 }
1476 mutex_unlock(&mdev->state_lock);
1477 }
1478
1479 /* mlx4_en_service_task - Run the service task for work that needs to be done
1480 * periodically
1481 */
1482 static void mlx4_en_service_task(struct work_struct *work)
1483 {
1484 struct delayed_work *delay = to_delayed_work(work);
1485 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1486 service_task);
1487 struct mlx4_en_dev *mdev = priv->mdev;
1488
1489 mutex_lock(&mdev->state_lock);
1490 if (mdev->device_up) {
1491 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1492 mlx4_en_ptp_overflow_check(mdev);
1493
1494 mlx4_en_recover_from_oom(priv);
1495 queue_delayed_work(mdev->workqueue, &priv->service_task,
1496 SERVICE_TASK_DELAY);
1497 }
1498 mutex_unlock(&mdev->state_lock);
1499 }
1500
1501 static void mlx4_en_linkstate(struct work_struct *work)
1502 {
1503 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1504 linkstate_task);
1505 struct mlx4_en_dev *mdev = priv->mdev;
1506 int linkstate = priv->link_state;
1507
1508 mutex_lock(&mdev->state_lock);
1509 /* If the observable port state changed, set the carrier state and
1510 * report to the system log */
1511 if (priv->last_link_state != linkstate) {
1512 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1513 en_info(priv, "Link Down\n");
1514 netif_carrier_off(priv->dev);
1515 } else {
1516 en_info(priv, "Link Up\n");
1517 netif_carrier_on(priv->dev);
1518 }
1519 }
1520 priv->last_link_state = linkstate;
1521 mutex_unlock(&mdev->state_lock);
1522 }
1523
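/* Give the RX ring's interrupt a single-CPU affinity hint, spreading ring
 * indices over the CPUs local to the device's NUMA node.
 */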
1524 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1525 {
1526 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1527 int numa_node = priv->mdev->dev->numa_node;
1528
1529 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1530 return -ENOMEM;
1531
1532 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1533 ring->affinity_mask);
1534 return 0;
1535 }
1536
1537 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1538 {
1539 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1540 }
1541
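/* Pair an XDP TX ring with the RX ring of the same index so that pages sent
 * via XDP_TX are recycled back into that RX ring on completion.
 */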
1542 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
1543 int tx_ring_idx)
1544 {
1545 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
1546 int rr_index = tx_ring_idx;
1547
1548 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
1549 tx_ring->recycle_ring = priv->rx_ring[rr_index];
1550 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
1551 TX_XDP, tx_ring_idx, rr_index);
1552 }
1553
1554 int mlx4_en_start_port(struct net_device *dev)
1555 {
1556 struct mlx4_en_priv *priv = netdev_priv(dev);
1557 struct mlx4_en_dev *mdev = priv->mdev;
1558 struct mlx4_en_cq *cq;
1559 struct mlx4_en_tx_ring *tx_ring;
1560 int rx_index = 0;
1561 int err = 0;
1562 int i, t;
1563 int j;
1564 u8 mc_list[16] = {0};
1565
1566 if (priv->port_up) {
1567 en_dbg(DRV, priv, "start port called while port already up\n");
1568 return 0;
1569 }
1570
1571 INIT_LIST_HEAD(&priv->mc_list);
1572 INIT_LIST_HEAD(&priv->curr_list);
1573 INIT_LIST_HEAD(&priv->ethtool_list);
1574 memset(&priv->ethtool_rules[0], 0,
1575 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
1576
1577 /* Calculate Rx buf size */
1578 dev->mtu = min(dev->mtu, priv->max_mtu);
1579 mlx4_en_calc_rx_buf(dev);
1580 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
1581
1582 /* Configure rx cq's and rings */
1583 err = mlx4_en_activate_rx_rings(priv);
1584 if (err) {
1585 en_err(priv, "Failed to activate RX rings\n");
1586 return err;
1587 }
1588 for (i = 0; i < priv->rx_ring_num; i++) {
1589 cq = priv->rx_cq[i];
1590
1591 err = mlx4_en_init_affinity_hint(priv, i);
1592 if (err) {
1593 en_err(priv, "Failed preparing IRQ affinity hint\n");
1594 goto cq_err;
1595 }
1596
1597 err = mlx4_en_activate_cq(priv, cq, i);
1598 if (err) {
1599 en_err(priv, "Failed activating Rx CQ\n");
1600 mlx4_en_free_affinity_hint(priv, i);
1601 goto cq_err;
1602 }
1603
1604 for (j = 0; j < cq->size; j++) {
1605 struct mlx4_cqe *cqe = NULL;
1606
1607 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1608 priv->cqe_factor;
1609 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1610 }
1611
1612 err = mlx4_en_set_cq_moder(priv, cq);
1613 if (err) {
1614 en_err(priv, "Failed setting cq moderation parameters\n");
1615 mlx4_en_deactivate_cq(priv, cq);
1616 mlx4_en_free_affinity_hint(priv, i);
1617 goto cq_err;
1618 }
1619 mlx4_en_arm_cq(priv, cq);
1620 priv->rx_ring[i]->cqn = cq->mcq.cqn;
1621 ++rx_index;
1622 }
1623
1624 /* Set qp number */
1625 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1626 err = mlx4_en_get_qp(priv);
1627 if (err) {
1628 en_err(priv, "Failed getting eth qp\n");
1629 goto cq_err;
1630 }
1631 mdev->mac_removed[priv->port] = 0;
1632
1633 priv->counter_index =
1634 mlx4_get_default_counter_index(mdev->dev, priv->port);
1635
1636 err = mlx4_en_config_rss_steer(priv);
1637 if (err) {
1638 en_err(priv, "Failed configuring rss steering\n");
1639 goto mac_err;
1640 }
1641
1642 err = mlx4_en_create_drop_qp(priv);
1643 if (err)
1644 goto rss_err;
1645
1646 /* Configure tx cq's and rings */
1647 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
1648 u8 num_tx_rings_p_up = t == TX ?
1649 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
1650
1651 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1652 /* Configure cq */
1653 cq = priv->tx_cq[t][i];
1654 err = mlx4_en_activate_cq(priv, cq, i);
1655 if (err) {
1656 en_err(priv, "Failed allocating Tx CQ\n");
1657 goto tx_err;
1658 }
1659 err = mlx4_en_set_cq_moder(priv, cq);
1660 if (err) {
1661 en_err(priv, "Failed setting cq moderation parameters\n");
1662 mlx4_en_deactivate_cq(priv, cq);
1663 goto tx_err;
1664 }
1665 en_dbg(DRV, priv,
1666 "Resetting index of collapsed CQ:%d to -1\n", i);
1667 cq->buf->wqe_index = cpu_to_be16(0xffff);
1668
1669 /* Configure ring */
1670 tx_ring = priv->tx_ring[t][i];
1671 err = mlx4_en_activate_tx_ring(priv, tx_ring,
1672 cq->mcq.cqn,
1673 i / num_tx_rings_p_up);
1674 if (err) {
1675 en_err(priv, "Failed allocating Tx ring\n");
1676 mlx4_en_deactivate_cq(priv, cq);
1677 goto tx_err;
1678 }
1679 if (t != TX_XDP) {
1680 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1681 tx_ring->recycle_ring = NULL;
1682 } else {
1683 mlx4_en_init_recycle_ring(priv, i);
1684 }
1685
1686 /* Arm CQ for TX completions */
1687 mlx4_en_arm_cq(priv, cq);
1688
1689 /* Set initial ownership of all Tx TXBBs to SW (1) */
1690 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1691 *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
1692 }
1693 }
1694
1695 /* Configure port */
1696 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1697 priv->rx_skb_size + ETH_FCS_LEN,
1698 priv->prof->tx_pause,
1699 priv->prof->tx_ppp,
1700 priv->prof->rx_pause,
1701 priv->prof->rx_ppp);
1702 if (err) {
1703 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1704 priv->port, err);
1705 goto tx_err;
1706 }
1707
1708 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
1709 if (err) {
1710 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
1711 dev->mtu, priv->port, err);
1712 goto tx_err;
1713 }
1714
1715 /* Set default qp number */
1716 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1717 if (err) {
1718 en_err(priv, "Failed setting default qp numbers\n");
1719 goto tx_err;
1720 }
1721
1722 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1723 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
1724 if (err) {
1725 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1726 err);
1727 goto tx_err;
1728 }
1729 }
1730
1731 /* Init port */
1732 en_dbg(HW, priv, "Initializing port\n");
1733 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1734 if (err) {
1735 en_err(priv, "Failed Initializing port\n");
1736 goto tx_err;
1737 }
1738
1739 /* Set Unicast and VXLAN steering rules */
1740 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
1741 mlx4_en_set_rss_steer_rules(priv))
1742 mlx4_warn(mdev, "Failed setting steering rules\n");
1743
1744 /* Attach rx QP to broadcast address */
1745 eth_broadcast_addr(&mc_list[10]);
1746 mc_list[5] = priv->port; /* needed for B0 steering support */
1747 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
1748 priv->port, 0, MLX4_PROT_ETH,
1749 &priv->broadcast_id))
1750 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1751
1752 /* Must redo promiscuous mode setup. */
1753 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1754
1755 /* Schedule multicast task to populate multicast list */
1756 queue_work(mdev->workqueue, &priv->rx_mode_task);
1757
1758 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1759 udp_tunnel_get_rx_info(dev);
1760
1761 priv->port_up = true;
1762
1763 /* Process any pending completions to prevent
1764 * the queues from freezing if they are full
1765 */
1766 for (i = 0; i < priv->rx_ring_num; i++) {
1767 local_bh_disable();
1768 napi_schedule(&priv->rx_cq[i]->napi);
1769 local_bh_enable();
1770 }
1771
1772 netif_tx_start_all_queues(dev);
1773 netif_device_attach(dev);
1774
1775 return 0;
1776
1777 tx_err:
1778 if (t == MLX4_EN_NUM_TX_TYPES) {
1779 t--;
1780 i = priv->tx_ring_num[t];
1781 }
1782 while (t >= 0) {
1783 while (i--) {
1784 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1785 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1786 }
1787 if (!t--)
1788 break;
1789 i = priv->tx_ring_num[t];
1790 }
1791 mlx4_en_destroy_drop_qp(priv);
1792 rss_err:
1793 mlx4_en_release_rss_steer(priv);
1794 mac_err:
1795 mlx4_en_put_qp(priv);
1796 cq_err:
1797 while (rx_index--) {
1798 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1799 mlx4_en_free_affinity_hint(priv, rx_index);
1800 }
1801 for (i = 0; i < priv->rx_ring_num; i++)
1802 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1803
1804 return err; /* need to close devices */
1805 }
1806
1807
1808 void mlx4_en_stop_port(struct net_device *dev, int detach)
1809 {
1810 struct mlx4_en_priv *priv = netdev_priv(dev);
1811 struct mlx4_en_dev *mdev = priv->mdev;
1812 struct mlx4_en_mc_list *mclist, *tmp;
1813 struct ethtool_flow_id *flow, *tmp_flow;
1814 int i, t;
1815 u8 mc_list[16] = {0};
1816
1817 if (!priv->port_up) {
1818 en_dbg(DRV, priv, "stop port called while port already down\n");
1819 return;
1820 }
1821
1822 /* close port */
1823 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1824
1825 /* Synchronize with tx routine */
1826 netif_tx_lock_bh(dev);
1827 if (detach)
1828 netif_device_detach(dev);
1829 netif_tx_stop_all_queues(dev);
1830 netif_tx_unlock_bh(dev);
1831
1832 netif_tx_disable(dev);
1833
1834 spin_lock_bh(&priv->stats_lock);
1835 mlx4_en_fold_software_stats(dev);
1836 /* Set port as not active */
1837 priv->port_up = false;
1838 spin_unlock_bh(&priv->stats_lock);
1839
1840 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
1841
1842 /* Promiscuous mode */
1843 if (mdev->dev->caps.steering_mode ==
1844 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1845 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1846 MLX4_EN_FLAG_MC_PROMISC);
1847 mlx4_flow_steer_promisc_remove(mdev->dev,
1848 priv->port,
1849 MLX4_FS_ALL_DEFAULT);
1850 mlx4_flow_steer_promisc_remove(mdev->dev,
1851 priv->port,
1852 MLX4_FS_MC_DEFAULT);
1853 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1854 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1855
1856 /* Disable promiscuous mode */
1857 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1858 priv->port);
1859
1860 /* Disable Multicast promisc */
1861 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1862 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1863 priv->port);
1864 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1865 }
1866 }
1867
1868 /* Detach All multicasts */
1869 eth_broadcast_addr(&mc_list[10]);
1870 mc_list[5] = priv->port; /* needed for B0 steering support */
1871 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
1872 MLX4_PROT_ETH, priv->broadcast_id);
1873 list_for_each_entry(mclist, &priv->curr_list, list) {
1874 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1875 mc_list[5] = priv->port;
1876 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
1877 mc_list, MLX4_PROT_ETH, mclist->reg_id);
1878 if (mclist->tunnel_reg_id)
1879 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1880 }
1881 mlx4_en_clear_list(dev);
1882 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1883 list_del(&mclist->list);
1884 kfree(mclist);
1885 }
1886
1887 /* Flush multicast filter */
1888 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1889
1890 /* Remove flow steering rules for the port */
1891 if (mdev->dev->caps.steering_mode ==
1892 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1893 ASSERT_RTNL();
1894 list_for_each_entry_safe(flow, tmp_flow,
1895 &priv->ethtool_list, list) {
1896 mlx4_flow_detach(mdev->dev, flow->id);
1897 list_del(&flow->list);
1898 }
1899 }
1900
1901 mlx4_en_destroy_drop_qp(priv);
1902
1903 /* Free TX Rings */
1904 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
1905 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1906 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1907 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1908 }
1909 }
1910 msleep(10);
1911
1912 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
1913 for (i = 0; i < priv->tx_ring_num[t]; i++)
1914 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
1915
1916 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1917 mlx4_en_delete_rss_steer_rules(priv);
1918
1919 /* Free RSS qps */
1920 mlx4_en_release_rss_steer(priv);
1921
1922 /* Unregister MAC address for the port */
1923 mlx4_en_put_qp(priv);
1924 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
1925 mdev->mac_removed[priv->port] = 1;
1926
1927 /* Free RX Rings */
1928 for (i = 0; i < priv->rx_ring_num; i++) {
1929 struct mlx4_en_cq *cq = priv->rx_cq[i];
1930
1931 napi_synchronize(&cq->napi);
1932 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1933 mlx4_en_deactivate_cq(priv, cq);
1934
1935 mlx4_en_free_affinity_hint(priv, i);
1936 }
1937 }
1938
1939 static void mlx4_en_restart(struct work_struct *work)
1940 {
1941 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1942 watchdog_task);
1943 struct mlx4_en_dev *mdev = priv->mdev;
1944 struct net_device *dev = priv->dev;
1945
1946 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1947
1948 rtnl_lock();
1949 mutex_lock(&mdev->state_lock);
1950 if (priv->port_up) {
1951 mlx4_en_stop_port(dev, 1);
1952 if (mlx4_en_start_port(dev))
1953 en_err(priv, "Failed restarting port %d\n", priv->port);
1954 }
1955 mutex_unlock(&mdev->state_lock);
1956 rtnl_unlock();
1957 }
1958
1959 static void mlx4_en_clear_stats(struct net_device *dev)
1960 {
1961 struct mlx4_en_priv *priv = netdev_priv(dev);
1962 struct mlx4_en_dev *mdev = priv->mdev;
1963 struct mlx4_en_tx_ring **tx_ring;
1964 int i;
1965
1966 if (!mlx4_is_slave(mdev->dev))
1967 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1968 en_dbg(HW, priv, "Failed dumping statistics\n");
1969
1970 memset(&priv->pstats, 0, sizeof(priv->pstats));
1971 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1972 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1973 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
1974 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
1975 memset(&priv->rx_priority_flowstats, 0,
1976 sizeof(priv->rx_priority_flowstats));
1977 memset(&priv->tx_priority_flowstats, 0,
1978 sizeof(priv->tx_priority_flowstats));
1979 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
1980
1981 tx_ring = priv->tx_ring[TX];
1982 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1983 tx_ring[i]->bytes = 0;
1984 tx_ring[i]->packets = 0;
1985 tx_ring[i]->tx_csum = 0;
1986 tx_ring[i]->tx_dropped = 0;
1987 tx_ring[i]->queue_stopped = 0;
1988 tx_ring[i]->wake_queue = 0;
1989 tx_ring[i]->tso_packets = 0;
1990 tx_ring[i]->xmit_more = 0;
1991 }
1992 for (i = 0; i < priv->rx_ring_num; i++) {
1993 priv->rx_ring[i]->bytes = 0;
1994 priv->rx_ring[i]->packets = 0;
1995 priv->rx_ring[i]->csum_ok = 0;
1996 priv->rx_ring[i]->csum_none = 0;
1997 priv->rx_ring[i]->csum_complete = 0;
1998 }
1999 }
2000
2001 static int mlx4_en_open(struct net_device *dev)
2002 {
2003 struct mlx4_en_priv *priv = netdev_priv(dev);
2004 struct mlx4_en_dev *mdev = priv->mdev;
2005 int err = 0;
2006
2007 mutex_lock(&mdev->state_lock);
2008
2009 if (!mdev->device_up) {
2010 en_err(priv, "Cannot open - device down/disabled\n");
2011 err = -EBUSY;
2012 goto out;
2013 }
2014
2015 /* Reset HW statistics and SW counters */
2016 mlx4_en_clear_stats(dev);
2017
2018 err = mlx4_en_start_port(dev);
2019 if (err)
2020 en_err(priv, "Failed starting port:%d\n", priv->port);
2021
2022 out:
2023 mutex_unlock(&mdev->state_lock);
2024 return err;
2025 }
2026
2027
2028 static int mlx4_en_close(struct net_device *dev)
2029 {
2030 struct mlx4_en_priv *priv = netdev_priv(dev);
2031 struct mlx4_en_dev *mdev = priv->mdev;
2032
2033 en_dbg(IFDOWN, priv, "Close port called\n");
2034
2035 mutex_lock(&mdev->state_lock);
2036
2037 mlx4_en_stop_port(dev, 0);
2038 netif_carrier_off(dev);
2039
2040 mutex_unlock(&mdev->state_lock);
2041 return 0;
2042 }
2043
2044 static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
2045 {
2046 int i, t;
2047
2048 #ifdef CONFIG_RFS_ACCEL
2049 priv->dev->rx_cpu_rmap = NULL;
2050 #endif
2051
2052 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2053 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2054 if (priv->tx_ring[t] && priv->tx_ring[t][i])
2055 mlx4_en_destroy_tx_ring(priv,
2056 &priv->tx_ring[t][i]);
2057 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2058 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2059 }
2060 kfree(priv->tx_ring[t]);
2061 kfree(priv->tx_cq[t]);
2062 }
2063
2064 for (i = 0; i < priv->rx_ring_num; i++) {
2065 if (priv->rx_ring[i])
2066 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2067 priv->prof->rx_ring_size, priv->stride);
2068 if (priv->rx_cq[i])
2069 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2070 }
2071
2072 }
2073
2074 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
2075 {
2076 struct mlx4_en_port_profile *prof = priv->prof;
2077 int i, t;
2078 int node;
2079
2080 /* Create tx Rings */
2081 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2082 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2083 node = cpu_to_node(i % num_online_cpus());
2084 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
2085 prof->tx_ring_size, i, t, node))
2086 goto err;
2087
2088 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
2089 prof->tx_ring_size,
2090 TXBB_SIZE, node, i))
2091 goto err;
2092 }
2093 }
2094
2095 /* Create rx Rings */
2096 for (i = 0; i < priv->rx_ring_num; i++) {
2097 node = cpu_to_node(i % num_online_cpus());
2098 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
2099 prof->rx_ring_size, i, RX, node))
2100 goto err;
2101
2102 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2103 prof->rx_ring_size, priv->stride,
2104 node))
2105 goto err;
2106 }
2107
2108 #ifdef CONFIG_RFS_ACCEL
2109 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
2110 #endif
2111
2112 return 0;
2113
2114 err:
2115 en_err(priv, "Failed to allocate NIC resources\n");
2116 for (i = 0; i < priv->rx_ring_num; i++) {
2117 if (priv->rx_ring[i])
2118 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2119 prof->rx_ring_size,
2120 priv->stride);
2121 if (priv->rx_cq[i])
2122 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2123 }
2124 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2125 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2126 if (priv->tx_ring[t][i])
2127 mlx4_en_destroy_tx_ring(priv,
2128 &priv->tx_ring[t][i]);
2129 if (priv->tx_cq[t][i])
2130 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2131 }
2132 }
2133 return -ENOMEM;
2134 }
2135
2136
2137 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2138 struct mlx4_en_priv *src,
2139 struct mlx4_en_port_profile *prof)
2140 {
2141 int t;
2142
2143 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2144 sizeof(dst->hwtstamp_config));
2145 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
2146 dst->rx_ring_num = prof->rx_ring_num;
2147 dst->flags = prof->flags;
2148 dst->mdev = src->mdev;
2149 dst->port = src->port;
2150 dst->dev = src->dev;
2151 dst->prof = prof;
2152 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2153 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2154
2155 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2156 dst->tx_ring_num[t] = prof->tx_ring_num[t];
2157 if (!dst->tx_ring_num[t])
2158 continue;
2159
2160 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
2161 MAX_TX_RINGS, GFP_KERNEL);
2162 if (!dst->tx_ring[t])
2163 goto err_free_tx;
2164
2165 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
2166 MAX_TX_RINGS, GFP_KERNEL);
2167 if (!dst->tx_cq[t]) {
2168 kfree(dst->tx_ring[t]);
2169 goto err_free_tx;
2170 }
2171 }
2172
2173 return 0;
2174
2175 err_free_tx:
2176 while (t--) {
2177 kfree(dst->tx_ring[t]);
2178 kfree(dst->tx_cq[t]);
2179 }
2180 return -ENOMEM;
2181 }
2182
2183 static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2184 struct mlx4_en_priv *src)
2185 {
2186 int t;
2187 memcpy(dst->rx_ring, src->rx_ring,
2188 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2189 memcpy(dst->rx_cq, src->rx_cq,
2190 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2191 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2192 sizeof(dst->hwtstamp_config));
2193 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2194 dst->tx_ring_num[t] = src->tx_ring_num[t];
2195 dst->tx_ring[t] = src->tx_ring[t];
2196 dst->tx_cq[t] = src->tx_cq[t];
2197 }
2198 dst->rx_ring_num = src->rx_ring_num;
2199 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2200 }
2201
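/* Note on the reconfiguration helpers below: they follow a "scratch priv"
 * pattern. A full set of rings/CQs is first allocated into @tmp; only if
 * that allocation succeeds are the live resources freed and replaced via
 * mlx4_en_safe_replace_resources(), so a failed reallocation leaves the
 * running configuration untouched.
 */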
2202 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2203 struct mlx4_en_priv *tmp,
2204 struct mlx4_en_port_profile *prof,
2205 bool carry_xdp_prog)
2206 {
2207 struct bpf_prog *xdp_prog;
2208 int i, t;
2209
2210 mlx4_en_copy_priv(tmp, priv, prof);
2211
2212 if (mlx4_en_alloc_resources(tmp)) {
2213 en_warn(priv,
2214 "%s: Resource allocation failed, using previous configuration\n",
2215 __func__);
2216 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2217 kfree(tmp->tx_ring[t]);
2218 kfree(tmp->tx_cq[t]);
2219 }
2220 return -ENOMEM;
2221 }
2222
2223 /* All rx_rings have the same xdp_prog. Pick the first one. */
2224 xdp_prog = rcu_dereference_protected(
2225 priv->rx_ring[0]->xdp_prog,
2226 lockdep_is_held(&priv->mdev->state_lock));
2227
2228 if (xdp_prog && carry_xdp_prog) {
2229 xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
2230 if (IS_ERR(xdp_prog)) {
2231 mlx4_en_free_resources(tmp);
2232 return PTR_ERR(xdp_prog);
2233 }
2234 for (i = 0; i < tmp->rx_ring_num; i++)
2235 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2236 xdp_prog);
2237 }
2238
2239 return 0;
2240 }
2241
2242 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2243 struct mlx4_en_priv *tmp)
2244 {
2245 mlx4_en_free_resources(priv);
2246 mlx4_en_update_priv(priv, tmp);
2247 }
2248
2249 void mlx4_en_destroy_netdev(struct net_device *dev)
2250 {
2251 struct mlx4_en_priv *priv = netdev_priv(dev);
2252 struct mlx4_en_dev *mdev = priv->mdev;
2253
2254 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2255
2256 /* Unregister device - this will close the port if it was up */
2257 if (priv->registered) {
2258 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2259 priv->port));
2260 unregister_netdev(dev);
2261 }
2262
2263 if (priv->allocated)
2264 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2265
2266 cancel_delayed_work(&priv->stats_task);
2267 cancel_delayed_work(&priv->service_task);
2268 /* flush any pending task for this netdev */
2269 flush_workqueue(mdev->workqueue);
2270
2271 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2272 mlx4_en_remove_timestamp(mdev);
2273
2274 /* Detach the netdev so tasks would not attempt to access it */
2275 mutex_lock(&mdev->state_lock);
2276 mdev->pndev[priv->port] = NULL;
2277 mdev->upper[priv->port] = NULL;
2278
2279 #ifdef CONFIG_RFS_ACCEL
2280 mlx4_en_cleanup_filters(priv);
2281 #endif
2282
2283 mlx4_en_free_resources(priv);
2284 mutex_unlock(&mdev->state_lock);
2285
2286 free_netdev(dev);
2287 }
2288
2289 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2290 {
2291 struct mlx4_en_priv *priv = netdev_priv(dev);
2292
2293 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2294 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2295 mtu, MLX4_EN_MAX_XDP_MTU);
2296 return false;
2297 }
2298
2299 return true;
2300 }
2301
2302 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2303 {
2304 struct mlx4_en_priv *priv = netdev_priv(dev);
2305 struct mlx4_en_dev *mdev = priv->mdev;
2306 int err = 0;
2307
2308 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2309 dev->mtu, new_mtu);
2310
2311 if (priv->tx_ring_num[TX_XDP] &&
2312 !mlx4_en_check_xdp_mtu(dev, new_mtu))
2313 return -EOPNOTSUPP;
2314
2315 dev->mtu = new_mtu;
2316
2317 if (netif_running(dev)) {
2318 mutex_lock(&mdev->state_lock);
2319 if (!mdev->device_up) {
2320 /* NIC is probably restarting - let watchdog task reset
2321 * the port */
2322 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2323 } else {
2324 mlx4_en_stop_port(dev, 1);
2325 err = mlx4_en_start_port(dev);
2326 if (err) {
2327 en_err(priv, "Failed restarting port:%d\n",
2328 priv->port);
2329 queue_work(mdev->workqueue, &priv->watchdog_task);
2330 }
2331 }
2332 mutex_unlock(&mdev->state_lock);
2333 }
2334 return 0;
2335 }
2336
2337 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2338 {
2339 struct mlx4_en_priv *priv = netdev_priv(dev);
2340 struct mlx4_en_dev *mdev = priv->mdev;
2341 struct hwtstamp_config config;
2342
2343 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2344 return -EFAULT;
2345
2346 /* reserved for future extensions */
2347 if (config.flags)
2348 return -EINVAL;
2349
2350 /* device doesn't support time stamping */
2351 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2352 return -EINVAL;
2353
2354 /* TX HW timestamp */
2355 switch (config.tx_type) {
2356 case HWTSTAMP_TX_OFF:
2357 case HWTSTAMP_TX_ON:
2358 break;
2359 default:
2360 return -ERANGE;
2361 }
2362
2363 /* RX HW timestamp */
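/* Any specific PTP filter request is upgraded to HWTSTAMP_FILTER_ALL
 * below: the device timestamps RX traffic as a whole rather than
 * filtering individual packet types.
 */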
2364 switch (config.rx_filter) {
2365 case HWTSTAMP_FILTER_NONE:
2366 break;
2367 case HWTSTAMP_FILTER_ALL:
2368 case HWTSTAMP_FILTER_SOME:
2369 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2370 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2371 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2372 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2373 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2374 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2375 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2376 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2377 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2378 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2379 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2380 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2381 case HWTSTAMP_FILTER_NTP_ALL:
2382 config.rx_filter = HWTSTAMP_FILTER_ALL;
2383 break;
2384 default:
2385 return -ERANGE;
2386 }
2387
2388 if (mlx4_en_reset_config(dev, config, dev->features)) {
2389 config.tx_type = HWTSTAMP_TX_OFF;
2390 config.rx_filter = HWTSTAMP_FILTER_NONE;
2391 }
2392
2393 return copy_to_user(ifr->ifr_data, &config,
2394 sizeof(config)) ? -EFAULT : 0;
2395 }
2396
2397 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2398 {
2399 struct mlx4_en_priv *priv = netdev_priv(dev);
2400
2401 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2402 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2403 }
2404
2405 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2406 {
2407 switch (cmd) {
2408 case SIOCSHWTSTAMP:
2409 return mlx4_en_hwtstamp_set(dev, ifr);
2410 case SIOCGHWTSTAMP:
2411 return mlx4_en_hwtstamp_get(dev, ifr);
2412 default:
2413 return -EOPNOTSUPP;
2414 }
2415 }
2416
2417 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2418 netdev_features_t features)
2419 {
2420 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2421 struct mlx4_en_dev *mdev = en_priv->mdev;
2422
2423 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2424 * enable/disable, make sure the S-TAG flag is always in the same
2425 * state as C-TAG.
2426 */
2427 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2428 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2429 features |= NETIF_F_HW_VLAN_STAG_RX;
2430 else
2431 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2432
2433 return features;
2434 }
2435
2436 static int mlx4_en_set_features(struct net_device *netdev,
2437 netdev_features_t features)
2438 {
2439 struct mlx4_en_priv *priv = netdev_priv(netdev);
2440 bool reset = false;
2441 int ret = 0;
2442
2443 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2444 en_info(priv, "Turn %s RX-FCS\n",
2445 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2446 reset = true;
2447 }
2448
2449 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2450 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2451
2452 en_info(priv, "Turn %s RX-ALL\n",
2453 ignore_fcs_value ? "ON" : "OFF");
2454 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2455 priv->port, ignore_fcs_value);
2456 if (ret)
2457 return ret;
2458 }
2459
2460 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2461 en_info(priv, "Turn %s RX vlan strip offload\n",
2462 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2463 reset = true;
2464 }
2465
2466 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2467 en_info(priv, "Turn %s TX vlan strip offload\n",
2468 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2469
2470 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2471 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2472 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2473
2474 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2475 en_info(priv, "Turn %s loopback\n",
2476 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2477 mlx4_en_update_loopback_state(netdev, features);
2478 }
2479
2480 if (reset) {
2481 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2482 features);
2483 if (ret)
2484 return ret;
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2491 {
2492 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2493 struct mlx4_en_dev *mdev = en_priv->mdev;
2494
2495 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
2496 }
2497
2498 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2499 __be16 vlan_proto)
2500 {
2501 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2502 struct mlx4_en_dev *mdev = en_priv->mdev;
2503
2504 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2505 vlan_proto);
2506 }
2507
2508 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2509 int max_tx_rate)
2510 {
2511 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2512 struct mlx4_en_dev *mdev = en_priv->mdev;
2513
2514 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2515 max_tx_rate);
2516 }
2517
2518 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2519 {
2520 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2521 struct mlx4_en_dev *mdev = en_priv->mdev;
2522
2523 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2524 }
2525
2526 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2527 {
2528 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2529 struct mlx4_en_dev *mdev = en_priv->mdev;
2530
2531 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2532 }
2533
2534 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2535 {
2536 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2537 struct mlx4_en_dev *mdev = en_priv->mdev;
2538
2539 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2540 }
2541
2542 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2543 struct ifla_vf_stats *vf_stats)
2544 {
2545 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2546 struct mlx4_en_dev *mdev = en_priv->mdev;
2547
2548 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2549 }
2550
2551 #define PORT_ID_BYTE_LEN 8
2552 static int mlx4_en_get_phys_port_id(struct net_device *dev,
2553 struct netdev_phys_item_id *ppid)
2554 {
2555 struct mlx4_en_priv *priv = netdev_priv(dev);
2556 struct mlx4_dev *mdev = priv->mdev->dev;
2557 int i;
2558 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2559
2560 if (!phys_port_id)
2561 return -EOPNOTSUPP;
2562
2563 ppid->id_len = sizeof(phys_port_id);
2564 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2565 ppid->id[i] = phys_port_id & 0xff;
2566 phys_port_id >>= 8;
2567 }
2568 return 0;
2569 }
2570
2571 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2572 {
2573 int ret;
2574 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2575 vxlan_add_task);
2576
2577 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2578 if (ret)
2579 goto out;
2580
2581 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2582 VXLAN_STEER_BY_OUTER_MAC, 1);
2583 out:
2584 if (ret) {
2585 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2586 return;
2587 }
2588
2589 /* set offloads */
2590 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2591 NETIF_F_RXCSUM |
2592 NETIF_F_TSO | NETIF_F_TSO6 |
2593 NETIF_F_GSO_UDP_TUNNEL |
2594 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2595 NETIF_F_GSO_PARTIAL;
2596 }
2597
2598 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2599 {
2600 int ret;
2601 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2602 vxlan_del_task);
2603 /* unset offloads */
2604 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2605 NETIF_F_RXCSUM |
2606 NETIF_F_TSO | NETIF_F_TSO6 |
2607 NETIF_F_GSO_UDP_TUNNEL |
2608 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2609 NETIF_F_GSO_PARTIAL);
2610
2611 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2612 VXLAN_STEER_BY_OUTER_MAC, 0);
2613 if (ret)
2614 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2615
2616 priv->vxlan_port = 0;
2617 }
2618
2619 static void mlx4_en_add_vxlan_port(struct net_device *dev,
2620 struct udp_tunnel_info *ti)
2621 {
2622 struct mlx4_en_priv *priv = netdev_priv(dev);
2623 __be16 port = ti->port;
2624 __be16 current_port;
2625
2626 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2627 return;
2628
2629 if (ti->sa_family != AF_INET)
2630 return;
2631
2632 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2633 return;
2634
2635 current_port = priv->vxlan_port;
2636 if (current_port && current_port != port) {
2637 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2638 ntohs(current_port), ntohs(port));
2639 return;
2640 }
2641
2642 priv->vxlan_port = port;
2643 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2644 }
2645
2646 static void mlx4_en_del_vxlan_port(struct net_device *dev,
2647 struct udp_tunnel_info *ti)
2648 {
2649 struct mlx4_en_priv *priv = netdev_priv(dev);
2650 __be16 port = ti->port;
2651 __be16 current_port;
2652
2653 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2654 return;
2655
2656 if (ti->sa_family != AF_INET)
2657 return;
2658
2659 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2660 return;
2661
2662 current_port = priv->vxlan_port;
2663 if (current_port != port) {
2664 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2665 return;
2666 }
2667
2668 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2669 }
2670
2671 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2672 struct net_device *dev,
2673 netdev_features_t features)
2674 {
2675 features = vlan_features_check(skb, features);
2676 features = vxlan_features_check(skb, features);
2677
2678 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
2679 * support inner IPv6 checksums and segmentation so we need to
2680 * strip that feature if this is an IPv6 encapsulated frame.
2681 */
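/* i.e. for an encapsulated CHECKSUM_PARTIAL skb that is not IPv4 over
 * the configured VXLAN UDP port, checksum and GSO offloads are cleared
 * and the stack falls back to software.
 */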
2682 if (skb->encapsulation &&
2683 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2684 struct mlx4_en_priv *priv = netdev_priv(dev);
2685
2686 if (!priv->vxlan_port ||
2687 (ip_hdr(skb)->version != 4) ||
2688 (udp_hdr(skb)->dest != priv->vxlan_port))
2689 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2690 }
2691
2692 return features;
2693 }
2694
2695 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2696 {
2697 struct mlx4_en_priv *priv = netdev_priv(dev);
2698 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
2699 struct mlx4_update_qp_params params;
2700 int err;
2701
2702 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2703 return -EOPNOTSUPP;
2704
2705 /* rate is provided to us in Mb/s; check whether it fits into 12 bits, if not use Gb/s */
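/* Worked example: maxrate = 5000 Mb/s does not fit in 12 bits (max 4095),
 * so it is programmed as 5000 / 1000 = 5 in Gb/s units; maxrate = 3000
 * fits and is programmed as 3000 in Mb/s units.
 */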
2706 if (maxrate >> 12) {
2707 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2708 params.rate_val = maxrate / 1000;
2709 } else if (maxrate) {
2710 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2711 params.rate_val = maxrate;
2712 } else { /* zero serves to revoke the QP rate-limitation */
2713 params.rate_unit = 0;
2714 params.rate_val = 0;
2715 }
2716
2717 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2718 &params);
2719 return err;
2720 }
2721
2722 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2723 {
2724 struct mlx4_en_priv *priv = netdev_priv(dev);
2725 struct mlx4_en_dev *mdev = priv->mdev;
2726 struct mlx4_en_port_profile new_prof;
2727 struct bpf_prog *old_prog;
2728 struct mlx4_en_priv *tmp;
2729 int tx_changed = 0;
2730 int xdp_ring_num;
2731 int port_up = 0;
2732 int err;
2733 int i;
2734
2735 xdp_ring_num = prog ? priv->rx_ring_num : 0;
2736
2737 /* No need to reconfigure buffers when simply swapping the
2738 * program for a new one.
2739 */
2740 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
2741 if (prog) {
2742 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2743 if (IS_ERR(prog))
2744 return PTR_ERR(prog);
2745 }
2746 mutex_lock(&mdev->state_lock);
2747 for (i = 0; i < priv->rx_ring_num; i++) {
2748 old_prog = rcu_dereference_protected(
2749 priv->rx_ring[i]->xdp_prog,
2750 lockdep_is_held(&mdev->state_lock));
2751 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
2752 if (old_prog)
2753 bpf_prog_put(old_prog);
2754 }
2755 mutex_unlock(&mdev->state_lock);
2756 return 0;
2757 }
2758
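/* Attaching or detaching a program changes the ring layout: one TX_XDP
 * ring is needed per RX ring (xdp_ring_num above), so the slower path
 * below rebuilds the resources with a new port profile.
 */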
2759 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
2760 return -EOPNOTSUPP;
2761
2762 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2763 if (!tmp)
2764 return -ENOMEM;
2765
2766 if (prog) {
2767 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2768 if (IS_ERR(prog)) {
2769 err = PTR_ERR(prog);
2770 goto out;
2771 }
2772 }
2773
2774 mutex_lock(&mdev->state_lock);
2775 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2776 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2777
2778 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2779 tx_changed = 1;
2780 new_prof.tx_ring_num[TX] =
2781 MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
2782 en_warn(priv, "Reducing the number of TX rings so as not to exceed the maximum total number of rings.\n");
2783 }
2784
2785 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
2786 if (err) {
2787 if (prog)
2788 bpf_prog_sub(prog, priv->rx_ring_num - 1);
2789 goto unlock_out;
2790 }
2791
2792 if (priv->port_up) {
2793 port_up = 1;
2794 mlx4_en_stop_port(dev, 1);
2795 }
2796
2797 mlx4_en_safe_replace_resources(priv, tmp);
2798 if (tx_changed)
2799 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
2800
2801 for (i = 0; i < priv->rx_ring_num; i++) {
2802 old_prog = rcu_dereference_protected(
2803 priv->rx_ring[i]->xdp_prog,
2804 lockdep_is_held(&mdev->state_lock));
2805 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
2806 if (old_prog)
2807 bpf_prog_put(old_prog);
2808 }
2809
2810 if (port_up) {
2811 err = mlx4_en_start_port(dev);
2812 if (err) {
2813 en_err(priv, "Failed starting port %d for XDP change\n",
2814 priv->port);
2815 queue_work(mdev->workqueue, &priv->watchdog_task);
2816 }
2817 }
2818
2819 unlock_out:
2820 mutex_unlock(&mdev->state_lock);
2821 out:
2822 kfree(tmp);
2823 return err;
2824 }
2825
2826 static bool mlx4_xdp_attached(struct net_device *dev)
2827 {
2828 struct mlx4_en_priv *priv = netdev_priv(dev);
2829
2830 return !!priv->tx_ring_num[TX_XDP];
2831 }
2832
2833 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2834 {
2835 switch (xdp->command) {
2836 case XDP_SETUP_PROG:
2837 return mlx4_xdp_set(dev, xdp->prog);
2838 case XDP_QUERY_PROG:
2839 xdp->prog_attached = mlx4_xdp_attached(dev);
2840 return 0;
2841 default:
2842 return -EINVAL;
2843 }
2844 }
2845
2846 static const struct net_device_ops mlx4_netdev_ops = {
2847 .ndo_open = mlx4_en_open,
2848 .ndo_stop = mlx4_en_close,
2849 .ndo_start_xmit = mlx4_en_xmit,
2850 .ndo_select_queue = mlx4_en_select_queue,
2851 .ndo_get_stats64 = mlx4_en_get_stats64,
2852 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2853 .ndo_set_mac_address = mlx4_en_set_mac,
2854 .ndo_validate_addr = eth_validate_addr,
2855 .ndo_change_mtu = mlx4_en_change_mtu,
2856 .ndo_do_ioctl = mlx4_en_ioctl,
2857 .ndo_tx_timeout = mlx4_en_tx_timeout,
2858 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2859 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2860 #ifdef CONFIG_NET_POLL_CONTROLLER
2861 .ndo_poll_controller = mlx4_en_netpoll,
2862 #endif
2863 .ndo_set_features = mlx4_en_set_features,
2864 .ndo_fix_features = mlx4_en_fix_features,
2865 .ndo_setup_tc = __mlx4_en_setup_tc,
2866 #ifdef CONFIG_RFS_ACCEL
2867 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2868 #endif
2869 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2870 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2871 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2872 .ndo_features_check = mlx4_en_features_check,
2873 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2874 .ndo_xdp = mlx4_xdp,
2875 };
2876
2877 static const struct net_device_ops mlx4_netdev_ops_master = {
2878 .ndo_open = mlx4_en_open,
2879 .ndo_stop = mlx4_en_close,
2880 .ndo_start_xmit = mlx4_en_xmit,
2881 .ndo_select_queue = mlx4_en_select_queue,
2882 .ndo_get_stats64 = mlx4_en_get_stats64,
2883 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2884 .ndo_set_mac_address = mlx4_en_set_mac,
2885 .ndo_validate_addr = eth_validate_addr,
2886 .ndo_change_mtu = mlx4_en_change_mtu,
2887 .ndo_tx_timeout = mlx4_en_tx_timeout,
2888 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2889 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2890 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2891 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2892 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2893 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2894 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2895 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2896 .ndo_get_vf_config = mlx4_en_get_vf_config,
2897 #ifdef CONFIG_NET_POLL_CONTROLLER
2898 .ndo_poll_controller = mlx4_en_netpoll,
2899 #endif
2900 .ndo_set_features = mlx4_en_set_features,
2901 .ndo_fix_features = mlx4_en_fix_features,
2902 .ndo_setup_tc = __mlx4_en_setup_tc,
2903 #ifdef CONFIG_RFS_ACCEL
2904 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2905 #endif
2906 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2907 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2908 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2909 .ndo_features_check = mlx4_en_features_check,
2910 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2911 .ndo_xdp = mlx4_xdp,
2912 };
2913
2914 struct mlx4_en_bond {
2915 struct work_struct work;
2916 struct mlx4_en_priv *priv;
2917 int is_bonded;
2918 struct mlx4_port_map port_map;
2919 };
2920
2921 static void mlx4_en_bond_work(struct work_struct *work)
2922 {
2923 struct mlx4_en_bond *bond = container_of(work,
2924 struct mlx4_en_bond,
2925 work);
2926 int err = 0;
2927 struct mlx4_dev *dev = bond->priv->mdev->dev;
2928
2929 if (bond->is_bonded) {
2930 if (!mlx4_is_bonded(dev)) {
2931 err = mlx4_bond(dev);
2932 if (err)
2933 en_err(bond->priv, "Failed to bond device\n");
2934 }
2935 if (!err) {
2936 err = mlx4_port_map_set(dev, &bond->port_map);
2937 if (err)
2938 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2939 bond->port_map.port1,
2940 bond->port_map.port2,
2941 err);
2942 }
2943 } else if (mlx4_is_bonded(dev)) {
2944 err = mlx4_unbond(dev);
2945 if (err)
2946 en_err(bond->priv, "Failed to unbond device\n");
2947 }
2948 dev_put(bond->priv->dev);
2949 kfree(bond);
2950 }
2951
2952 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2953 u8 v2p_p1, u8 v2p_p2)
2954 {
2955 struct mlx4_en_bond *bond = NULL;
2956
2957 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2958 if (!bond)
2959 return -ENOMEM;
2960
2961 INIT_WORK(&bond->work, mlx4_en_bond_work);
2962 bond->priv = priv;
2963 bond->is_bonded = is_bonded;
2964 bond->port_map.port1 = v2p_p1;
2965 bond->port_map.port2 = v2p_p2;
2966 dev_hold(priv->dev);
2967 queue_work(priv->mdev->workqueue, &bond->work);
2968 return 0;
2969 }
2970
2971 int mlx4_en_netdev_event(struct notifier_block *this,
2972 unsigned long event, void *ptr)
2973 {
2974 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2975 u8 port = 0;
2976 struct mlx4_en_dev *mdev;
2977 struct mlx4_dev *dev;
2978 int i, num_eth_ports = 0;
2979 bool do_bond = true;
2980 struct mlx4_en_priv *priv;
2981 u8 v2p_port1 = 0;
2982 u8 v2p_port2 = 0;
2983
2984 if (!net_eq(dev_net(ndev), &init_net))
2985 return NOTIFY_DONE;
2986
2987 mdev = container_of(this, struct mlx4_en_dev, nb);
2988 dev = mdev->dev;
2989
2990 /* Go into this mode only when two network devices set on two ports
2991 * of the same mlx4 device are slaves of the same bonding master
2992 */
2993 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2994 ++num_eth_ports;
2995 if (!port && (mdev->pndev[i] == ndev))
2996 port = i;
2997 mdev->upper[i] = mdev->pndev[i] ?
2998 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2999 /* condition not met: network device is a slave */
3000 if (!mdev->upper[i])
3001 do_bond = false;
3002 if (num_eth_ports < 2)
3003 continue;
3004 /* condition not met: same master */
3005 if (mdev->upper[i] != mdev->upper[i-1])
3006 do_bond = false;
3007 }
3008 /* condition not met: 2 slaves */
3009 do_bond = (num_eth_ports == 2) ? do_bond : false;
3010
3011 /* handle only events that come with enough info */
3012 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3013 return NOTIFY_DONE;
3014
3015 priv = netdev_priv(ndev);
3016 if (do_bond) {
3017 struct netdev_notifier_bonding_info *notifier_info = ptr;
3018 struct netdev_bonding_info *bonding_info =
3019 &notifier_info->bonding_info;
3020
3021 /* required mode 1, 2 or 4 */
3022 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3023 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3024 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3025 do_bond = false;
3026
3027 /* require exactly 2 slaves */
3028 if (bonding_info->master.num_slaves != 2)
3029 do_bond = false;
3030
3031 /* calc v2p */
3032 if (do_bond) {
3033 if (bonding_info->master.bond_mode ==
3034 BOND_MODE_ACTIVEBACKUP) {
3035 /* in active-backup mode virtual ports are
3036 * mapped to the physical port of the active
3037 * slave */
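/* e.g. if this event is for port 1 and port 1 is currently the
 * backup slave, both virtual ports are steered to physical port 2
 * (v2p_port1 = v2p_port2 = 2).
 */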
3038 if (bonding_info->slave.state ==
3039 BOND_STATE_BACKUP) {
3040 if (port == 1) {
3041 v2p_port1 = 2;
3042 v2p_port2 = 2;
3043 } else {
3044 v2p_port1 = 1;
3045 v2p_port2 = 1;
3046 }
3047 } else { /* BOND_STATE_ACTIVE */
3048 if (port == 1) {
3049 v2p_port1 = 1;
3050 v2p_port2 = 1;
3051 } else {
3052 v2p_port1 = 2;
3053 v2p_port2 = 2;
3054 }
3055 }
3056 } else { /* Active-Active */
3057 /* in active-active mode a virtual port is
3058 * mapped to the native physical port if and only
3059 * if the physical port is up */
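/* e.g. if this event is for port 1 and its link is down, port 2
 * keeps its native mapping (v2p_port2 = 2) while virtual port 1
 * is remapped to physical port 2 (v2p_port1 = 2).
 */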
3060 __s8 link = bonding_info->slave.link;
3061
3062 if (port == 1)
3063 v2p_port2 = 2;
3064 else
3065 v2p_port1 = 1;
3066 if ((link == BOND_LINK_UP) ||
3067 (link == BOND_LINK_FAIL)) {
3068 if (port == 1)
3069 v2p_port1 = 1;
3070 else
3071 v2p_port2 = 2;
3072 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3073 if (port == 1)
3074 v2p_port1 = 2;
3075 else
3076 v2p_port2 = 1;
3077 }
3078 }
3079 }
3080 }
3081
3082 mlx4_en_queue_bond_work(priv, do_bond,
3083 v2p_port1, v2p_port2);
3084
3085 return NOTIFY_DONE;
3086 }
3087
3088 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3089 struct mlx4_en_stats_bitmap *stats_bitmap,
3090 u8 rx_ppp, u8 rx_pause,
3091 u8 tx_ppp, u8 tx_pause)
3092 {
3093 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
3094
3095 if (!mlx4_is_slave(dev) &&
3096 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3097 mutex_lock(&stats_bitmap->mutex);
3098 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3099
3100 if (rx_ppp)
3101 bitmap_set(stats_bitmap->bitmap, last_i,
3102 NUM_FLOW_PRIORITY_STATS_RX);
3103 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3104
3105 if (rx_pause && !(rx_ppp))
3106 bitmap_set(stats_bitmap->bitmap, last_i,
3107 NUM_FLOW_STATS_RX);
3108 last_i += NUM_FLOW_STATS_RX;
3109
3110 if (tx_ppp)
3111 bitmap_set(stats_bitmap->bitmap, last_i,
3112 NUM_FLOW_PRIORITY_STATS_TX);
3113 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3114
3115 if (tx_pause && !(tx_ppp))
3116 bitmap_set(stats_bitmap->bitmap, last_i,
3117 NUM_FLOW_STATS_TX);
3118 last_i += NUM_FLOW_STATS_TX;
3119
3120 mutex_unlock(&stats_bitmap->mutex);
3121 }
3122 }
3123
3124 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
3125 struct mlx4_en_stats_bitmap *stats_bitmap,
3126 u8 rx_ppp, u8 rx_pause,
3127 u8 tx_ppp, u8 tx_pause)
3128 {
3129 int last_i = 0;
3130
3131 mutex_init(&stats_bitmap->mutex);
3132 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
3133
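/* The bitmap is filled in a fixed order: main netdev stats, port stats,
 * PF stats, flow/PFC stats (filled by mlx4_en_update_pfc_stats_bitmap()),
 * packet stats and finally XDP stats.
 */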
3134 if (mlx4_is_slave(dev)) {
3135 bitmap_set(stats_bitmap->bitmap, last_i +
3136 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
3137 bitmap_set(stats_bitmap->bitmap, last_i +
3138 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
3139 bitmap_set(stats_bitmap->bitmap, last_i +
3140 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
3141 bitmap_set(stats_bitmap->bitmap, last_i +
3142 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
3143 bitmap_set(stats_bitmap->bitmap, last_i +
3144 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
3145 bitmap_set(stats_bitmap->bitmap, last_i +
3146 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3147 } else {
3148 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
3149 }
3150 last_i += NUM_MAIN_STATS;
3151
3152 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
3153 last_i += NUM_PORT_STATS;
3154
3155 if (mlx4_is_master(dev))
3156 bitmap_set(stats_bitmap->bitmap, last_i,
3157 NUM_PF_STATS);
3158 last_i += NUM_PF_STATS;
3159
3160 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3161 rx_ppp, rx_pause,
3162 tx_ppp, tx_pause);
3163 last_i += NUM_FLOW_STATS;
3164
3165 if (!mlx4_is_slave(dev))
3166 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
3167 last_i += NUM_PKT_STATS;
3168
3169 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3170 last_i += NUM_XDP_STATS;
3171 }
3172
3173 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3174 struct mlx4_en_port_profile *prof)
3175 {
3176 struct net_device *dev;
3177 struct mlx4_en_priv *priv;
3178 int i, t;
3179 int err;
3180
3181 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
3182 MAX_TX_RINGS, MAX_RX_RINGS);
3183 if (dev == NULL)
3184 return -ENOMEM;
3185
3186 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
3187 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3188
3189 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
3190 dev->dev_port = port - 1;
3191
3192 /*
3193 * Initialize driver private data
3194 */
3195
3196 priv = netdev_priv(dev);
3197 memset(priv, 0, sizeof(struct mlx4_en_priv));
3198 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
3199 spin_lock_init(&priv->stats_lock);
3200 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3201 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3202 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3203 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3204 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
3205 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3206 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
3207 #ifdef CONFIG_RFS_ACCEL
3208 INIT_LIST_HEAD(&priv->filters);
3209 spin_lock_init(&priv->filters_lock);
3210 #endif
3211
3212 priv->dev = dev;
3213 priv->mdev = mdev;
3214 priv->ddev = &mdev->pdev->dev;
3215 priv->prof = prof;
3216 priv->port = port;
3217 priv->port_up = false;
3218 priv->flags = prof->flags;
3219 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
3220 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3221 MLX4_WQE_CTRL_SOLICITED);
3222 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
3223 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
3224 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
3225
3226 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3227 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3228 if (!priv->tx_ring_num[t])
3229 continue;
3230
3231 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
3232 MAX_TX_RINGS, GFP_KERNEL);
3233 if (!priv->tx_ring[t]) {
3234 err = -ENOMEM;
3235 goto err_free_tx;
3236 }
3237 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
3238 MAX_TX_RINGS, GFP_KERNEL);
3239 if (!priv->tx_cq[t]) {
3240 kfree(priv->tx_ring[t]);
3241 err = -ENOMEM;
3242 goto out;
3243 }
3244 }
3245 priv->rx_ring_num = prof->rx_ring_num;
3246 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
3247 priv->cqe_size = mdev->dev->caps.cqe_size;
3248 priv->mac_index = -1;
3249 priv->msg_enable = MLX4_EN_MSG_LEVEL;
3250 #ifdef CONFIG_MLX4_EN_DCB
3251 if (!mlx4_is_slave(priv->mdev->dev)) {
3252 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3253 DCB_CAP_DCBX_VER_IEEE;
3254 priv->flags |= MLX4_EN_DCB_ENABLED;
3255 priv->cee_config.pfc_state = false;
3256
3257 for (i = 0; i < MLX4_EN_NUM_UP; i++)
3258 priv->cee_config.dcb_pfc[i] = pfc_disabled;
3259
3260 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
3261 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3262 } else {
3263 en_info(priv, "enabling only PFC DCB ops\n");
3264 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3265 }
3266 }
3267 #endif
3268
3269 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3270 INIT_HLIST_HEAD(&priv->mac_hash[i]);
3271
3272 /* Query for default mac and max mtu */
3273 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
3274
3275 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3276 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3277 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3278
3279 /* Set default MAC */
3280 dev->addr_len = ETH_ALEN;
3281 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3282 if (!is_valid_ether_addr(dev->dev_addr)) {
3283 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
3284 priv->port, dev->dev_addr);
3285 err = -EINVAL;
3286 goto out;
3287 } else if (mlx4_is_slave(priv->mdev->dev) &&
3288 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3289 /* Random MAC was assigned in mlx4_slave_cap
3290 * in mlx4_core module
3291 */
3292 dev->addr_assign_type |= NET_ADDR_RANDOM;
3293 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
3294 }
3295
3296 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
3297
3298 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3299 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3300 err = mlx4_en_alloc_resources(priv);
3301 if (err)
3302 goto out;
3303
3304 /* Initialize time stamping config */
3305 priv->hwtstamp_config.flags = 0;
3306 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3307 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3308
3309 /* Allocate page for receive rings */
3310 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
3311 MLX4_EN_PAGE_SIZE);
3312 if (err) {
3313 en_err(priv, "Failed to allocate page for rx qps\n");
3314 goto out;
3315 }
3316 priv->allocated = 1;
3317
3318 /*
3319 * Initialize netdev entry points
3320 */
3321 if (mlx4_is_master(priv->mdev->dev))
3322 dev->netdev_ops = &mlx4_netdev_ops_master;
3323 else
3324 dev->netdev_ops = &mlx4_netdev_ops;
3325 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
3326 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
3327 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
3328
3329 dev->ethtool_ops = &mlx4_en_ethtool_ops;
3330
3331 /*
3332 * Set driver features
3333 */
3334 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3335 if (mdev->LSO_support)
3336 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3337
3338 dev->vlan_features = dev->hw_features;
3339
3340 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
3341 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
3342 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3343 NETIF_F_HW_VLAN_CTAG_FILTER;
3344 dev->hw_features |= NETIF_F_LOOPBACK |
3345 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3346
3347 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3348 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3349 NETIF_F_HW_VLAN_STAG_FILTER;
3350 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3351 }
3352
3353 if (mlx4_is_slave(mdev->dev)) {
3354 bool vlan_offload_disabled;
3355 int phv;
3356
3357 err = get_phv_bit(mdev->dev, port, &phv);
3358 if (!err && phv) {
3359 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3360 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3361 }
3362 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
3363 &vlan_offload_disabled);
3364 if (!err && vlan_offload_disabled) {
3365 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3366 NETIF_F_HW_VLAN_CTAG_RX |
3367 NETIF_F_HW_VLAN_STAG_TX |
3368 NETIF_F_HW_VLAN_STAG_RX);
3369 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3370 NETIF_F_HW_VLAN_CTAG_RX |
3371 NETIF_F_HW_VLAN_STAG_TX |
3372 NETIF_F_HW_VLAN_STAG_RX);
3373 }
3374 } else {
3375 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3376 !(mdev->dev->caps.flags2 &
3377 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3378 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3379 }
3380
3381 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3382 dev->hw_features |= NETIF_F_RXFCS;
3383
3384 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3385 dev->hw_features |= NETIF_F_RXALL;
3386
3387 if (mdev->dev->caps.steering_mode ==
3388 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3389 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
3390 dev->hw_features |= NETIF_F_NTUPLE;
3391
3392 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3393 dev->priv_flags |= IFF_UNICAST_FLT;
3394
3395 /* Setting a default hash function value */
3396 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3397 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3398 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3399 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3400 } else {
3401 en_warn(priv,
3402 "No RSS hash capabilities exposed, using Toeplitz\n");
3403 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3404 }
3405
3406 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3407 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3408 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3409 NETIF_F_GSO_PARTIAL;
3410 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3411 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3412 NETIF_F_GSO_PARTIAL;
3413 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3414 }
3415
3416 /* MTU range: 46 - hw-specific max */
3417 dev->min_mtu = MLX4_EN_MIN_MTU;
3418 dev->max_mtu = priv->max_mtu;
3419
3420 mdev->pndev[port] = dev;
3421 mdev->upper[port] = NULL;
3422
3423 netif_carrier_off(dev);
3424 mlx4_en_set_default_moderation(priv);
3425
3426 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
3427 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3428
3429 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3430
3431 /* Configure port */
3432 mlx4_en_calc_rx_buf(dev);
3433 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3434 priv->rx_skb_size + ETH_FCS_LEN,
3435 prof->tx_pause, prof->tx_ppp,
3436 prof->rx_pause, prof->rx_ppp);
3437 if (err) {
3438 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3439 priv->port, err);
3440 goto out;
3441 }
3442
3443 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3444 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3445 if (err) {
3446 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3447 err);
3448 goto out;
3449 }
3450 }
3451
3452 /* Init port */
3453 en_warn(priv, "Initializing port\n");
3454 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3455 if (err) {
3456 en_err(priv, "Failed Initializing port\n");
3457 goto out;
3458 }
3459 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3460
3461 /* Initialize time stamp mechanism */
3462 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3463 mlx4_en_init_timestamp(mdev);
3464
3465 queue_delayed_work(mdev->workqueue, &priv->service_task,
3466 SERVICE_TASK_DELAY);
3467
3468 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3469 mdev->profile.prof[priv->port].rx_ppp,
3470 mdev->profile.prof[priv->port].rx_pause,
3471 mdev->profile.prof[priv->port].tx_ppp,
3472 mdev->profile.prof[priv->port].tx_pause);
3473
3474 err = register_netdev(dev);
3475 if (err) {
3476 en_err(priv, "Netdev registration failed for port %d\n", port);
3477 goto out;
3478 }
3479
3480 priv->registered = 1;
3481 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3482 dev);
3483
3484 return 0;
3485
3486 err_free_tx:
3487 while (t--) {
3488 kfree(priv->tx_ring[t]);
3489 kfree(priv->tx_cq[t]);
3490 }
3491 out:
3492 mlx4_en_destroy_netdev(dev);
3493 return err;
3494 }
3495
3496 int mlx4_en_reset_config(struct net_device *dev,
3497 struct hwtstamp_config ts_config,
3498 netdev_features_t features)
3499 {
3500 struct mlx4_en_priv *priv = netdev_priv(dev);
3501 struct mlx4_en_dev *mdev = priv->mdev;
3502 struct mlx4_en_port_profile new_prof;
3503 struct mlx4_en_priv *tmp;
3504 int port_up = 0;
3505 int err = 0;
3506
3507 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3508 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3509 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3510 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3511 return 0; /* Nothing to change */
3512
3513 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3514 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3515 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3516 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3517 return -EINVAL;
3518 }
3519
3520 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3521 if (!tmp)
3522 return -ENOMEM;
3523
3524 mutex_lock(&mdev->state_lock);
3525
3526 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3527 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3528
3529 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
3530 if (err)
3531 goto out;
3532
3533 if (priv->port_up) {
3534 port_up = 1;
3535 mlx4_en_stop_port(dev, 1);
3536 }
3537
3538 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3539 ts_config.rx_filter,
3540 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3541
3542 mlx4_en_safe_replace_resources(priv, tmp);
3543
3544 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3545 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3546 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3547 else
3548 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3549 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3550 /* RX time-stamping is OFF, update the RX vlan offload
3551 * to the latest wanted state
3552 */
3553 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3554 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3555 else
3556 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3557 }
3558
3559 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3560 if (features & NETIF_F_RXFCS)
3561 dev->features |= NETIF_F_RXFCS;
3562 else
3563 dev->features &= ~NETIF_F_RXFCS;
3564 }
3565
3566 /* RX vlan offload and RX time-stamping can't coexist!
3567 * Regardless of the caller's choice, turn off RX vlan offload
3568 * when RX time-stamping is ON.
3569 */
3570 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3571 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3572 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3573 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3574 }
3575
3576 if (port_up) {
3577 err = mlx4_en_start_port(dev);
3578 if (err)
3579 en_err(priv, "Failed starting port\n");
3580 }
3581
3582 out:
3583 mutex_unlock(&mdev->state_lock);
3584 kfree(tmp);
3585 if (!err)
3586 netdev_features_change(dev);
3587 return err;
3588 }