drivers/net/ethernet/mellanox/mlx5/core/lag.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

enum {
        MLX5_LAG_FLAG_BONDED = 1 << 0,
};

struct lag_func {
        struct mlx5_core_dev *dev;
        struct net_device *netdev;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
        enum netdev_lag_tx_type tx_type;
        struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
        bool is_bonded;
};

/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
        u8 flags;
        u8 v2p_map[MLX5_MAX_PORTS];
        struct lag_func pf[MLX5_MAX_PORTS];
        struct lag_tracker tracker;
        struct delayed_work bond_work;
        struct notifier_block nb;

        /* Admin state. Allow lag only if allowed is true
         * even if network conditions for lag were met
         */
        bool allowed;
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);

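/* Firmware command wrappers. remap_port1/remap_port2 are written to
 * tx_remap_affinity_1/2 in the LAG context and select which physical
 * port carries the traffic of each logical port.
 */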
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
                               u8 remap_port2)
{
        u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
        void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

        MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
                               u8 remap_port2)
{
        u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
        void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

        MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
        MLX5_SET(modify_lag_in, in, field_select, 0x1);

        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

        MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};

        MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

        MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
        return dev->priv.lag;
}

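/* Return the index (0 or 1) of the PF that owns @ndev, or -1 if the
 * netdev does not belong to this lag device.
 */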
static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
                                       struct net_device *ndev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (ldev->pf[i].netdev == ndev)
                        return i;

        return -1;
}

static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
{
        return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

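/* Infer the virtual-to-physical port mapping from the tracked lower
 * state: by default each logical port maps to itself; a port whose tx
 * is disabled or whose link is down has its traffic remapped to the
 * other physical port (if port 1 is unusable, everything goes to
 * port 2).
 */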
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
                                           u8 *port1, u8 *port2)
{
        *port1 = 1;
        *port2 = 2;
        if (!tracker->netdev_state[0].tx_enabled ||
            !tracker->netdev_state[0].link_up) {
                *port1 = 2;
                return;
        }

        if (!tracker->netdev_state[1].tx_enabled ||
            !tracker->netdev_state[1].link_up)
                *port2 = 1;
}

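/* Create the hardware LAG, using the tx affinity mapping inferred from
 * the current tracker state.
 */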
static void mlx5_activate_lag(struct mlx5_lag *ldev,
                              struct lag_tracker *tracker)
{
        struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
        int err;

        ldev->flags |= MLX5_LAG_FLAG_BONDED;

        mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
                                       &ldev->v2p_map[1]);

        err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
        if (err)
                mlx5_core_err(dev0,
                              "Failed to create LAG (%d)\n",
                              err);
}

static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
        int err;

        ldev->flags &= ~MLX5_LAG_FLAG_BONDED;

        err = mlx5_cmd_destroy_lag(dev0);
        if (err)
                mlx5_core_err(dev0,
                              "Failed to destroy LAG (%d)\n",
                              err);
}

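/* Re-evaluate the bonding state. Callers hold the mlx5 device list lock.
 * Three transitions are handled:
 *  - not bonded -> bonded: remove the per-PF IB devices, create the LAG
 *    and expose a single IB device on PF0 (RoCE is enabled on PF1);
 *  - bonded -> bonded: update the tx affinity mapping if it changed;
 *  - bonded -> not bonded: tear down the LAG and restore the per-PF IB
 *    devices.
 */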
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
        struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
        struct lag_tracker tracker;
        u8 v2p_port1, v2p_port2;
        int i, err;
        bool do_bond;

        if (!dev0 || !dev1)
                return;

        mutex_lock(&lag_mutex);
        tracker = ldev->tracker;
        mutex_unlock(&lag_mutex);

        do_bond = tracker.is_bonded && ldev->allowed;

        if (do_bond && !mlx5_lag_is_bonded(ldev)) {
                for (i = 0; i < MLX5_MAX_PORTS; i++)
                        mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
                                                    MLX5_INTERFACE_PROTOCOL_IB);

                mlx5_activate_lag(ldev, &tracker);

                mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_nic_vport_enable_roce(dev1);
        } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
                mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
                                               &v2p_port2);

                if ((v2p_port1 != ldev->v2p_map[0]) ||
                    (v2p_port2 != ldev->v2p_map[1])) {
                        ldev->v2p_map[0] = v2p_port1;
                        ldev->v2p_map[1] = v2p_port2;

                        err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
                        if (err)
                                mlx5_core_err(dev0,
                                              "Failed to modify LAG (%d)\n",
                                              err);
                }
        } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
                mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_nic_vport_disable_roce(dev1);

                mlx5_deactivate_lag(ldev);

                for (i = 0; i < MLX5_MAX_PORTS; i++)
                        if (ldev->pf[i].dev)
                                mlx5_add_dev_by_protocol(ldev->pf[i].dev,
                                                         MLX5_INTERFACE_PROTOCOL_IB);
        }
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
        schedule_delayed_work(&ldev->bond_work, delay);
}

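/* Deferred bond work: if the device list lock is contended, retry in one
 * second; otherwise re-evaluate the bonding state.
 */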
static void mlx5_do_bond_work(struct work_struct *work)
{
        struct delayed_work *delayed_work = to_delayed_work(work);
        struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
                                             bond_work);
        int status;

        status = mlx5_dev_list_trylock();
        if (!status) {
                /* 1 sec delay. */
                mlx5_queue_bond_work(ldev, HZ);
                return;
        }

        mlx5_do_bond(ldev);
        mlx5_dev_list_unlock();
}

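/* Handle NETDEV_CHANGEUPPER: recompute whether both of our ports (and
 * only them) are enslaved to the same LAG master in active-backup or
 * hash mode. Returns 1 if the tracked bonding state changed.
 */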
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
                                         struct lag_tracker *tracker,
                                         struct net_device *ndev,
                                         struct netdev_notifier_changeupper_info *info)
{
        struct net_device *upper = info->upper_dev, *ndev_tmp;
        struct netdev_lag_upper_info *lag_upper_info = NULL;
        bool is_bonded;
        int bond_status = 0;
        int num_slaves = 0;
        int idx;

        if (!netif_is_lag_master(upper))
                return 0;

        if (info->linking)
                lag_upper_info = info->upper_info;

        /* The event may still be of interest if the slave does not belong to
         * us, but is enslaved to a master which has one or more of our netdevs
         * as slaves (e.g., if a new slave is added to a master that bonds two
         * of our netdevs, we should unbond).
         */
        rcu_read_lock();
        for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
                idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
                if (idx > -1)
                        bond_status |= (1 << idx);

                num_slaves++;
        }
        rcu_read_unlock();

        /* None of this lagdev's netdevs are slaves of this master. */
        if (!(bond_status & 0x3))
                return 0;

        if (lag_upper_info)
                tracker->tx_type = lag_upper_info->tx_type;

        /* Determine bonding status:
         * A device is considered bonded if both its physical ports are slaves
         * of the same lag master, and only them.
         * Lag mode must be activebackup or hash.
         */
        is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
                    (bond_status == 0x3) &&
                    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
                     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

        if (tracker->is_bonded != is_bonded) {
                tracker->is_bonded = is_bonded;
                return 1;
        }

        return 0;
}

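/* Handle NETDEV_CHANGELOWERSTATE for one of our slave netdevs: record
 * its lower state (link up / tx enabled), which later drives the tx
 * affinity mapping. Returns 1 if the tracker was updated.
 */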
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
                                              struct lag_tracker *tracker,
                                              struct net_device *ndev,
                                              struct netdev_notifier_changelowerstate_info *info)
{
        struct netdev_lag_lower_state_info *lag_lower_info;
        int idx;

        if (!netif_is_lag_port(ndev))
                return 0;

        idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
        if (idx == -1)
                return 0;

        /* This information is used to determine virtual to physical
         * port mapping.
         */
        lag_lower_info = info->lower_state_info;
        if (!lag_lower_info)
                return 0;

        tracker->netdev_state[idx] = *lag_lower_info;

        return 1;
}

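/* Netdevice notifier callback: update the tracker from bonding-related
 * events and schedule the bond work if the bonding state changed.
 */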
static int mlx5_lag_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct lag_tracker tracker;
        struct mlx5_lag *ldev;
        int changed = 0;

        if (!net_eq(dev_net(ndev), &init_net))
                return NOTIFY_DONE;

        if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
                return NOTIFY_DONE;

        ldev = container_of(this, struct mlx5_lag, nb);
        tracker = ldev->tracker;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
                                                        ptr);
                break;
        case NETDEV_CHANGELOWERSTATE:
                changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
                                                             ndev, ptr);
                break;
        }

        mutex_lock(&lag_mutex);
        ldev->tracker = tracker;
        mutex_unlock(&lag_mutex);

        if (changed)
                mlx5_queue_bond_work(ldev, 0);

        return NOTIFY_DONE;
}

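/* LAG is not allowed while SR-IOV is enabled on either PF. */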
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
        if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
            (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
                return false;
        else
                return true;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
        struct mlx5_lag *ldev;

        ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
        if (!ldev)
                return NULL;

        INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
        ldev->allowed = mlx5_lag_check_prereq(ldev);

        return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
        kfree(ldev);
}

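/* Record a PF in the lag device, indexed by its PCI function number,
 * reset its tracked lower state and re-evaluate whether LAG is
 * administratively allowed.
 */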
static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
                                struct mlx5_core_dev *dev,
                                struct net_device *netdev)
{
        unsigned int fn = PCI_FUNC(dev->pdev->devfn);

        if (fn >= MLX5_MAX_PORTS)
                return;

        mutex_lock(&lag_mutex);
        ldev->pf[fn].dev = dev;
        ldev->pf[fn].netdev = netdev;
        ldev->tracker.netdev_state[fn].link_up = 0;
        ldev->tracker.netdev_state[fn].tx_enabled = 0;

        ldev->allowed = mlx5_lag_check_prereq(ldev);
        dev->priv.lag = ldev;

        mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
                                   struct mlx5_core_dev *dev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (ldev->pf[i].dev == dev)
                        break;

        if (i == MLX5_MAX_PORTS)
                return;

        mutex_lock(&lag_mutex);
        memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

        dev->priv.lag = NULL;
        ldev->allowed = mlx5_lag_check_prereq(ldev);
        mutex_unlock(&lag_mutex);
}

/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
        struct mlx5_lag *ldev = NULL;
        struct mlx5_core_dev *tmp_dev;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
            !MLX5_CAP_GEN(dev, lag_master) ||
            (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
                return;

        tmp_dev = mlx5_get_next_phys_dev(dev);
        if (tmp_dev)
                ldev = tmp_dev->priv.lag;

        if (!ldev) {
                ldev = mlx5_lag_dev_alloc();
                if (!ldev) {
                        mlx5_core_err(dev, "Failed to alloc lag dev\n");
                        return;
                }
        }

        mlx5_lag_dev_add_pf(ldev, dev, netdev);

        if (!ldev->nb.notifier_call) {
                ldev->nb.notifier_call = mlx5_lag_netdev_event;
                if (register_netdevice_notifier(&ldev->nb)) {
                        ldev->nb.notifier_call = NULL;
                        mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
                }
        }
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        int i;

        ldev = mlx5_lag_dev_get(dev);
        if (!ldev)
                return;

        if (mlx5_lag_is_bonded(ldev))
                mlx5_deactivate_lag(ldev);

        mlx5_lag_dev_remove_pf(ldev, dev);

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (ldev->pf[i].dev)
                        break;

        if (i == MLX5_MAX_PORTS) {
                if (ldev->nb.notifier_call)
                        unregister_netdevice_notifier(&ldev->nb);
                cancel_delayed_work_sync(&ldev->bond_work);
                mlx5_lag_dev_free(ldev);
        }
}

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        bool res;

        mutex_lock(&lag_mutex);
        ldev = mlx5_lag_dev_get(dev);
        res = ldev && mlx5_lag_is_bonded(ldev);
        mutex_unlock(&lag_mutex);

        return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

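/* Set the administrative state and re-evaluate bonding under the device
 * list lock. Allowing LAG is refused (-EINVAL) while the prerequisites
 * (no SR-IOV on either PF) are not met.
 */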
static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
{
        struct mlx5_lag *ldev;
        int ret = 0;
        bool lag_active;

        mlx5_dev_list_lock();

        ldev = mlx5_lag_dev_get(dev);
        if (!ldev) {
                ret = -ENODEV;
                goto unlock;
        }
        lag_active = mlx5_lag_is_bonded(ldev);
        if (!mlx5_lag_check_prereq(ldev) && allow) {
                ret = -EINVAL;
                goto unlock;
        }
        if (ldev->allowed == allow)
                goto unlock;
        ldev->allowed = allow;
        if ((lag_active && !allow) || allow)
                mlx5_do_bond(ldev);
unlock:
        mlx5_dev_list_unlock();
        return ret;
}

int mlx5_lag_forbid(struct mlx5_core_dev *dev)
{
        return mlx5_lag_set_state(dev, false);
}

int mlx5_lag_allow(struct mlx5_core_dev *dev)
{
        return mlx5_lag_set_state(dev, true);
}

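/* Return the netdev that currently carries the bond's traffic: the
 * tx-enabled slave in active-backup mode, PF0's netdev otherwise. A
 * reference is held on the returned netdev; NULL if not bonded.
 */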
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
        struct net_device *ndev = NULL;
        struct mlx5_lag *ldev;

        mutex_lock(&lag_mutex);
        ldev = mlx5_lag_dev_get(dev);

        if (!(ldev && mlx5_lag_is_bonded(ldev)))
                goto unlock;

        if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
                ndev = ldev->tracker.netdev_state[0].tx_enabled ?
                       ldev->pf[0].netdev : ldev->pf[1].netdev;
        } else {
                ndev = ldev->pf[0].netdev;
        }
        if (ndev)
                dev_hold(ndev);

unlock:
        mutex_unlock(&lag_mutex);

        return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
                                                 priv);
        struct mlx5_lag *ldev;

        if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
                return true;

        ldev = mlx5_lag_dev_get(dev);
        if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
                return true;

        /* If bonded, we do not add an IB device for PF1. */
        return false;
}