1 /*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
37
38 #include <linux/mlx4/cmd.h>
39
40 #include "mlx4.h"
41 #include "mlx4_stats.h"
42
43 #define MLX4_MAC_VALID (1ull << 63)
44
45 #define MLX4_VLAN_VALID (1u << 31)
46 #define MLX4_VLAN_MASK 0xfff
47
48 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
49 #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
50 #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
51 #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
52
53 #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
54 #define MLX4_IGNORE_FCS_MASK 0x1
55
56 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
57 {
58 int i;
59
60 mutex_init(&table->mutex);
61 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
62 table->entries[i] = 0;
63 table->refs[i] = 0;
64 table->is_dup[i] = false;
65 }
66 table->max = 1 << dev->caps.log_num_macs;
67 table->total = 0;
68 }
69
70 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
71 {
72 int i;
73
74 mutex_init(&table->mutex);
75 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
76 table->entries[i] = 0;
77 table->refs[i] = 0;
78 table->is_dup[i] = false;
79 }
80 table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
81 table->total = 0;
82 }
83
84 void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
85 struct mlx4_roce_gid_table *table)
86 {
87 int i;
88
89 mutex_init(&table->mutex);
90 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
91 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
92 }
93
94 static int validate_index(struct mlx4_dev *dev,
95 struct mlx4_mac_table *table, int index)
96 {
97 int err = 0;
98
99 if (index < 0 || index >= table->max || !table->entries[index]) {
100 mlx4_warn(dev, "No valid Mac entry for the given index\n");
101 err = -EINVAL;
102 }
103 return err;
104 }
105
106 static int find_index(struct mlx4_dev *dev,
107 struct mlx4_mac_table *table, u64 mac)
108 {
109 int i;
110
111 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
112 if (table->refs[i] &&
113 (MLX4_MAC_MASK & mac) ==
114 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
115 return i;
116 }
117 /* Mac not found */
118 return -EINVAL;
119 }
120
121 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
122 __be64 *entries)
123 {
124 struct mlx4_cmd_mailbox *mailbox;
125 u32 in_mod;
126 int err;
127
128 mailbox = mlx4_alloc_cmd_mailbox(dev);
129 if (IS_ERR(mailbox))
130 return PTR_ERR(mailbox);
131
132 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
133
134 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
135
136 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
137 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
138 MLX4_CMD_NATIVE);
139
140 mlx4_free_cmd_mailbox(dev, mailbox);
141 return err;
142 }
143
144 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
145 {
146 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
147 struct mlx4_mac_table *table = &info->mac_table;
148 int i;
149
150 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
151 if (!table->refs[i])
152 continue;
153
154 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
155 *idx = i;
156 return 0;
157 }
158 }
159
160 return -ENOENT;
161 }
162 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
163
164 static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
165 {
166 int i, num_eth_ports = 0;
167
168 if (!mlx4_is_mfunc(dev))
169 return false;
170 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
171 ++num_eth_ports;
172
173 	return num_eth_ports == 2;
174 }
175
176 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
177 {
178 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
179 struct mlx4_mac_table *table = &info->mac_table;
180 int i, err = 0;
181 int free = -1;
182 int free_for_dup = -1;
183 bool dup = mlx4_is_mf_bonded(dev);
184 u8 dup_port = (port == 1) ? 2 : 1;
185 struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
186 bool need_mf_bond = mlx4_need_mf_bond(dev);
187 bool can_mf_bond = true;
188
189 mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
190 (unsigned long long)mac, port,
191 dup ? "with" : "without");
192
193 if (need_mf_bond) {
194 if (port == 1) {
195 mutex_lock(&table->mutex);
196 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
197 } else {
198 mutex_lock(&dup_table->mutex);
199 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
200 }
201 } else {
202 mutex_lock(&table->mutex);
203 }
204
205 if (need_mf_bond) {
206 int index_at_port = -1;
207 int index_at_dup_port = -1;
208
209 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
210 if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
211 index_at_port = i;
212 if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
213 index_at_dup_port = i;
214 }
215
216 /* check that same mac is not in the tables at different indices */
217 if ((index_at_port != index_at_dup_port) &&
218 (index_at_port >= 0) &&
219 (index_at_dup_port >= 0))
220 can_mf_bond = false;
221
222 /* If the mac is already in the primary table, the slot must be
223 * available in the duplicate table as well.
224 */
225 if (index_at_port >= 0 && index_at_dup_port < 0 &&
226 dup_table->refs[index_at_port]) {
227 can_mf_bond = false;
228 }
229 /* If the mac is already in the duplicate table, check that the
230 * corresponding index is not occupied in the primary table, or
231 * the primary table already contains the mac at the same index.
232 * Otherwise, you cannot bond (primary contains a different mac
233 * at that index).
234 */
235 if (index_at_dup_port >= 0) {
236 if (!table->refs[index_at_dup_port] ||
237 ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
238 free_for_dup = index_at_dup_port;
239 else
240 can_mf_bond = false;
241 }
242 }
243
244 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
245 if (!table->refs[i]) {
246 if (free < 0)
247 free = i;
248 if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
249 if (!dup_table->refs[i])
250 free_for_dup = i;
251 }
252 continue;
253 }
254
255 if ((MLX4_MAC_MASK & mac) ==
256 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
257 /* MAC already registered, increment ref count */
258 err = i;
259 ++table->refs[i];
260 if (dup) {
261 u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
262
263 if (dup_mac != mac || !dup_table->is_dup[i]) {
264 	mlx4_warn(dev, "register mac: expected duplicate mac 0x%llx on port %d index %d\n",
265 mac, dup_port, i);
266 }
267 }
268 goto out;
269 }
270 }
271
272 if (need_mf_bond && (free_for_dup < 0)) {
273 if (dup) {
274 mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
275 mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
276 dup = false;
277 }
278 can_mf_bond = false;
279 }
280
281 if (need_mf_bond && can_mf_bond)
282 free = free_for_dup;
283
284 mlx4_dbg(dev, "Free MAC index is %d\n", free);
285
286 if (table->total == table->max) {
287 /* No free mac entries */
288 err = -ENOSPC;
289 goto out;
290 }
291
292 /* Register new MAC */
293 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
294
295 err = mlx4_set_port_mac_table(dev, port, table->entries);
296 if (unlikely(err)) {
297 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
298 (unsigned long long) mac);
299 table->entries[free] = 0;
300 goto out;
301 }
302 table->refs[free] = 1;
303 table->is_dup[free] = false;
304 ++table->total;
305 if (dup) {
306 dup_table->refs[free] = 0;
307 dup_table->is_dup[free] = true;
308 dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
309
310 err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
311 if (unlikely(err)) {
312 mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
313 dup_table->is_dup[free] = false;
314 dup_table->entries[free] = 0;
315 goto out;
316 }
317 ++dup_table->total;
318 }
319 err = free;
320 out:
321 if (need_mf_bond) {
322 if (port == 2) {
323 mutex_unlock(&table->mutex);
324 mutex_unlock(&dup_table->mutex);
325 } else {
326 mutex_unlock(&dup_table->mutex);
327 mutex_unlock(&table->mutex);
328 }
329 } else {
330 mutex_unlock(&table->mutex);
331 }
332 return err;
333 }
334 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
335
336 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
337 {
338 u64 out_param = 0;
339 int err = -EINVAL;
340
341 if (mlx4_is_mfunc(dev)) {
342 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
343 err = mlx4_cmd_imm(dev, mac, &out_param,
344 ((u32) port) << 8 | (u32) RES_MAC,
345 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
346 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
347 }
348 if (err && err == -EINVAL && mlx4_is_slave(dev)) {
349 /* retry using old REG_MAC format */
350 set_param_l(&out_param, port);
351 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
352 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
353 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
354 if (!err)
355 dev->flags |= MLX4_FLAG_OLD_REG_MAC;
356 }
357 if (err)
358 return err;
359
360 return get_param_l(&out_param);
361 }
362 return __mlx4_register_mac(dev, port, mac);
363 }
364 EXPORT_SYMBOL_GPL(mlx4_register_mac);
365
366 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
367 {
368 return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
369 (port - 1) * (1 << dev->caps.log_num_macs);
370 }
371 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
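/* Usage sketch (illustrative, not part of this driver file): in A0 steering
 * mode an Ethernet caller such as mlx4_en typically combines the index
 * returned by mlx4_register_mac() with mlx4_get_base_qpn() to obtain the QP
 * number reserved for that unicast MAC, roughly:
 *
 *	int index = mlx4_register_mac(dev, port, mac);
 *	if (index < 0)
 *		return index;
 *	*qpn = mlx4_get_base_qpn(dev, port) + index;
 *	...
 *	mlx4_unregister_mac(dev, port, mac);
 *
 * The caller-side variable names above are assumptions; only the pairing of
 * the returned table index with the reserved ETH_ADDR QP range is taken from
 * the functions in this file.
 */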
372
373 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
374 {
375 struct mlx4_port_info *info;
376 struct mlx4_mac_table *table;
377 int index;
378 bool dup = mlx4_is_mf_bonded(dev);
379 u8 dup_port = (port == 1) ? 2 : 1;
380 struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
381
382 if (port < 1 || port > dev->caps.num_ports) {
383 mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
384 return;
385 }
386 info = &mlx4_priv(dev)->port[port];
387 table = &info->mac_table;
388
389 if (dup) {
390 if (port == 1) {
391 mutex_lock(&table->mutex);
392 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
393 } else {
394 mutex_lock(&dup_table->mutex);
395 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
396 }
397 } else {
398 mutex_lock(&table->mutex);
399 }
400
401 index = find_index(dev, table, mac);
402
403 if (validate_index(dev, table, index))
404 goto out;
405
406 if (--table->refs[index] || table->is_dup[index]) {
407 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
408 index);
409 if (!table->refs[index])
410 dup_table->is_dup[index] = false;
411 goto out;
412 }
413
414 table->entries[index] = 0;
415 if (mlx4_set_port_mac_table(dev, port, table->entries))
416 mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
417 --table->total;
418
419 if (dup) {
420 dup_table->is_dup[index] = false;
421 if (dup_table->refs[index])
422 goto out;
423 dup_table->entries[index] = 0;
424 if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
425 mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
426
427 		--dup_table->total;
428 }
429 out:
430 if (dup) {
431 if (port == 2) {
432 mutex_unlock(&table->mutex);
433 mutex_unlock(&dup_table->mutex);
434 } else {
435 mutex_unlock(&dup_table->mutex);
436 mutex_unlock(&table->mutex);
437 }
438 } else {
439 mutex_unlock(&table->mutex);
440 }
441 }
442 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
443
444 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
445 {
446 u64 out_param = 0;
447
448 if (mlx4_is_mfunc(dev)) {
449 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
450 (void) mlx4_cmd_imm(dev, mac, &out_param,
451 ((u32) port) << 8 | (u32) RES_MAC,
452 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
453 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
454 } else {
455 /* use old unregister mac format */
456 set_param_l(&out_param, port);
457 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
458 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
459 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
460 }
461 return;
462 }
463 __mlx4_unregister_mac(dev, port, mac);
464 return;
465 }
466 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
467
468 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
469 {
470 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
471 struct mlx4_mac_table *table = &info->mac_table;
472 int index = qpn - info->base_qpn;
473 int err = 0;
474 bool dup = mlx4_is_mf_bonded(dev);
475 u8 dup_port = (port == 1) ? 2 : 1;
476 struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
477
478 /* CX1 doesn't support multi-functions */
479 if (dup) {
480 if (port == 1) {
481 mutex_lock(&table->mutex);
482 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
483 } else {
484 mutex_lock(&dup_table->mutex);
485 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
486 }
487 } else {
488 mutex_lock(&table->mutex);
489 }
490
491 err = validate_index(dev, table, index);
492 if (err)
493 goto out;
494
495 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
496
497 err = mlx4_set_port_mac_table(dev, port, table->entries);
498 if (unlikely(err)) {
499 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
500 (unsigned long long) new_mac);
501 table->entries[index] = 0;
502 } else {
503 if (dup) {
504 dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
505
506 err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
507 if (unlikely(err)) {
508 mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
509 (unsigned long long)new_mac);
510 dup_table->entries[index] = 0;
511 }
512 }
513 }
514 out:
515 if (dup) {
516 if (port == 2) {
517 mutex_unlock(&table->mutex);
518 mutex_unlock(&dup_table->mutex);
519 } else {
520 mutex_unlock(&dup_table->mutex);
521 mutex_unlock(&table->mutex);
522 }
523 } else {
524 mutex_unlock(&table->mutex);
525 }
526 return err;
527 }
528 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
529
530 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
531 __be32 *entries)
532 {
533 struct mlx4_cmd_mailbox *mailbox;
534 u32 in_mod;
535 int err;
536
537 mailbox = mlx4_alloc_cmd_mailbox(dev);
538 if (IS_ERR(mailbox))
539 return PTR_ERR(mailbox);
540
541 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
542 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
543 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
544 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
545 MLX4_CMD_NATIVE);
546
547 mlx4_free_cmd_mailbox(dev, mailbox);
548
549 return err;
550 }
551
552 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
553 {
554 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
555 int i;
556
557 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
558 if (table->refs[i] &&
559 (vid == (MLX4_VLAN_MASK &
560 be32_to_cpu(table->entries[i])))) {
561 			/* VLAN already registered, return its index */
562 *idx = i;
563 return 0;
564 }
565 }
566
567 return -ENOENT;
568 }
569 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
570
571 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
572 int *index)
573 {
574 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
575 int i, err = 0;
576 int free = -1;
577 int free_for_dup = -1;
578 bool dup = mlx4_is_mf_bonded(dev);
579 u8 dup_port = (port == 1) ? 2 : 1;
580 struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
581 bool need_mf_bond = mlx4_need_mf_bond(dev);
582 bool can_mf_bond = true;
583
584 mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
585 vlan, port,
586 dup ? "with" : "without");
587
588 if (need_mf_bond) {
589 if (port == 1) {
590 mutex_lock(&table->mutex);
591 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
592 } else {
593 mutex_lock(&dup_table->mutex);
594 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
595 }
596 } else {
597 mutex_lock(&table->mutex);
598 }
599
600 if (table->total == table->max) {
601 /* No free vlan entries */
602 err = -ENOSPC;
603 goto out;
604 }
605
606 if (need_mf_bond) {
607 int index_at_port = -1;
608 int index_at_dup_port = -1;
609
610 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
611 if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
612 index_at_port = i;
613 if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
614 index_at_dup_port = i;
615 }
616 /* check that same vlan is not in the tables at different indices */
617 if ((index_at_port != index_at_dup_port) &&
618 (index_at_port >= 0) &&
619 (index_at_dup_port >= 0))
620 can_mf_bond = false;
621
622 /* If the vlan is already in the primary table, the slot must be
623 * available in the duplicate table as well.
624 */
625 if (index_at_port >= 0 && index_at_dup_port < 0 &&
626 dup_table->refs[index_at_port]) {
627 can_mf_bond = false;
628 }
629 /* If the vlan is already in the duplicate table, check that the
630 * corresponding index is not occupied in the primary table, or
631 * the primary table already contains the vlan at the same index.
632 * Otherwise, you cannot bond (primary contains a different vlan
633 * at that index).
634 */
635 if (index_at_dup_port >= 0) {
636 if (!table->refs[index_at_dup_port] ||
637 (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
638 free_for_dup = index_at_dup_port;
639 else
640 can_mf_bond = false;
641 }
642 }
643
644 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
645 if (!table->refs[i]) {
646 if (free < 0)
647 free = i;
648 if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
649 if (!dup_table->refs[i])
650 free_for_dup = i;
651 }
652 }
653
654 if ((table->refs[i] || table->is_dup[i]) &&
655 (vlan == (MLX4_VLAN_MASK &
656 be32_to_cpu(table->entries[i])))) {
657 			/* Vlan already registered, increase reference count */
658 mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
659 *index = i;
660 ++table->refs[i];
661 if (dup) {
662 u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
663
664 if (dup_vlan != vlan || !dup_table->is_dup[i]) {
665 mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
666 vlan, dup_port, i);
667 }
668 }
669 goto out;
670 }
671 }
672
673 if (need_mf_bond && (free_for_dup < 0)) {
674 if (dup) {
675 mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
676 mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
677 dup = false;
678 }
679 can_mf_bond = false;
680 }
681
682 if (need_mf_bond && can_mf_bond)
683 free = free_for_dup;
684
685 if (free < 0) {
686 err = -ENOMEM;
687 goto out;
688 }
689
690 /* Register new VLAN */
691 table->refs[free] = 1;
692 table->is_dup[free] = false;
693 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
694
695 err = mlx4_set_port_vlan_table(dev, port, table->entries);
696 if (unlikely(err)) {
697 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
698 table->refs[free] = 0;
699 table->entries[free] = 0;
700 goto out;
701 }
702 ++table->total;
703 if (dup) {
704 dup_table->refs[free] = 0;
705 dup_table->is_dup[free] = true;
706 dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
707
708 err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
709 if (unlikely(err)) {
710 mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
711 dup_table->is_dup[free] = false;
712 dup_table->entries[free] = 0;
713 goto out;
714 }
715 ++dup_table->total;
716 }
717
718 *index = free;
719 out:
720 if (need_mf_bond) {
721 if (port == 2) {
722 mutex_unlock(&table->mutex);
723 mutex_unlock(&dup_table->mutex);
724 } else {
725 mutex_unlock(&dup_table->mutex);
726 mutex_unlock(&table->mutex);
727 }
728 } else {
729 mutex_unlock(&table->mutex);
730 }
731 return err;
732 }
733
734 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
735 {
736 u64 out_param = 0;
737 int err;
738
739 if (vlan > 4095)
740 return -EINVAL;
741
742 if (mlx4_is_mfunc(dev)) {
743 err = mlx4_cmd_imm(dev, vlan, &out_param,
744 ((u32) port) << 8 | (u32) RES_VLAN,
745 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
746 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
747 if (!err)
748 *index = get_param_l(&out_param);
749
750 return err;
751 }
752 return __mlx4_register_vlan(dev, port, vlan, index);
753 }
754 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
755
756 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
757 {
758 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
759 int index;
760 bool dup = mlx4_is_mf_bonded(dev);
761 u8 dup_port = (port == 1) ? 2 : 1;
762 struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
763
764 if (dup) {
765 if (port == 1) {
766 mutex_lock(&table->mutex);
767 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
768 } else {
769 mutex_lock(&dup_table->mutex);
770 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
771 }
772 } else {
773 mutex_lock(&table->mutex);
774 }
775
776 if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
777 mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
778 goto out;
779 }
780
781 if (index < MLX4_VLAN_REGULAR) {
782 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
783 goto out;
784 }
785
786 if (--table->refs[index] || table->is_dup[index]) {
787 mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
788 table->refs[index], index);
789 if (!table->refs[index])
790 dup_table->is_dup[index] = false;
791 goto out;
792 }
793 table->entries[index] = 0;
794 if (mlx4_set_port_vlan_table(dev, port, table->entries))
795 mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
796 --table->total;
797 if (dup) {
798 dup_table->is_dup[index] = false;
799 if (dup_table->refs[index])
800 goto out;
801 dup_table->entries[index] = 0;
802 if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
803 mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
804 --dup_table->total;
805 }
806 out:
807 if (dup) {
808 if (port == 2) {
809 mutex_unlock(&table->mutex);
810 mutex_unlock(&dup_table->mutex);
811 } else {
812 mutex_unlock(&dup_table->mutex);
813 mutex_unlock(&table->mutex);
814 }
815 } else {
816 mutex_unlock(&table->mutex);
817 }
818 }
819
820 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
821 {
822 u64 out_param = 0;
823
824 if (mlx4_is_mfunc(dev)) {
825 (void) mlx4_cmd_imm(dev, vlan, &out_param,
826 ((u32) port) << 8 | (u32) RES_VLAN,
827 RES_OP_RESERVE_AND_MAP,
828 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
829 MLX4_CMD_WRAPPED);
830 return;
831 }
832 __mlx4_unregister_vlan(dev, port, vlan);
833 }
834 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
835
836 int mlx4_bond_mac_table(struct mlx4_dev *dev)
837 {
838 struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
839 struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
840 int ret = 0;
841 int i;
842 bool update1 = false;
843 bool update2 = false;
844
845 mutex_lock(&t1->mutex);
846 mutex_lock(&t2->mutex);
847 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
848 if ((t1->entries[i] != t2->entries[i]) &&
849 t1->entries[i] && t2->entries[i]) {
850 mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
851 ret = -EINVAL;
852 goto unlock;
853 }
854 }
855
856 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
857 if (t1->entries[i] && !t2->entries[i]) {
858 t2->entries[i] = t1->entries[i];
859 t2->is_dup[i] = true;
860 update2 = true;
861 } else if (!t1->entries[i] && t2->entries[i]) {
862 t1->entries[i] = t2->entries[i];
863 t1->is_dup[i] = true;
864 update1 = true;
865 } else if (t1->entries[i] && t2->entries[i]) {
866 t1->is_dup[i] = true;
867 t2->is_dup[i] = true;
868 }
869 }
870
871 if (update1) {
872 ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
873 if (ret)
874 mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
875 }
876 if (!ret && update2) {
877 ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
878 if (ret)
879 mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
880 }
881
882 if (ret)
883 mlx4_warn(dev, "failed to create mirror MAC tables\n");
884 unlock:
885 mutex_unlock(&t2->mutex);
886 mutex_unlock(&t1->mutex);
887 return ret;
888 }
889
890 int mlx4_unbond_mac_table(struct mlx4_dev *dev)
891 {
892 struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
893 struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
894 int ret = 0;
895 int ret1;
896 int i;
897 bool update1 = false;
898 bool update2 = false;
899
900 mutex_lock(&t1->mutex);
901 mutex_lock(&t2->mutex);
902 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
903 if (t1->entries[i] != t2->entries[i]) {
904 mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
905 ret = -EINVAL;
906 goto unlock;
907 }
908 }
909
910 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
911 if (!t1->entries[i])
912 continue;
913 t1->is_dup[i] = false;
914 if (!t1->refs[i]) {
915 t1->entries[i] = 0;
916 update1 = true;
917 }
918 t2->is_dup[i] = false;
919 if (!t2->refs[i]) {
920 t2->entries[i] = 0;
921 update2 = true;
922 }
923 }
924
925 if (update1) {
926 ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
927 if (ret)
928 mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
929 }
930 if (update2) {
931 ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
932 if (ret1) {
933 mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
934 ret = ret1;
935 }
936 }
937 unlock:
938 mutex_unlock(&t2->mutex);
939 mutex_unlock(&t1->mutex);
940 return ret;
941 }
942
943 int mlx4_bond_vlan_table(struct mlx4_dev *dev)
944 {
945 struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
946 struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
947 int ret = 0;
948 int i;
949 bool update1 = false;
950 bool update2 = false;
951
952 mutex_lock(&t1->mutex);
953 mutex_lock(&t2->mutex);
954 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
955 if ((t1->entries[i] != t2->entries[i]) &&
956 t1->entries[i] && t2->entries[i]) {
957 mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
958 ret = -EINVAL;
959 goto unlock;
960 }
961 }
962
963 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
964 if (t1->entries[i] && !t2->entries[i]) {
965 t2->entries[i] = t1->entries[i];
966 t2->is_dup[i] = true;
967 update2 = true;
968 } else if (!t1->entries[i] && t2->entries[i]) {
969 t1->entries[i] = t2->entries[i];
970 t1->is_dup[i] = true;
971 update1 = true;
972 } else if (t1->entries[i] && t2->entries[i]) {
973 t1->is_dup[i] = true;
974 t2->is_dup[i] = true;
975 }
976 }
977
978 if (update1) {
979 ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
980 if (ret)
981 mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
982 }
983 if (!ret && update2) {
984 ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
985 if (ret)
986 mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
987 }
988
989 if (ret)
990 mlx4_warn(dev, "failed to create mirror VLAN tables\n");
991 unlock:
992 mutex_unlock(&t2->mutex);
993 mutex_unlock(&t1->mutex);
994 return ret;
995 }
996
997 int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
998 {
999 struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
1000 struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
1001 int ret = 0;
1002 int ret1;
1003 int i;
1004 bool update1 = false;
1005 bool update2 = false;
1006
1007 mutex_lock(&t1->mutex);
1008 mutex_lock(&t2->mutex);
1009 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
1010 if (t1->entries[i] != t2->entries[i]) {
1011 mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
1012 ret = -EINVAL;
1013 goto unlock;
1014 }
1015 }
1016
1017 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
1018 if (!t1->entries[i])
1019 continue;
1020 t1->is_dup[i] = false;
1021 if (!t1->refs[i]) {
1022 t1->entries[i] = 0;
1023 update1 = true;
1024 }
1025 t2->is_dup[i] = false;
1026 if (!t2->refs[i]) {
1027 t2->entries[i] = 0;
1028 update2 = true;
1029 }
1030 }
1031
1032 if (update1) {
1033 ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
1034 if (ret)
1035 mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
1036 }
1037 if (update2) {
1038 ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
1039 if (ret1) {
1040 mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
1041 ret = ret1;
1042 }
1043 }
1044 unlock:
1045 mutex_unlock(&t2->mutex);
1046 mutex_unlock(&t1->mutex);
1047 return ret;
1048 }
1049
1050 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
1051 {
1052 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
1053 u8 *inbuf, *outbuf;
1054 int err;
1055
1056 inmailbox = mlx4_alloc_cmd_mailbox(dev);
1057 if (IS_ERR(inmailbox))
1058 return PTR_ERR(inmailbox);
1059
1060 outmailbox = mlx4_alloc_cmd_mailbox(dev);
1061 if (IS_ERR(outmailbox)) {
1062 mlx4_free_cmd_mailbox(dev, inmailbox);
1063 return PTR_ERR(outmailbox);
1064 }
1065
1066 inbuf = inmailbox->buf;
1067 outbuf = outmailbox->buf;
1068 inbuf[0] = 1;
1069 inbuf[1] = 1;
1070 inbuf[2] = 1;
1071 inbuf[3] = 1;
1072 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
1073 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
1074
1075 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
1076 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1077 MLX4_CMD_NATIVE);
1078 if (!err)
1079 *caps = *(__be32 *) (outbuf + 84);
1080 mlx4_free_cmd_mailbox(dev, inmailbox);
1081 mlx4_free_cmd_mailbox(dev, outmailbox);
1082 return err;
1083 }
1084 static struct mlx4_roce_gid_entry zgid_entry;
1085
1086 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
1087 {
1088 int vfs;
1089 int slave_gid = slave;
1090 unsigned i;
1091 struct mlx4_slaves_pport slaves_pport;
1092 struct mlx4_active_ports actv_ports;
1093 unsigned max_port_p_one;
1094
1095 if (slave == 0)
1096 return MLX4_ROCE_PF_GIDS;
1097
1098 /* Slave is a VF */
1099 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1100 actv_ports = mlx4_get_active_ports(dev, slave);
1101 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1102 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1103
1104 for (i = 1; i < max_port_p_one; i++) {
1105 struct mlx4_active_ports exclusive_ports;
1106 struct mlx4_slaves_pport slaves_pport_actv;
1107 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1108 set_bit(i - 1, exclusive_ports.ports);
1109 if (i == port)
1110 continue;
1111 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1112 dev, &exclusive_ports);
1113 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1114 dev->persist->num_vfs + 1);
1115 }
1116 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1117 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
1118 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
1119 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
1120 }
1121
1122 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
1123 {
1124 int gids;
1125 unsigned i;
1126 int slave_gid = slave;
1127 int vfs;
1128
1129 struct mlx4_slaves_pport slaves_pport;
1130 struct mlx4_active_ports actv_ports;
1131 unsigned max_port_p_one;
1132
1133 if (slave == 0)
1134 return 0;
1135
1136 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1137 actv_ports = mlx4_get_active_ports(dev, slave);
1138 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1139 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1140
1141 for (i = 1; i < max_port_p_one; i++) {
1142 struct mlx4_active_ports exclusive_ports;
1143 struct mlx4_slaves_pport slaves_pport_actv;
1144 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1145 set_bit(i - 1, exclusive_ports.ports);
1146 if (i == port)
1147 continue;
1148 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1149 dev, &exclusive_ports);
1150 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1151 dev->persist->num_vfs + 1);
1152 }
1153 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1154 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1155 if (slave_gid <= gids % vfs)
1156 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
1157
1158 return MLX4_ROCE_PF_GIDS + (gids % vfs) +
1159 ((gids / vfs) * (slave_gid - 1));
1160 }
1161 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
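/* Worked example (assuming MLX4_ROCE_MAX_GIDS = 128 and MLX4_ROCE_PF_GIDS = 16,
 * as defined in the mlx4 headers): with 5 single-port VFs active on the port
 * there are 128 - 16 = 112 VF GIDs to distribute, and 112 = 5 * 22 + 2.  The
 * remainder is handed out first, so slave_gid 1 and 2 get 23 GIDs each while
 * slave_gid 3, 4 and 5 get 22, and mlx4_get_base_gid_ix() returns 16, 39, 62,
 * 84 and 106 respectively (106 + 22 = 128, exactly filling the table).
 */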
1162
1163 static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
1164 int port, struct mlx4_cmd_mailbox *mailbox)
1165 {
1166 struct mlx4_roce_gid_entry *gid_entry_mbox;
1167 struct mlx4_priv *priv = mlx4_priv(dev);
1168 int num_gids, base, offset;
1169 int i, err;
1170
1171 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1172 base = mlx4_get_base_gid_ix(dev, slave, port);
1173
1174 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
1175
1176 mutex_lock(&(priv->port[port].gid_table.mutex));
1177 /* Zero-out gids belonging to that slave in the port GID table */
1178 for (i = 0, offset = base; i < num_gids; offset++, i++)
1179 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1180 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
1181
1182 /* Now, copy roce port gids table to mailbox for passing to FW */
1183 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
1184 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1185 memcpy(gid_entry_mbox->raw,
1186 priv->port[port].gid_table.roce_gids[i].raw,
1187 MLX4_ROCE_GID_ENTRY_SIZE);
1188
1189 err = mlx4_cmd(dev, mailbox->dma,
1190 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
1191 MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
1192 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1193 mutex_unlock(&(priv->port[port].gid_table.mutex));
1194 return err;
1195 }
1196
1197
1198 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
1199 {
1200 struct mlx4_active_ports actv_ports;
1201 struct mlx4_cmd_mailbox *mailbox;
1202 int num_eth_ports, err;
1203 int i;
1204
1205 if (slave < 0 || slave > dev->persist->num_vfs)
1206 return;
1207
1208 actv_ports = mlx4_get_active_ports(dev, slave);
1209
1210 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
1211 if (test_bit(i, actv_ports.ports)) {
1212 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1213 continue;
1214 num_eth_ports++;
1215 }
1216 }
1217
1218 if (!num_eth_ports)
1219 return;
1220
1221 /* have ETH ports. Alloc mailbox for SET_PORT command */
1222 mailbox = mlx4_alloc_cmd_mailbox(dev);
1223 if (IS_ERR(mailbox))
1224 return;
1225
1226 for (i = 0; i < dev->caps.num_ports; i++) {
1227 if (test_bit(i, actv_ports.ports)) {
1228 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1229 continue;
1230 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
1231 if (err)
1232 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
1233 slave, i + 1, err);
1234 }
1235 }
1236
1237 mlx4_free_cmd_mailbox(dev, mailbox);
1238 return;
1239 }
1240
1241 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1242 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
1243 {
1244 struct mlx4_priv *priv = mlx4_priv(dev);
1245 struct mlx4_port_info *port_info;
1246 struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1247 struct mlx4_slave_state *slave_st = &master->slave_state[slave];
1248 struct mlx4_set_port_rqp_calc_context *qpn_context;
1249 struct mlx4_set_port_general_context *gen_context;
1250 struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
1251 int reset_qkey_viols;
1252 int port;
1253 int is_eth;
1254 int num_gids;
1255 int base;
1256 u32 in_modifier;
1257 u32 promisc;
1258 u16 mtu, prev_mtu;
1259 int err;
1260 int i, j;
1261 int offset;
1262 __be32 agg_cap_mask;
1263 __be32 slave_cap_mask;
1264 __be32 new_cap_mask;
1265
1266 port = in_mod & 0xff;
1267 in_modifier = in_mod >> 8;
1268 is_eth = op_mod;
1269 port_info = &priv->port[port];
1270
1271 	/* Slaves may only perform SET_PORT on the general (MTU) and GID table contexts */
1272 if (is_eth) {
1273 if (slave != dev->caps.function &&
1274 in_modifier != MLX4_SET_PORT_GENERAL &&
1275 in_modifier != MLX4_SET_PORT_GID_TABLE) {
1276 mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
1277 slave);
1278 return -EINVAL;
1279 }
1280 switch (in_modifier) {
1281 case MLX4_SET_PORT_RQP_CALC:
1282 qpn_context = inbox->buf;
1283 qpn_context->base_qpn =
1284 cpu_to_be32(port_info->base_qpn);
1285 qpn_context->n_mac = 0x7;
1286 promisc = be32_to_cpu(qpn_context->promisc) >>
1287 SET_PORT_PROMISC_SHIFT;
1288 qpn_context->promisc = cpu_to_be32(
1289 promisc << SET_PORT_PROMISC_SHIFT |
1290 port_info->base_qpn);
1291 promisc = be32_to_cpu(qpn_context->mcast) >>
1292 SET_PORT_MC_PROMISC_SHIFT;
1293 qpn_context->mcast = cpu_to_be32(
1294 promisc << SET_PORT_MC_PROMISC_SHIFT |
1295 port_info->base_qpn);
1296 break;
1297 case MLX4_SET_PORT_GENERAL:
1298 gen_context = inbox->buf;
1299 			/* Mtu is configured as the max MTU among all
1300 			 * the functions on the port. */
1301 mtu = be16_to_cpu(gen_context->mtu);
1302 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
1303 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
1304 prev_mtu = slave_st->mtu[port];
1305 slave_st->mtu[port] = mtu;
1306 if (mtu > master->max_mtu[port])
1307 master->max_mtu[port] = mtu;
1308 if (mtu < prev_mtu && prev_mtu ==
1309 master->max_mtu[port]) {
1310 slave_st->mtu[port] = mtu;
1311 master->max_mtu[port] = mtu;
1312 for (i = 0; i < dev->num_slaves; i++) {
1313 master->max_mtu[port] =
1314 max(master->max_mtu[port],
1315 master->slave_state[i].mtu[port]);
1316 }
1317 }
1318
1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1320 break;
1321 case MLX4_SET_PORT_GID_TABLE:
1322 			/* A guest may own multiple GID entries, so loop over all of
1323 			 * the gids that the guest passed in.
1324 			 * 1. Check that the gids passed by the slave contain no duplicates
1325 			 */
1326 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1327 base = mlx4_get_base_gid_ix(dev, slave, port);
1328 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1329 for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
1330 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1331 sizeof(zgid_entry)))
1332 continue;
1333 gid_entry_mb1 = gid_entry_mbox + 1;
1334 for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
1335 if (!memcmp(gid_entry_mb1->raw,
1336 zgid_entry.raw, sizeof(zgid_entry)))
1337 continue;
1338 if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
1339 sizeof(gid_entry_mbox->raw))) {
1340 /* found duplicate */
1341 return -EINVAL;
1342 }
1343 }
1344 }
1345
1346 			/* 2. Check that there are no duplicates with OTHER
1347 			 * entries in the port GID table
1348 */
1349
1350 mutex_lock(&(priv->port[port].gid_table.mutex));
1351 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1352 if (i >= base && i < base + num_gids)
1353 continue; /* don't compare to slave's current gids */
1354 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
1355 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
1356 continue;
1357 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1358 for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
1359 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1360 sizeof(zgid_entry)))
1361 continue;
1362 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
1363 sizeof(gid_entry_tbl->raw))) {
1364 /* found duplicate */
1365 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
1366 slave, i);
1367 mutex_unlock(&(priv->port[port].gid_table.mutex));
1368 return -EINVAL;
1369 }
1370 }
1371 }
1372
1373 /* insert slave GIDs with memcpy, starting at slave's base index */
1374 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1375 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
1376 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1377 gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
1378
1379 /* Now, copy roce port gids table to current mailbox for passing to FW */
1380 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1381 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1382 memcpy(gid_entry_mbox->raw,
1383 priv->port[port].gid_table.roce_gids[i].raw,
1384 MLX4_ROCE_GID_ENTRY_SIZE);
1385
1386 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1387 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1388 MLX4_CMD_NATIVE);
1389 mutex_unlock(&(priv->port[port].gid_table.mutex));
1390 return err;
1391 }
1392
1393 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1394 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1395 MLX4_CMD_NATIVE);
1396 }
1397
1398 /* Slaves are not allowed to SET_PORT beacon (LED) blink */
1399 if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
1400 mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
1401 return -EPERM;
1402 }
1403
1404 /* For IB, we only consider:
1405 * - The capability mask, which is set to the aggregate of all
1406 * slave function capabilities
1407 	 * - The QKey violation counter - reset according to each request.
1408 */
1409
1410 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1411 reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
1412 new_cap_mask = ((__be32 *) inbox->buf)[2];
1413 } else {
1414 reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
1415 new_cap_mask = ((__be32 *) inbox->buf)[1];
1416 }
1417
1418 /* slave may not set the IS_SM capability for the port */
1419 if (slave != mlx4_master_func_num(dev) &&
1420 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
1421 return -EINVAL;
1422
1423 /* No DEV_MGMT in multifunc mode */
1424 if (mlx4_is_mfunc(dev) &&
1425 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
1426 return -EINVAL;
1427
1428 agg_cap_mask = 0;
1429 slave_cap_mask =
1430 priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
1431 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
1432 for (i = 0; i < dev->num_slaves; i++)
1433 agg_cap_mask |=
1434 priv->mfunc.master.slave_state[i].ib_cap_mask[port];
1435
1436 /* only clear mailbox for guests. Master may be setting
1437 * MTU or PKEY table size
1438 */
1439 if (slave != dev->caps.function)
1440 memset(inbox->buf, 0, 256);
1441 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1442 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
1443 ((__be32 *) inbox->buf)[2] = agg_cap_mask;
1444 } else {
1445 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
1446 ((__be32 *) inbox->buf)[1] = agg_cap_mask;
1447 }
1448
1449 err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
1450 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1451 if (err)
1452 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
1453 slave_cap_mask;
1454 return err;
1455 }
1456
1457 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1458 struct mlx4_vhcr *vhcr,
1459 struct mlx4_cmd_mailbox *inbox,
1460 struct mlx4_cmd_mailbox *outbox,
1461 struct mlx4_cmd_info *cmd)
1462 {
1463 int port = mlx4_slave_convert_port(
1464 dev, slave, vhcr->in_modifier & 0xFF);
1465
1466 if (port < 0)
1467 return -EINVAL;
1468
1469 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
1470 (port & 0xFF);
1471
1472 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
1473 vhcr->op_modifier, inbox);
1474 }
1475
1476 /* bit locations for set port command with zero op modifier */
1477 enum {
1478 MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
1479 MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
1480 MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
1481 MLX4_CHANGE_PORT_VL_CAP = 21,
1482 MLX4_CHANGE_PORT_MTU_CAP = 22,
1483 };
1484
1485 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
1486 {
1487 struct mlx4_cmd_mailbox *mailbox;
1488 int err, vl_cap, pkey_tbl_flag = 0;
1489
1490 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1491 return 0;
1492
1493 mailbox = mlx4_alloc_cmd_mailbox(dev);
1494 if (IS_ERR(mailbox))
1495 return PTR_ERR(mailbox);
1496
1497 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
1498
1499 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
1500 pkey_tbl_flag = 1;
1501 ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
1502 }
1503
1504 /* IB VL CAP enum isn't used by the firmware, just numerical values */
1505 for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
1506 ((__be32 *) mailbox->buf)[0] = cpu_to_be32(
1507 (1 << MLX4_CHANGE_PORT_MTU_CAP) |
1508 (1 << MLX4_CHANGE_PORT_VL_CAP) |
1509 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
1510 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
1511 (vl_cap << MLX4_SET_PORT_VL_CAP));
1512 err = mlx4_cmd(dev, mailbox->dma, port,
1513 MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
1514 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
1515 if (err != -ENOMEM)
1516 break;
1517 }
1518
1519 mlx4_free_cmd_mailbox(dev, mailbox);
1520 return err;
1521 }
1522
1523 #define SET_PORT_ROCE_2_FLAGS 0x10
1524 #define MLX4_SET_PORT_ROCE_V1_V2 0x2
1525 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1526 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
1527 {
1528 struct mlx4_cmd_mailbox *mailbox;
1529 struct mlx4_set_port_general_context *context;
1530 int err;
1531 u32 in_mod;
1532
1533 mailbox = mlx4_alloc_cmd_mailbox(dev);
1534 if (IS_ERR(mailbox))
1535 return PTR_ERR(mailbox);
1536 context = mailbox->buf;
1537 context->flags = SET_PORT_GEN_ALL_VALID;
1538 context->mtu = cpu_to_be16(mtu);
1539 context->pptx = (pptx * (!pfctx)) << 7;
1540 context->pfctx = pfctx;
1541 context->pprx = (pprx * (!pfcrx)) << 7;
1542 context->pfcrx = pfcrx;
1543
1544 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1545 context->flags |= SET_PORT_ROCE_2_FLAGS;
1546 context->roce_mode |=
1547 MLX4_SET_PORT_ROCE_V1_V2 << 4;
1548 }
1549 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1550 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1551 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1552 MLX4_CMD_WRAPPED);
1553
1554 mlx4_free_cmd_mailbox(dev, mailbox);
1555 return err;
1556 }
1557 EXPORT_SYMBOL(mlx4_SET_PORT_general);
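/* Usage sketch (illustrative): a caller bringing an Ethernet port up would
 * typically pass the maximum frame size plus FCS together with its pause/PFC
 * profile, e.g. (the caller-side field names here are assumptions):
 *
 *	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 *				    priv->rx_skb_size + ETH_FCS_LEN,
 *				    prof->tx_pause, prof->tx_ppp,
 *				    prof->rx_pause, prof->rx_ppp);
 *
 * Note that pptx/pprx are masked out in the function above whenever the
 * matching PFC bitmap (pfctx/pfcrx) is non-zero, i.e. global pause and
 * per-priority pause are treated as mutually exclusive in the SET_PORT
 * general context.
 */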
1558
1559 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1560 u8 promisc)
1561 {
1562 struct mlx4_cmd_mailbox *mailbox;
1563 struct mlx4_set_port_rqp_calc_context *context;
1564 int err;
1565 u32 in_mod;
1566 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
1567 MCAST_DIRECT : MCAST_DEFAULT;
1568
1569 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1570 return 0;
1571
1572 mailbox = mlx4_alloc_cmd_mailbox(dev);
1573 if (IS_ERR(mailbox))
1574 return PTR_ERR(mailbox);
1575 context = mailbox->buf;
1576 context->base_qpn = cpu_to_be32(base_qpn);
1577 context->n_mac = dev->caps.log_num_macs;
1578 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
1579 base_qpn);
1580 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
1581 base_qpn);
1582 context->intra_no_vlan = 0;
1583 context->no_vlan = MLX4_NO_VLAN_IDX;
1584 context->intra_vlan_miss = 0;
1585 context->vlan_miss = MLX4_VLAN_MISS_IDX;
1586
1587 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1588 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1589 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1590 MLX4_CMD_WRAPPED);
1591
1592 mlx4_free_cmd_mailbox(dev, mailbox);
1593 return err;
1594 }
1595 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1596
1597 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
1598 {
1599 struct mlx4_cmd_mailbox *mailbox;
1600 struct mlx4_set_port_general_context *context;
1601 u32 in_mod;
1602 int err;
1603
1604 mailbox = mlx4_alloc_cmd_mailbox(dev);
1605 if (IS_ERR(mailbox))
1606 return PTR_ERR(mailbox);
1607 context = mailbox->buf;
1608 context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
1609 if (ignore_fcs_value)
1610 context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
1611 else
1612 context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
1613
1614 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1615 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1616 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1617
1618 mlx4_free_cmd_mailbox(dev, mailbox);
1619 return err;
1620 }
1621 EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
1622
1623 enum {
1624 VXLAN_ENABLE_MODIFY = 1 << 7,
1625 VXLAN_STEERING_MODIFY = 1 << 6,
1626
1627 VXLAN_ENABLE = 1 << 7,
1628 };
1629
1630 struct mlx4_set_port_vxlan_context {
1631 u32 reserved1;
1632 u8 modify_flags;
1633 u8 reserved2;
1634 u8 enable_flags;
1635 u8 steering;
1636 };
1637
1638 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1639 {
1640 int err;
1641 u32 in_mod;
1642 struct mlx4_cmd_mailbox *mailbox;
1643 struct mlx4_set_port_vxlan_context *context;
1644
1645 mailbox = mlx4_alloc_cmd_mailbox(dev);
1646 if (IS_ERR(mailbox))
1647 return PTR_ERR(mailbox);
1648 context = mailbox->buf;
1649 memset(context, 0, sizeof(*context));
1650
1651 context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
1652 if (enable)
1653 context->enable_flags = VXLAN_ENABLE;
1654 context->steering = steering;
1655
1656 in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1657 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1658 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1659 MLX4_CMD_NATIVE);
1660
1661 mlx4_free_cmd_mailbox(dev, mailbox);
1662 return err;
1663 }
1664 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1665
1666 int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
1667 {
1668 int err;
1669 struct mlx4_cmd_mailbox *mailbox;
1670
1671 mailbox = mlx4_alloc_cmd_mailbox(dev);
1672 if (IS_ERR(mailbox))
1673 return PTR_ERR(mailbox);
1674
1675 *((__be32 *)mailbox->buf) = cpu_to_be32(time);
1676
1677 err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
1678 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1679 MLX4_CMD_NATIVE);
1680
1681 mlx4_free_cmd_mailbox(dev, mailbox);
1682 return err;
1683 }
1684 EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
1685
1686 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1687 struct mlx4_vhcr *vhcr,
1688 struct mlx4_cmd_mailbox *inbox,
1689 struct mlx4_cmd_mailbox *outbox,
1690 struct mlx4_cmd_info *cmd)
1691 {
1692 int err = 0;
1693
1694 return err;
1695 }
1696
1697 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
1698 u64 mac, u64 clear, u8 mode)
1699 {
1700 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
1701 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
1702 MLX4_CMD_WRAPPED);
1703 }
1704 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
1705
1706 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1707 struct mlx4_vhcr *vhcr,
1708 struct mlx4_cmd_mailbox *inbox,
1709 struct mlx4_cmd_mailbox *outbox,
1710 struct mlx4_cmd_info *cmd)
1711 {
1712 int err = 0;
1713
1714 return err;
1715 }
1716
1717 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
1718 u32 in_mod, struct mlx4_cmd_mailbox *outbox)
1719 {
1720 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
1721 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
1722 MLX4_CMD_NATIVE);
1723 }
1724
1725 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1726 struct mlx4_vhcr *vhcr,
1727 struct mlx4_cmd_mailbox *inbox,
1728 struct mlx4_cmd_mailbox *outbox,
1729 struct mlx4_cmd_info *cmd)
1730 {
1731 if (slave != dev->caps.function)
1732 return 0;
1733 return mlx4_common_dump_eth_stats(dev, slave,
1734 vhcr->in_modifier, outbox);
1735 }
1736
1737 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1738 int *slave_id)
1739 {
1740 struct mlx4_priv *priv = mlx4_priv(dev);
1741 int i, found_ix = -1;
1742 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1743 struct mlx4_slaves_pport slaves_pport;
1744 unsigned num_vfs;
1745 int slave_gid;
1746
1747 if (!mlx4_is_mfunc(dev))
1748 return -EINVAL;
1749
1750 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1751 num_vfs = bitmap_weight(slaves_pport.slaves,
1752 dev->persist->num_vfs + 1) - 1;
1753
1754 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1755 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1756 MLX4_ROCE_GID_ENTRY_SIZE)) {
1757 found_ix = i;
1758 break;
1759 }
1760 }
1761
1762 if (found_ix >= 0) {
1763 /* Calculate a slave_gid which is the slave number in the gid
1764 * table and not a globally unique slave number.
1765 */
1766 if (found_ix < MLX4_ROCE_PF_GIDS)
1767 slave_gid = 0;
1768 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1769 (vf_gids / num_vfs + 1))
1770 slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1771 (vf_gids / num_vfs + 1)) + 1;
1772 else
1773 slave_gid =
1774 ((found_ix - MLX4_ROCE_PF_GIDS -
1775 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1776 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1777
1778 /* Calculate the globally unique slave id */
1779 if (slave_gid) {
1780 struct mlx4_active_ports exclusive_ports;
1781 struct mlx4_active_ports actv_ports;
1782 struct mlx4_slaves_pport slaves_pport_actv;
1783 unsigned max_port_p_one;
1784 int num_vfs_before = 0;
1785 int candidate_slave_gid;
1786
1787 			/* Calculate how many VFs are on the previous ports, if any exist */
1788 for (i = 1; i < port; i++) {
1789 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1790 set_bit(i - 1, exclusive_ports.ports);
1791 slaves_pport_actv =
1792 mlx4_phys_to_slaves_pport_actv(
1793 dev, &exclusive_ports);
1794 num_vfs_before += bitmap_weight(
1795 slaves_pport_actv.slaves,
1796 dev->persist->num_vfs + 1);
1797 }
1798
1799 /* candidate_slave_gid isn't necessarily the correct slave, but
1800 * it has the same number of ports and is assigned to the same
1801 * ports as the real slave we're looking for. On dual port VF,
1802 * slave_gid = [single port VFs on port <port>] +
1803 * [offset of the current slave from the first dual port VF] +
1804 * 1 (for the PF).
1805 */
1806 candidate_slave_gid = slave_gid + num_vfs_before;
1807
1808 actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1809 max_port_p_one = find_first_bit(
1810 actv_ports.ports, dev->caps.num_ports) +
1811 bitmap_weight(actv_ports.ports,
1812 dev->caps.num_ports) + 1;
1813
1814 /* Calculate the real slave number */
1815 for (i = 1; i < max_port_p_one; i++) {
1816 if (i == port)
1817 continue;
1818 bitmap_zero(exclusive_ports.ports,
1819 dev->caps.num_ports);
1820 set_bit(i - 1, exclusive_ports.ports);
1821 slaves_pport_actv =
1822 mlx4_phys_to_slaves_pport_actv(
1823 dev, &exclusive_ports);
1824 slave_gid += bitmap_weight(
1825 slaves_pport_actv.slaves,
1826 dev->persist->num_vfs + 1);
1827 }
1828 }
1829 *slave_id = slave_gid;
1830 }
1831
1832 return (found_ix >= 0) ? 0 : -EINVAL;
1833 }
1834 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1835
1836 int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1837 u8 *gid)
1838 {
1839 struct mlx4_priv *priv = mlx4_priv(dev);
1840
1841 if (!mlx4_is_master(dev))
1842 return -EINVAL;
1843
1844 memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1845 MLX4_ROCE_GID_ENTRY_SIZE);
1846 return 0;
1847 }
1848 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
1849
1850 /* Cable Module Info */
1851 #define MODULE_INFO_MAX_READ 48
1852
1853 #define I2C_ADDR_LOW 0x50
1854 #define I2C_ADDR_HIGH 0x51
1855 #define I2C_PAGE_SIZE 256
1856
1857 /* Module Info Data */
1858 struct mlx4_cable_info {
1859 u8 i2c_addr;
1860 u8 page_num;
1861 __be16 dev_mem_address;
1862 __be16 reserved1;
1863 __be16 size;
1864 __be32 reserved2[2];
1865 u8 data[MODULE_INFO_MAX_READ];
1866 };
1867
1868 enum cable_info_err {
1869 CABLE_INF_INV_PORT = 0x1,
1870 CABLE_INF_OP_NOSUP = 0x2,
1871 CABLE_INF_NOT_CONN = 0x3,
1872 CABLE_INF_NO_EEPRM = 0x4,
1873 CABLE_INF_PAGE_ERR = 0x5,
1874 CABLE_INF_INV_ADDR = 0x6,
1875 CABLE_INF_I2C_ADDR = 0x7,
1876 CABLE_INF_QSFP_VIO = 0x8,
1877 CABLE_INF_I2C_BUSY = 0x9,
1878 };
1879
1880 #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
1881
1882 static inline const char *cable_info_mad_err_str(u16 mad_status)
1883 {
1884 u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
1885
1886 switch (err) {
1887 case CABLE_INF_INV_PORT:
1888 return "invalid port selected";
1889 case CABLE_INF_OP_NOSUP:
1890 return "operation not supported for this port (the port is of type CX4 or internal)";
1891 case CABLE_INF_NOT_CONN:
1892 return "cable is not connected";
1893 case CABLE_INF_NO_EEPRM:
1894 return "the connected cable has no EPROM (passive copper cable)";
1895 case CABLE_INF_PAGE_ERR:
1896 return "page number is greater than 15";
1897 case CABLE_INF_INV_ADDR:
1898 return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
1899 case CABLE_INF_I2C_ADDR:
1900 return "invalid I2C slave address";
1901 case CABLE_INF_QSFP_VIO:
1902 return "at least one cable violates the QSFP specification and ignores the modsel signal";
1903 case CABLE_INF_I2C_BUSY:
1904 return "I2C bus is constantly busy";
1905 }
1906 return "Unknown Error";
1907 }
1908
1909 /**
1910 * mlx4_get_module_info - Read cable module eeprom data
1911 * @dev: mlx4_dev.
1912 * @port: port number.
1913 * @offset: byte offset in eeprom to start reading data from.
1914 * @size: num of bytes to read.
1915 * @data: output buffer to put the requested data into.
1916 *
1917  * Reads cable module eeprom data and puts the result into the
1918  * data pointer parameter.
1919  * Returns the number of bytes read on success or a negative error
1920  * code.
1921 */
1922 int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1923 u16 offset, u16 size, u8 *data)
1924 {
1925 struct mlx4_cmd_mailbox *inbox, *outbox;
1926 struct mlx4_mad_ifc *inmad, *outmad;
1927 struct mlx4_cable_info *cable_info;
1928 u16 i2c_addr;
1929 int ret;
1930
1931 if (size > MODULE_INFO_MAX_READ)
1932 size = MODULE_INFO_MAX_READ;
1933
1934 inbox = mlx4_alloc_cmd_mailbox(dev);
1935 if (IS_ERR(inbox))
1936 return PTR_ERR(inbox);
1937
1938 outbox = mlx4_alloc_cmd_mailbox(dev);
1939 if (IS_ERR(outbox)) {
1940 mlx4_free_cmd_mailbox(dev, inbox);
1941 return PTR_ERR(outbox);
1942 }
1943
1944 inmad = (struct mlx4_mad_ifc *)(inbox->buf);
1945 outmad = (struct mlx4_mad_ifc *)(outbox->buf);
1946
1947 inmad->method = 0x1; /* Get */
1948 inmad->class_version = 0x1;
1949 inmad->mgmt_class = 0x1;
1950 inmad->base_version = 0x1;
1951 inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
1952
1953 if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
1954 		/* Cross-page reads are not allowed;
1955 		 * read only up to offset 256 in the low page
1956 */
1957 size -= offset + size - I2C_PAGE_SIZE;
1958
1959 i2c_addr = I2C_ADDR_LOW;
1960 if (offset >= I2C_PAGE_SIZE) {
1961 /* Reset offset to high page */
1962 i2c_addr = I2C_ADDR_HIGH;
1963 offset -= I2C_PAGE_SIZE;
1964 }
1965
1966 cable_info = (struct mlx4_cable_info *)inmad->data;
1967 cable_info->dev_mem_address = cpu_to_be16(offset);
1968 cable_info->page_num = 0;
1969 cable_info->i2c_addr = i2c_addr;
1970 cable_info->size = cpu_to_be16(size);
1971
1972 ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
1973 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1974 MLX4_CMD_NATIVE);
1975 if (ret)
1976 goto out;
1977
1978 if (be16_to_cpu(outmad->status)) {
1979 /* Mad returned with bad status */
1980 ret = be16_to_cpu(outmad->status);
1981 mlx4_warn(dev,
1982 "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
1983 0xFF60, port, i2c_addr, offset, size,
1984 ret, cable_info_mad_err_str(ret));
1985
1986 if (i2c_addr == I2C_ADDR_HIGH &&
1987 MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
1988 /* Some SFP cables do not support i2c slave
1989 * address 0x51 (high page), abort silently.
1990 */
1991 ret = 0;
1992 else
1993 ret = -ret;
1994 goto out;
1995 }
1996 cable_info = (struct mlx4_cable_info *)outmad->data;
1997 memcpy(data, cable_info->data, size);
1998 ret = size;
1999 out:
2000 mlx4_free_cmd_mailbox(dev, inbox);
2001 mlx4_free_cmd_mailbox(dev, outbox);
2002 return ret;
2003 }
2004 EXPORT_SYMBOL(mlx4_get_module_info);
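/* Usage sketch (illustrative): reading the first identification bytes of a
 * cable module EEPROM, e.g. to decide how much of the EEPROM to dump:
 *
 *	u8 id[2];
 *	int n = mlx4_get_module_info(dev, port, 0, 2, id);
 *	if (n < 0)
 *		return n;
 *
 * On success n is the number of bytes actually read, which may be less than
 * requested because of the MODULE_INFO_MAX_READ limit and the I2C page
 * clamping above, so callers reading larger regions typically loop and
 * advance the offset by the returned count.
 */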