drivers/net/ethernet/mellanox/mlx4/port.c
1 /*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
37
38 #include <linux/mlx4/cmd.h>
39
40 #include "mlx4.h"
41
42 #define MLX4_MAC_VALID (1ull << 63)
43
44 #define MLX4_VLAN_VALID (1u << 31)
45 #define MLX4_VLAN_MASK 0xfff
46
47 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
48 #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
49 #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
50 #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
51
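/*
 * Per-port address table setup: clear every entry and reference count,
 * then size the table from the device capabilities (log_num_macs and
 * log_num_vlans respectively).
 */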
52 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
53 {
54 int i;
55
56 mutex_init(&table->mutex);
57 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
58 table->entries[i] = 0;
59 table->refs[i] = 0;
60 }
61 table->max = 1 << dev->caps.log_num_macs;
62 table->total = 0;
63 }
64
65 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
66 {
67 int i;
68
69 mutex_init(&table->mutex);
70 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
71 table->entries[i] = 0;
72 table->refs[i] = 0;
73 }
74 table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
75 table->total = 0;
76 }
77
78 static int validate_index(struct mlx4_dev *dev,
79 struct mlx4_mac_table *table, int index)
80 {
81 int err = 0;
82
83 if (index < 0 || index >= table->max || !table->entries[index]) {
84 mlx4_warn(dev, "No valid MAC entry for the given index\n");
85 err = -EINVAL;
86 }
87 return err;
88 }
89
90 static int find_index(struct mlx4_dev *dev,
91 struct mlx4_mac_table *table, u64 mac)
92 {
93 int i;
94
95 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
96 if ((mac & MLX4_MAC_MASK) ==
97 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
98 return i;
99 }
100 /* Mac not found */
101 return -EINVAL;
102 }
103
104 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
105 __be64 *entries)
106 {
107 struct mlx4_cmd_mailbox *mailbox;
108 u32 in_mod;
109 int err;
110
111 mailbox = mlx4_alloc_cmd_mailbox(dev);
112 if (IS_ERR(mailbox))
113 return PTR_ERR(mailbox);
114
115 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
116
117 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
118
119 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
120 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
121
122 mlx4_free_cmd_mailbox(dev, mailbox);
123 return err;
124 }
125
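/*
 * __mlx4_register_mac - add a MAC address to the port's MAC table.
 * A MAC that is already present only gets its reference count bumped;
 * otherwise the first free slot is filled and the whole table is pushed
 * to firmware via SET_PORT.  Returns the table index or a negative errno.
 */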
126 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
127 {
128 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
129 struct mlx4_mac_table *table = &info->mac_table;
130 int i, err = 0;
131 int free = -1;
132
133 mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
134 (unsigned long long) mac, port);
135
136 mutex_lock(&table->mutex);
137 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
138 if (free < 0 && !table->entries[i]) {
139 free = i;
140 continue;
141 }
142
143 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
144 /* MAC already registered, increment ref count */
145 err = i;
146 ++table->refs[i];
147 goto out;
148 }
149 }
150
151 mlx4_dbg(dev, "Free MAC index is %d\n", free);
152
153 if (table->total == table->max) {
154 /* No free mac entries */
155 err = -ENOSPC;
156 goto out;
157 }
158
159 /* Register new MAC */
160 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
161
162 err = mlx4_set_port_mac_table(dev, port, table->entries);
163 if (unlikely(err)) {
164 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
165 (unsigned long long) mac);
166 table->entries[free] = 0;
167 goto out;
168 }
169 table->refs[free] = 1;
170 err = free;
171 ++table->total;
172 out:
173 mutex_unlock(&table->mutex);
174 return err;
175 }
176 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
177
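/*
 * Multi-function devices funnel MAC registration through the wrapped
 * ALLOC_RES command so the resource tracker on the master updates the
 * table; the allocated index is returned in the immediate out parameter.
 * Single-function devices update the table directly.
 */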
178 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
179 {
180 u64 out_param = 0;
181 int err;
182
183 if (mlx4_is_mfunc(dev)) {
184 set_param_l(&out_param, port);
185 err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
186 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
187 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
188 if (err)
189 return err;
190
191 return get_param_l(&out_param);
192 }
193 return __mlx4_register_mac(dev, port, mac);
194 }
195 EXPORT_SYMBOL_GPL(mlx4_register_mac);
196
197 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
198 {
199 return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
200 (port - 1) * (1 << dev->caps.log_num_macs);
201 }
202 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
203
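/*
 * __mlx4_unregister_mac - drop one reference to a MAC address.  The
 * entry is cleared and the table re-pushed to firmware only when the
 * last reference goes away.
 */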
204 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
205 {
206 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
207 struct mlx4_mac_table *table = &info->mac_table;
208 int index;
209
210 mutex_lock(&table->mutex);
211 index = find_index(dev, table, mac);
212
213 if (validate_index(dev, table, index))
214 goto out;
215 if (--table->refs[index]) {
216 mlx4_dbg(dev, "Have more references for index %d, "
217 "no need to modify mac table\n", index);
218 goto out;
219 }
220
221 table->entries[index] = 0;
222 mlx4_set_port_mac_table(dev, port, table->entries);
223 --table->total;
224 out:
225 mutex_unlock(&table->mutex);
226 }
227 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
228
229 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
230 {
231 u64 out_param = 0;
232
233 if (mlx4_is_mfunc(dev)) {
234 set_param_l(&out_param, port);
235 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
236 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
237 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
238 return;
239 }
240 __mlx4_unregister_mac(dev, port, mac);
241 return;
242 }
243 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
244
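/*
 * __mlx4_replace_mac - overwrite the MAC entry backing a given QP (the
 * index is qpn - base_qpn) and push the updated table to firmware.
 */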
245 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
246 {
247 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
248 struct mlx4_mac_table *table = &info->mac_table;
249 int index = qpn - info->base_qpn;
250 int err = 0;
251
252 /* CX1 doesn't support multi-functions */
253 mutex_lock(&table->mutex);
254
255 err = validate_index(dev, table, index);
256 if (err)
257 goto out;
258
259 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
260
261 err = mlx4_set_port_mac_table(dev, port, table->entries);
262 if (unlikely(err)) {
263 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
264 (unsigned long long) new_mac);
265 table->entries[index] = 0;
266 }
267 out:
268 mutex_unlock(&table->mutex);
269 return err;
270 }
271 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
272
273 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
274 __be32 *entries)
275 {
276 struct mlx4_cmd_mailbox *mailbox;
277 u32 in_mod;
278 int err;
279
280 mailbox = mlx4_alloc_cmd_mailbox(dev);
281 if (IS_ERR(mailbox))
282 return PTR_ERR(mailbox);
283
284 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
285 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
286 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
287 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
288
289 mlx4_free_cmd_mailbox(dev, mailbox);
290
291 return err;
292 }
293
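/*
 * Look up a VID in the cached VLAN table.  Returns 0 and the index via
 * *idx when found, -ENOENT otherwise; no reference is taken.
 */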
294 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
295 {
296 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
297 int i;
298
299 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
300 if (table->refs[i] &&
301 (vid == (MLX4_VLAN_MASK &
302 be32_to_cpu(table->entries[i])))) {
303 /* VLAN already registered, return its cached index */
304 *idx = i;
305 return 0;
306 }
307 }
308
309 return -ENOENT;
310 }
311 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
312
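/*
 * __mlx4_register_vlan - add a VID to the port's VLAN table, skipping
 * the reserved indices below MLX4_VLAN_REGULAR.  A VID that is already
 * present only gets its reference count bumped; new entries are pushed
 * to firmware via SET_PORT.
 */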
313 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
314 int *index)
315 {
316 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
317 int i, err = 0;
318 int free = -1;
319
320 mutex_lock(&table->mutex);
321
322 if (table->total == table->max) {
323 /* No free vlan entries */
324 err = -ENOSPC;
325 goto out;
326 }
327
328 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
329 if (free < 0 && (table->refs[i] == 0)) {
330 free = i;
331 continue;
332 }
333
334 if (table->refs[i] &&
335 (vlan == (MLX4_VLAN_MASK &
336 be32_to_cpu(table->entries[i])))) {
337 /* VLAN already registered, increase reference count */
338 *index = i;
339 ++table->refs[i];
340 goto out;
341 }
342 }
343
344 if (free < 0) {
345 err = -ENOMEM;
346 goto out;
347 }
348
349 /* Register new VLAN */
350 table->refs[free] = 1;
351 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
352
353 err = mlx4_set_port_vlan_table(dev, port, table->entries);
354 if (unlikely(err)) {
355 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
356 table->refs[free] = 0;
357 table->entries[free] = 0;
358 goto out;
359 }
360
361 *index = free;
362 ++table->total;
363 out:
364 mutex_unlock(&table->mutex);
365 return err;
366 }
367
368 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
369 {
370 u64 out_param = 0;
371 int err;
372
373 if (mlx4_is_mfunc(dev)) {
374 set_param_l(&out_param, port);
375 err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
376 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
377 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
378 if (!err)
379 *index = get_param_l(&out_param);
380
381 return err;
382 }
383 return __mlx4_register_vlan(dev, port, vlan, index);
384 }
385 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
386
387 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
388 {
389 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
390
391 if (index < MLX4_VLAN_REGULAR) {
392 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
393 return;
394 }
395
396 mutex_lock(&table->mutex);
397 if (!table->refs[index]) {
398 mlx4_warn(dev, "No vlan entry for index %d\n", index);
399 goto out;
400 }
401 if (--table->refs[index]) {
402 mlx4_dbg(dev, "Have more references for index %d, "
403 "no need to modify vlan table\n", index);
404 goto out;
405 }
406 table->entries[index] = 0;
407 mlx4_set_port_vlan_table(dev, port, table->entries);
408 --table->total;
409 out:
410 mutex_unlock(&table->mutex);
411 }
412
413 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
414 {
415 u64 in_param = 0;
416 int err;
417
418 if (mlx4_is_mfunc(dev)) {
419 set_param_l(&in_param, port);
420 err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
421 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
422 MLX4_CMD_WRAPPED);
423 if (err)
424 mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
425 index);
426
427 return;
428 }
429 __mlx4_unregister_vlan(dev, port, index);
430 }
431 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
432
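/*
 * Read the IB capability mask of a port through MAD_IFC: the inbox is a
 * hand-built PortInfo query (attribute 0x0015, attribute modifier set to
 * the port) and the mask is taken from offset 84 of the response, i.e.
 * offset 20 inside the returned PortInfo data.
 */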
433 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
434 {
435 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
436 u8 *inbuf, *outbuf;
437 int err;
438
439 inmailbox = mlx4_alloc_cmd_mailbox(dev);
440 if (IS_ERR(inmailbox))
441 return PTR_ERR(inmailbox);
442
443 outmailbox = mlx4_alloc_cmd_mailbox(dev);
444 if (IS_ERR(outmailbox)) {
445 mlx4_free_cmd_mailbox(dev, inmailbox);
446 return PTR_ERR(outmailbox);
447 }
448
449 inbuf = inmailbox->buf;
450 outbuf = outmailbox->buf;
451 memset(inbuf, 0, 256);
452 memset(outbuf, 0, 256);
453 inbuf[0] = 1;
454 inbuf[1] = 1;
455 inbuf[2] = 1;
456 inbuf[3] = 1;
457 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
458 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
459
460 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
461 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
462 MLX4_CMD_NATIVE);
463 if (!err)
464 *caps = *(__be32 *) (outbuf + 84);
465 mlx4_free_cmd_mailbox(dev, inmailbox);
466 mlx4_free_cmd_mailbox(dev, outmailbox);
467 return err;
468 }
469
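/*
 * mlx4_common_set_port - SET_PORT handling shared by master and slaves.
 * For Ethernet ports, slaves may only touch the general context, and the
 * MTU actually programmed is the largest one requested by any function.
 * For IB ports, the capability mask sent to firmware is the aggregate of
 * all slave masks, and setting IS_SM (for guests) or DEV_MGMT_SUP (in
 * multi-function mode) is rejected.
 */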
470 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
471 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
472 {
473 struct mlx4_priv *priv = mlx4_priv(dev);
474 struct mlx4_port_info *port_info;
475 struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
476 struct mlx4_slave_state *slave_st = &master->slave_state[slave];
477 struct mlx4_set_port_rqp_calc_context *qpn_context;
478 struct mlx4_set_port_general_context *gen_context;
479 int reset_qkey_viols;
480 int port;
481 int is_eth;
482 u32 in_modifier;
483 u32 promisc;
484 u16 mtu, prev_mtu;
485 int err;
486 int i;
487 __be32 agg_cap_mask;
488 __be32 slave_cap_mask;
489 __be32 new_cap_mask;
490
491 port = in_mod & 0xff;
492 in_modifier = in_mod >> 8;
493 is_eth = op_mod;
494 port_info = &priv->port[port];
495
496 /* Slaves cannot perform SET_PORT operations except changing MTU */
497 if (is_eth) {
498 if (slave != dev->caps.function &&
499 in_modifier != MLX4_SET_PORT_GENERAL) {
500 mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
501 slave);
502 return -EINVAL;
503 }
504 switch (in_modifier) {
505 case MLX4_SET_PORT_RQP_CALC:
506 qpn_context = inbox->buf;
507 qpn_context->base_qpn =
508 cpu_to_be32(port_info->base_qpn);
509 qpn_context->n_mac = 0x7;
510 promisc = be32_to_cpu(qpn_context->promisc) >>
511 SET_PORT_PROMISC_SHIFT;
512 qpn_context->promisc = cpu_to_be32(
513 promisc << SET_PORT_PROMISC_SHIFT |
514 port_info->base_qpn);
515 promisc = be32_to_cpu(qpn_context->mcast) >>
516 SET_PORT_MC_PROMISC_SHIFT;
517 qpn_context->mcast = cpu_to_be32(
518 promisc << SET_PORT_MC_PROMISC_SHIFT |
519 port_info->base_qpn);
520 break;
521 case MLX4_SET_PORT_GENERAL:
522 gen_context = inbox->buf;
523 /* MTU is configured as the max MTU among all the
524 * functions on the port. */
525 mtu = be16_to_cpu(gen_context->mtu);
526 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
527 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
528 prev_mtu = slave_st->mtu[port];
529 slave_st->mtu[port] = mtu;
530 if (mtu > master->max_mtu[port])
531 master->max_mtu[port] = mtu;
532 if (mtu < prev_mtu && prev_mtu ==
533 master->max_mtu[port]) {
534 slave_st->mtu[port] = mtu;
535 master->max_mtu[port] = mtu;
536 for (i = 0; i < dev->num_slaves; i++) {
537 master->max_mtu[port] =
538 max(master->max_mtu[port],
539 master->slave_state[i].mtu[port]);
540 }
541 }
542
543 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
544 break;
545 }
546 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
547 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
548 MLX4_CMD_NATIVE);
549 }
550
551 /* For IB, we only consider:
552 * - The capability mask, which is set to the aggregate of all
553 * slave function capabilities
554 * - The QKey violation counter - reset according to each request.
555 */
556
557 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
558 reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
559 new_cap_mask = ((__be32 *) inbox->buf)[2];
560 } else {
561 reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
562 new_cap_mask = ((__be32 *) inbox->buf)[1];
563 }
564
565 /* slave may not set the IS_SM capability for the port */
566 if (slave != mlx4_master_func_num(dev) &&
567 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
568 return -EINVAL;
569
570 /* No DEV_MGMT in multifunc mode */
571 if (mlx4_is_mfunc(dev) &&
572 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
573 return -EINVAL;
574
575 agg_cap_mask = 0;
576 slave_cap_mask =
577 priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
578 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
579 for (i = 0; i < dev->num_slaves; i++)
580 agg_cap_mask |=
581 priv->mfunc.master.slave_state[i].ib_cap_mask[port];
582
583 /* only clear mailbox for guests. Master may be setting
584 * MTU or PKEY table size
585 */
586 if (slave != dev->caps.function)
587 memset(inbox->buf, 0, 256);
588 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
589 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
590 ((__be32 *) inbox->buf)[2] = agg_cap_mask;
591 } else {
592 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
593 ((__be32 *) inbox->buf)[1] = agg_cap_mask;
594 }
595
596 err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
597 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
598 if (err)
599 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
600 slave_cap_mask;
601 return err;
602 }
603
604 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
605 struct mlx4_vhcr *vhcr,
606 struct mlx4_cmd_mailbox *inbox,
607 struct mlx4_cmd_mailbox *outbox,
608 struct mlx4_cmd_info *cmd)
609 {
610 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
611 vhcr->op_modifier, inbox);
612 }
613
614 /* bit locations for set port command with zero op modifier */
615 enum {
616 MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
617 MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
618 MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
619 MLX4_CHANGE_PORT_VL_CAP = 21,
620 MLX4_CHANGE_PORT_MTU_CAP = 22,
621 };
622
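/*
 * Bring up an IB port: program the default capability mask, optionally
 * the PKEY table size, plus the MTU and VL caps.  If firmware rejects a
 * VL cap with -ENOMEM, the value is retried from 8 downwards in powers
 * of two.
 */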
623 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
624 {
625 struct mlx4_cmd_mailbox *mailbox;
626 int err, vl_cap, pkey_tbl_flag = 0;
627
628 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
629 return 0;
630
631 mailbox = mlx4_alloc_cmd_mailbox(dev);
632 if (IS_ERR(mailbox))
633 return PTR_ERR(mailbox);
634
635 memset(mailbox->buf, 0, 256);
636
637 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
638
639 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
640 pkey_tbl_flag = 1;
641 ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
642 }
643
644 /* IB VL CAP enum isn't used by the firmware, just numerical values */
645 for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
646 ((__be32 *) mailbox->buf)[0] = cpu_to_be32(
647 (1 << MLX4_CHANGE_PORT_MTU_CAP) |
648 (1 << MLX4_CHANGE_PORT_VL_CAP) |
649 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
650 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
651 (vl_cap << MLX4_SET_PORT_VL_CAP));
652 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
653 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
654 if (err != -ENOMEM)
655 break;
656 }
657
658 mlx4_free_cmd_mailbox(dev, mailbox);
659 return err;
660 }
661
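/*
 * Program the Ethernet general port context: MTU and pause/PFC control.
 * Global pause (pptx/pprx) is only kept enabled when the corresponding
 * per-priority mask (pfctx/pfcrx) is zero, as the two modes are mutually
 * exclusive.
 */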
662 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
663 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
664 {
665 struct mlx4_cmd_mailbox *mailbox;
666 struct mlx4_set_port_general_context *context;
667 int err;
668 u32 in_mod;
669
670 mailbox = mlx4_alloc_cmd_mailbox(dev);
671 if (IS_ERR(mailbox))
672 return PTR_ERR(mailbox);
673 context = mailbox->buf;
674 memset(context, 0, sizeof *context);
675
676 context->flags = SET_PORT_GEN_ALL_VALID;
677 context->mtu = cpu_to_be16(mtu);
678 context->pptx = (pptx * (!pfctx)) << 7;
679 context->pfctx = pfctx;
680 context->pprx = (pprx * (!pfcrx)) << 7;
681 context->pfcrx = pfcrx;
682
683 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
684 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
685 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
686
687 mlx4_free_cmd_mailbox(dev, mailbox);
688 return err;
689 }
690 EXPORT_SYMBOL(mlx4_SET_PORT_general);
691
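/*
 * Program the RX QP calculation context: base QPN, unicast and multicast
 * promiscuous settings and the default VLAN/no-VLAN indices.  Only
 * meaningful in A0 (MAC-based) steering mode; otherwise this is a no-op.
 */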
692 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
693 u8 promisc)
694 {
695 struct mlx4_cmd_mailbox *mailbox;
696 struct mlx4_set_port_rqp_calc_context *context;
697 int err;
698 u32 in_mod;
699 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
700 MCAST_DIRECT : MCAST_DEFAULT;
701
702 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
703 return 0;
704
705 mailbox = mlx4_alloc_cmd_mailbox(dev);
706 if (IS_ERR(mailbox))
707 return PTR_ERR(mailbox);
708 context = mailbox->buf;
709 memset(context, 0, sizeof *context);
710
711 context->base_qpn = cpu_to_be32(base_qpn);
712 context->n_mac = dev->caps.log_num_macs;
713 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
714 base_qpn);
715 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
716 base_qpn);
717 context->intra_no_vlan = 0;
718 context->no_vlan = MLX4_NO_VLAN_IDX;
719 context->intra_vlan_miss = 0;
720 context->vlan_miss = MLX4_VLAN_MISS_IDX;
721
722 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
723 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
724 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
725
726 mlx4_free_cmd_mailbox(dev, mailbox);
727 return err;
728 }
729 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
730
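/*
 * Map user priorities to traffic classes; two 4-bit entries are packed
 * into each byte of the PRIO2TC context.
 */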
731 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
732 {
733 struct mlx4_cmd_mailbox *mailbox;
734 struct mlx4_set_port_prio2tc_context *context;
735 int err;
736 u32 in_mod;
737 int i;
738
739 mailbox = mlx4_alloc_cmd_mailbox(dev);
740 if (IS_ERR(mailbox))
741 return PTR_ERR(mailbox);
742 context = mailbox->buf;
743 memset(context, 0, sizeof *context);
744
745 for (i = 0; i < MLX4_NUM_UP; i += 2)
746 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
747
748 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
749 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
750 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
751
752 mlx4_free_cmd_mailbox(dev, mailbox);
753 return err;
754 }
755 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
756
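/*
 * Configure the per-traffic-class transmit scheduler: priority group,
 * bandwidth percentage and an optional rate limit per TC
 * (MLX4_RATELIMIT_DEFAULT when none is supplied).
 */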
757 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
758 u8 *pg, u16 *ratelimit)
759 {
760 struct mlx4_cmd_mailbox *mailbox;
761 struct mlx4_set_port_scheduler_context *context;
762 int err;
763 u32 in_mod;
764 int i;
765
766 mailbox = mlx4_alloc_cmd_mailbox(dev);
767 if (IS_ERR(mailbox))
768 return PTR_ERR(mailbox);
769 context = mailbox->buf;
770 memset(context, 0, sizeof *context);
771
772 for (i = 0; i < MLX4_NUM_TC; i++) {
773 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
774 u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
775 MLX4_RATELIMIT_DEFAULT;
776
777 tc->pg = htons(pg[i]);
778 tc->bw_precentage = htons(tc_tx_bw[i]);
779
780 tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
781 tc->max_bw_value = htons(r);
782 }
783
784 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
785 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
786 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
787
788 mlx4_free_cmd_mailbox(dev, mailbox);
789 return err;
790 }
791 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
792
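/*
 * A slave's SET_MCAST_FLTR request is accepted but ignored here; nothing
 * is programmed on its behalf.
 */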
793 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
794 struct mlx4_vhcr *vhcr,
795 struct mlx4_cmd_mailbox *inbox,
796 struct mlx4_cmd_mailbox *outbox,
797 struct mlx4_cmd_info *cmd)
798 {
799 int err = 0;
800
801 return err;
802 }
803
804 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
805 u64 mac, u64 clear, u8 mode)
806 {
807 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
808 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
809 MLX4_CMD_WRAPPED);
810 }
811 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
812
813 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
814 struct mlx4_vhcr *vhcr,
815 struct mlx4_cmd_mailbox *inbox,
816 struct mlx4_cmd_mailbox *outbox,
817 struct mlx4_cmd_info *cmd)
818 {
819 int err = 0;
820
821 return err;
822 }
823
824 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
825 u32 in_mod, struct mlx4_cmd_mailbox *outbox)
826 {
827 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
828 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
829 MLX4_CMD_NATIVE);
830 }
831
832 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
833 struct mlx4_vhcr *vhcr,
834 struct mlx4_cmd_mailbox *inbox,
835 struct mlx4_cmd_mailbox *outbox,
836 struct mlx4_cmd_info *cmd)
837 {
838 if (slave != dev->caps.function)
839 return 0;
840 return mlx4_common_dump_eth_stats(dev, slave,
841 vhcr->in_modifier, outbox);
842 }
843
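/*
 * Build the bitmap of counter groups that are valid for this function:
 * empty (no masking) on single-function devices; traffic, drop and port
 * counters on multi-function devices, with the error counters reserved
 * for the master.
 */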
844 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
845 {
846 if (!mlx4_is_mfunc(dev)) {
847 *stats_bitmap = 0;
848 return;
849 }
850
851 *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
852 MLX4_STATS_TRAFFIC_DROPS_MASK |
853 MLX4_STATS_PORT_COUNTERS_MASK);
854
855 if (mlx4_is_master(dev))
856 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
857 }
858 EXPORT_SYMBOL(mlx4_set_stats_bitmap);