/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

#define MLX4_MAC_VALID (1ull << 63)

#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* Mac not found */
	return -EINVAL;
}

static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

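/*
 * Add @mac to the port's MAC table. If the address is already present,
 * its reference count is bumped and the existing index is returned;
 * otherwise a free slot is taken and the whole table is pushed to the
 * firmware with SET_PORT. Returns the table index on success or a
 * negative errno (-ENOSPC when the table is full). The table mutex is
 * taken internally.
 */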
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

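/*
 * Multifunction-aware wrapper around __mlx4_register_mac(). On a
 * virtualized (mfunc) device the request goes to the master through the
 * wrapped ALLOC_RES command so the resource tracker can account for it;
 * otherwise the table is updated directly. Returns the MAC table index
 * on success or a negative errno.
 */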
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);

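/*
 * Drop one reference on @mac in the port's MAC table. The entry is
 * cleared and the table re-written to the firmware only when the last
 * reference goes away; otherwise only the reference count is updated.
 */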
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index;

	mutex_lock(&table->mutex);
	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		goto out;
	}

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
				    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
	return;
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

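/*
 * Add @vlan to the port's VLAN table and return its slot through @index.
 * Indices below MLX4_VLAN_REGULAR are reserved for special entries and
 * are never handed out here. An already registered VID only gets its
 * reference count incremented. Returns 0 on success or a negative errno.
 */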
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increment reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

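/*
 * Release one reference on the VLAN table entry at @index. Indices below
 * MLX4_VLAN_REGULAR are special and are refused; the firmware table is
 * only re-written once the reference count reaches zero.
 */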
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify vlan table\n",
			 index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		(void) mlx4_cmd_imm(dev, index, &out_param, RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, index);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

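/*
 * Read the IB port capability mask with a MAD_IFC (PortInfo, attribute
 * 0x0015) query and return it through @caps. The mask is taken from
 * offset 84 of the response mailbox.
 */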
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

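/*
 * Common SET_PORT handling for the master and its slaves. For Ethernet
 * ports, slaves may only issue MLX4_SET_PORT_GENERAL (MTU changes); the
 * requested MTU is clamped to the port's capability and the value
 * actually programmed is the maximum MTU requested by any function on
 * the port. For IB ports, only the aggregated capability mask and the
 * QKey violation counter reset bit are passed through to the firmware.
 */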
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* MTU is configured as the max MTU among all the
			 * functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests. Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

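/*
 * Basic IB port initialization: program the default capability mask,
 * optionally the PKEY table size (master only), and the MTU/VL caps.
 * The VL cap is retried from 8 downwards for as long as the command
 * returns -ENOMEM, settling on the largest value the port supports.
 * Ethernet ports are skipped.
 */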
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP) |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

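/*
 * Program the general Ethernet port context: MTU and the per-direction
 * pause/flow-control settings. The global pause bits (pptx/pprx) are
 * only asserted when the corresponding per-priority mask (pfctx/pfcrx)
 * is zero, so the two modes are never enabled together in the context.
 */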
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

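/*
 * Program the RX QP calculation context used by A0 steering: base QP
 * number, log of the number of MACs, and the unicast/multicast
 * promiscuous defaults. This is a no-op for devices that are not in
 * MLX4_STEERING_MODE_A0.
 */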
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);

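/*
 * Configure the transmit scheduler for each traffic class: priority
 * group, bandwidth percentage, and a per-TC rate limit (falling back to
 * MLX4_RATELIMIT_DEFAULT when no limit is supplied for that class).
 */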
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
			    u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
			MLX4_RATELIMIT_DEFAULT;

		tc->pg = htons(pg[i]);
		tc->bw_precentage = htons(tc_tx_bw[i]);

		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
		tc->max_bw_value = htons(r);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}

int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	if (slave != dev->caps.function)
		return 0;
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}

void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	if (!mlx4_is_mfunc(dev)) {
		*stats_bitmap = 0;
		return;
	}

	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK);

	if (mlx4_is_master(dev))
		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);