]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/freescale/fs_enet: fix error return code
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / mellanox / mlx4 / mcg.c
CommitLineData
225c7b1f
RD
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
51a379d0 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
225c7b1f
RD
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
225c7b1f 34#include <linux/string.h>
0345584e 35#include <linux/etherdevice.h>
225c7b1f
RD
36
37#include <linux/mlx4/cmd.h>
ee40fa06 38#include <linux/export.h>
225c7b1f
RD
39
40#include "mlx4.h"
41
521e575b
RL
42#define MGM_QPN_MASK 0x00FFFFFF
43#define MGM_BLCK_LB_BIT 30
44
225c7b1f
RD
45static const u8 zero_gid[16]; /* automatically initialized to 0 */
46
0ec2c0f8
EE
47struct mlx4_mgm {
48 __be32 next_gid_index;
49 __be32 members_count;
50 u32 reserved[2];
51 u8 gid[16];
52 __be32 qp[MLX4_MAX_QP_PER_MGM];
53};
54
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{
0ff1fb65
HHZ
57 if (dev->caps.steering_mode ==
58 MLX4_STEERING_MODE_DEVICE_MANAGED)
59 return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
60 else
61 return min((1 << mlx4_log_num_mgm_entry_size),
62 MLX4_MAX_MGM_ENTRY_SIZE);
0ec2c0f8
EE
63}
64
/* Number of QP slots per MGM entry: the entry minus its 32-byte header,
 * divided into 4-byte QPN slots.
 */
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
69
8fcfb4db
HHZ
70static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
71 struct mlx4_cmd_mailbox *mailbox,
72 u32 size,
73 u64 *reg_id)
74{
75 u64 imm;
76 int err = 0;
77
78 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
79 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
80 MLX4_CMD_NATIVE);
81 if (err)
82 return err;
83 *reg_id = imm;
84
85 return err;
86}
87
88static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
89{
90 int err = 0;
91
92 err = mlx4_cmd(dev, regid, 0, 0,
93 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
94 MLX4_CMD_NATIVE);
95
96 return err;
97}
98
0345584e
YP
99static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
100 struct mlx4_cmd_mailbox *mailbox)
225c7b1f
RD
101{
102 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
f9baff50 103 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
225c7b1f
RD
104}
105
0345584e
YP
106static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
107 struct mlx4_cmd_mailbox *mailbox)
225c7b1f
RD
108{
109 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
f9baff50 110 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
225c7b1f
RD
111}
112
0ec2c0f8 113static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
b12d93d6
YP
114 struct mlx4_cmd_mailbox *mailbox)
115{
116 u32 in_mod;
117
0ec2c0f8 118 in_mod = (u32) port << 16 | steer << 1;
b12d93d6 119 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
f9baff50
JM
120 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
121 MLX4_CMD_NATIVE);
b12d93d6
YP
122}
123
0345584e
YP
124static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
125 u16 *hash, u8 op_mod)
225c7b1f
RD
126{
127 u64 imm;
128 int err;
129
0345584e 130 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
f9baff50
JM
131 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
132 MLX4_CMD_NATIVE);
225c7b1f
RD
133
134 if (!err)
135 *hash = imm;
136
137 return err;
138}
139
b12d93d6
YP
140static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
141 enum mlx4_steer_type steer,
142 u32 qpn)
143{
144 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
145 struct mlx4_promisc_qp *pqp;
146
147 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
148 if (pqp->qpn == qpn)
149 return pqp;
150 }
151 /* not found */
152 return NULL;
153}
154
155/*
156 * Add new entry to steering data structure.
157 * All promisc QPs should be added as well
158 */
0ec2c0f8 159static int new_steering_entry(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
160 enum mlx4_steer_type steer,
161 unsigned int index, u32 qpn)
162{
163 struct mlx4_steer *s_steer;
164 struct mlx4_cmd_mailbox *mailbox;
165 struct mlx4_mgm *mgm;
166 u32 members_count;
167 struct mlx4_steer_index *new_entry;
168 struct mlx4_promisc_qp *pqp;
a14b289d 169 struct mlx4_promisc_qp *dqp = NULL;
b12d93d6
YP
170 u32 prot;
171 int err;
b12d93d6 172
4c41b367 173 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6
YP
174 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
175 if (!new_entry)
176 return -ENOMEM;
177
178 INIT_LIST_HEAD(&new_entry->duplicates);
179 new_entry->index = index;
180 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
181
182 /* If the given qpn is also a promisc qp,
183 * it should be inserted to duplicates list
184 */
0ec2c0f8 185 pqp = get_promisc_qp(dev, 0, steer, qpn);
b12d93d6
YP
186 if (pqp) {
187 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
188 if (!dqp) {
189 err = -ENOMEM;
190 goto out_alloc;
191 }
192 dqp->qpn = qpn;
193 list_add_tail(&dqp->list, &new_entry->duplicates);
194 }
195
196 /* if no promisc qps for this vep, we are done */
197 if (list_empty(&s_steer->promisc_qps[steer]))
198 return 0;
199
200 /* now need to add all the promisc qps to the new
201 * steering entry, as they should also receive the packets
202 * destined to this address */
203 mailbox = mlx4_alloc_cmd_mailbox(dev);
204 if (IS_ERR(mailbox)) {
205 err = -ENOMEM;
206 goto out_alloc;
207 }
208 mgm = mailbox->buf;
209
210 err = mlx4_READ_ENTRY(dev, index, mailbox);
211 if (err)
212 goto out_mailbox;
213
214 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
215 prot = be32_to_cpu(mgm->members_count) >> 30;
216 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
217 /* don't add already existing qpn */
218 if (pqp->qpn == qpn)
219 continue;
0ec2c0f8 220 if (members_count == dev->caps.num_qp_per_mgm) {
b12d93d6
YP
221 /* out of space */
222 err = -ENOMEM;
223 goto out_mailbox;
224 }
225
226 /* add the qpn */
227 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
228 }
229 /* update the qps count and update the entry with all the promisc qps*/
230 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
231 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
232
233out_mailbox:
234 mlx4_free_cmd_mailbox(dev, mailbox);
235 if (!err)
236 return 0;
237out_alloc:
238 if (dqp) {
239 list_del(&dqp->list);
a14b289d 240 kfree(dqp);
b12d93d6
YP
241 }
242 list_del(&new_entry->list);
243 kfree(new_entry);
244 return err;
245}
246
247/* update the data structures with existing steering entry */
0ec2c0f8 248static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
249 enum mlx4_steer_type steer,
250 unsigned int index, u32 qpn)
251{
252 struct mlx4_steer *s_steer;
253 struct mlx4_steer_index *tmp_entry, *entry = NULL;
254 struct mlx4_promisc_qp *pqp;
255 struct mlx4_promisc_qp *dqp;
b12d93d6 256
4c41b367 257 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6 258
0ec2c0f8 259 pqp = get_promisc_qp(dev, 0, steer, qpn);
b12d93d6
YP
260 if (!pqp)
261 return 0; /* nothing to do */
262
263 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
264 if (tmp_entry->index == index) {
265 entry = tmp_entry;
266 break;
267 }
268 }
269 if (unlikely(!entry)) {
270 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
271 return -EINVAL;
272 }
273
274 /* the given qpn is listed as a promisc qpn
275 * we need to add it as a duplicate to this entry
25985edc 276 * for future references */
b12d93d6 277 list_for_each_entry(dqp, &entry->duplicates, list) {
0ec2c0f8 278 if (qpn == pqp->qpn)
b12d93d6
YP
279 return 0; /* qp is already duplicated */
280 }
281
282 /* add the qp as a duplicate on this index */
283 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
284 if (!dqp)
285 return -ENOMEM;
286 dqp->qpn = qpn;
287 list_add_tail(&dqp->list, &entry->duplicates);
288
289 return 0;
290}
291
292/* Check whether a qpn is a duplicate on steering entry
293 * If so, it should not be removed from mgm */
0ec2c0f8 294static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
295 enum mlx4_steer_type steer,
296 unsigned int index, u32 qpn)
297{
298 struct mlx4_steer *s_steer;
299 struct mlx4_steer_index *tmp_entry, *entry = NULL;
300 struct mlx4_promisc_qp *dqp, *tmp_dqp;
b12d93d6 301
4c41b367 302 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6
YP
303
304 /* if qp is not promisc, it cannot be duplicated */
0ec2c0f8 305 if (!get_promisc_qp(dev, 0, steer, qpn))
b12d93d6
YP
306 return false;
307
308 /* The qp is promisc qp so it is a duplicate on this index
309 * Find the index entry, and remove the duplicate */
310 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
311 if (tmp_entry->index == index) {
312 entry = tmp_entry;
313 break;
314 }
315 }
316 if (unlikely(!entry)) {
317 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
318 return false;
319 }
320 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
321 if (dqp->qpn == qpn) {
322 list_del(&dqp->list);
323 kfree(dqp);
324 }
325 }
326 return true;
327}
328
/* If a steering entry contains only promisc QPs, it can be removed. */
0ec2c0f8 330static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
331 enum mlx4_steer_type steer,
332 unsigned int index, u32 tqpn)
333{
334 struct mlx4_steer *s_steer;
335 struct mlx4_cmd_mailbox *mailbox;
336 struct mlx4_mgm *mgm;
337 struct mlx4_steer_index *entry = NULL, *tmp_entry;
338 u32 qpn;
339 u32 members_count;
340 bool ret = false;
341 int i;
b12d93d6 342
4c41b367 343 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6
YP
344
345 mailbox = mlx4_alloc_cmd_mailbox(dev);
346 if (IS_ERR(mailbox))
347 return false;
348 mgm = mailbox->buf;
349
350 if (mlx4_READ_ENTRY(dev, index, mailbox))
351 goto out;
352 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
353 for (i = 0; i < members_count; i++) {
354 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
0ec2c0f8 355 if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
b12d93d6
YP
356 /* the qp is not promisc, the entry can't be removed */
357 goto out;
358 }
359 }
360 /* All the qps currently registered for this entry are promiscuous,
361 * Checking for duplicates */
362 ret = true;
363 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
364 if (entry->index == index) {
365 if (list_empty(&entry->duplicates)) {
366 list_del(&entry->list);
367 kfree(entry);
368 } else {
369 /* This entry contains duplicates so it shouldn't be removed */
370 ret = false;
371 goto out;
372 }
373 }
374 }
375
376out:
377 mlx4_free_cmd_mailbox(dev, mailbox);
378 return ret;
379}
380
0ec2c0f8 381static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
382 enum mlx4_steer_type steer, u32 qpn)
383{
384 struct mlx4_steer *s_steer;
385 struct mlx4_cmd_mailbox *mailbox;
386 struct mlx4_mgm *mgm;
387 struct mlx4_steer_index *entry;
388 struct mlx4_promisc_qp *pqp;
389 struct mlx4_promisc_qp *dqp;
390 u32 members_count;
391 u32 prot;
392 int i;
393 bool found;
b12d93d6 394 int err;
b12d93d6 395 struct mlx4_priv *priv = mlx4_priv(dev);
0ec2c0f8 396
4c41b367 397 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6
YP
398
399 mutex_lock(&priv->mcg_table.mutex);
400
0ec2c0f8 401 if (get_promisc_qp(dev, 0, steer, qpn)) {
b12d93d6
YP
402 err = 0; /* Noting to do, already exists */
403 goto out_mutex;
404 }
405
406 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
407 if (!pqp) {
408 err = -ENOMEM;
409 goto out_mutex;
410 }
411 pqp->qpn = qpn;
412
413 mailbox = mlx4_alloc_cmd_mailbox(dev);
414 if (IS_ERR(mailbox)) {
415 err = -ENOMEM;
416 goto out_alloc;
417 }
418 mgm = mailbox->buf;
419
420 /* the promisc qp needs to be added for each one of the steering
421 * entries, if it already exists, needs to be added as a duplicate
422 * for this entry */
423 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
424 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
425 if (err)
426 goto out_mailbox;
427
428 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
429 prot = be32_to_cpu(mgm->members_count) >> 30;
430 found = false;
431 for (i = 0; i < members_count; i++) {
432 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
433 /* Entry already exists, add to duplicates */
434 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
435 if (!dqp)
436 goto out_mailbox;
437 dqp->qpn = qpn;
438 list_add_tail(&dqp->list, &entry->duplicates);
439 found = true;
440 }
441 }
442 if (!found) {
443 /* Need to add the qpn to mgm */
0ec2c0f8 444 if (members_count == dev->caps.num_qp_per_mgm) {
b12d93d6
YP
445 /* entry is full */
446 err = -ENOMEM;
447 goto out_mailbox;
448 }
449 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
450 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
451 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
452 if (err)
453 goto out_mailbox;
454 }
b12d93d6
YP
455 }
456
457 /* add the new qpn to list of promisc qps */
458 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
459 /* now need to add all the promisc qps to default entry */
460 memset(mgm, 0, sizeof *mgm);
461 members_count = 0;
462 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
463 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
464 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
465
0ec2c0f8 466 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
b12d93d6
YP
467 if (err)
468 goto out_list;
469
470 mlx4_free_cmd_mailbox(dev, mailbox);
471 mutex_unlock(&priv->mcg_table.mutex);
472 return 0;
473
474out_list:
475 list_del(&pqp->list);
476out_mailbox:
477 mlx4_free_cmd_mailbox(dev, mailbox);
478out_alloc:
479 kfree(pqp);
480out_mutex:
481 mutex_unlock(&priv->mcg_table.mutex);
482 return err;
483}
484
0ec2c0f8 485static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
b12d93d6
YP
486 enum mlx4_steer_type steer, u32 qpn)
487{
488 struct mlx4_priv *priv = mlx4_priv(dev);
489 struct mlx4_steer *s_steer;
490 struct mlx4_cmd_mailbox *mailbox;
491 struct mlx4_mgm *mgm;
492 struct mlx4_steer_index *entry;
493 struct mlx4_promisc_qp *pqp;
494 struct mlx4_promisc_qp *dqp;
495 u32 members_count;
496 bool found;
497 bool back_to_list = false;
498 int loc, i;
499 int err;
b12d93d6 500
4c41b367 501 s_steer = &mlx4_priv(dev)->steer[port - 1];
b12d93d6
YP
502 mutex_lock(&priv->mcg_table.mutex);
503
0ec2c0f8 504 pqp = get_promisc_qp(dev, 0, steer, qpn);
b12d93d6
YP
505 if (unlikely(!pqp)) {
506 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
507 /* nothing to do */
508 err = 0;
509 goto out_mutex;
510 }
511
512 /*remove from list of promisc qps */
513 list_del(&pqp->list);
b12d93d6
YP
514
515 /* set the default entry not to include the removed one */
516 mailbox = mlx4_alloc_cmd_mailbox(dev);
517 if (IS_ERR(mailbox)) {
518 err = -ENOMEM;
519 back_to_list = true;
520 goto out_list;
521 }
522 mgm = mailbox->buf;
0ec2c0f8 523 memset(mgm, 0, sizeof *mgm);
b12d93d6
YP
524 members_count = 0;
525 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
526 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
527 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
528
0ec2c0f8 529 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
b12d93d6
YP
530 if (err)
531 goto out_mailbox;
532
533 /* remove the qp from all the steering entries*/
534 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
535 found = false;
536 list_for_each_entry(dqp, &entry->duplicates, list) {
537 if (dqp->qpn == qpn) {
538 found = true;
539 break;
540 }
541 }
542 if (found) {
543 /* a duplicate, no need to change the mgm,
544 * only update the duplicates list */
545 list_del(&dqp->list);
546 kfree(dqp);
547 } else {
548 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
549 if (err)
550 goto out_mailbox;
551 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
552 for (loc = -1, i = 0; i < members_count; ++i)
553 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
554 loc = i;
555
556 mgm->members_count = cpu_to_be32(--members_count |
557 (MLX4_PROT_ETH << 30));
558 mgm->qp[loc] = mgm->qp[i - 1];
559 mgm->qp[i - 1] = 0;
560
561 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
562 if (err)
563 goto out_mailbox;
564 }
565
566 }
567
568out_mailbox:
569 mlx4_free_cmd_mailbox(dev, mailbox);
570out_list:
571 if (back_to_list)
572 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
53020092
YP
573 else
574 kfree(pqp);
b12d93d6
YP
575out_mutex:
576 mutex_unlock(&priv->mcg_table.mutex);
577 return err;
578}
579
225c7b1f
RD
580/*
581 * Caller must hold MCG table semaphore. gid and mgm parameters must
582 * be properly aligned for command interface.
583 *
584 * Returns 0 unless a firmware command error occurs.
585 *
586 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
587 * and *mgm holds MGM entry.
588 *
589 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
590 * previous entry in hash chain and *mgm holds AMGM entry.
591 *
592 * If no AMGM exists for given gid, *index = -1, *prev = index of last
593 * entry in hash chain and *mgm holds end of hash chain.
594 */
0345584e
YP
595static int find_entry(struct mlx4_dev *dev, u8 port,
596 u8 *gid, enum mlx4_protocol prot,
0345584e 597 struct mlx4_cmd_mailbox *mgm_mailbox,
deb8b3e8 598 int *prev, int *index)
225c7b1f
RD
599{
600 struct mlx4_cmd_mailbox *mailbox;
601 struct mlx4_mgm *mgm = mgm_mailbox->buf;
602 u8 *mgid;
603 int err;
deb8b3e8 604 u16 hash;
ccf86321
OG
605 u8 op_mod = (prot == MLX4_PROT_ETH) ?
606 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
225c7b1f
RD
607
608 mailbox = mlx4_alloc_cmd_mailbox(dev);
609 if (IS_ERR(mailbox))
610 return -ENOMEM;
611 mgid = mailbox->buf;
612
613 memcpy(mgid, gid, 16);
614
deb8b3e8 615 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
225c7b1f
RD
616 mlx4_free_cmd_mailbox(dev, mailbox);
617 if (err)
618 return err;
619
620 if (0)
deb8b3e8 621 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);
225c7b1f 622
deb8b3e8 623 *index = hash;
225c7b1f
RD
624 *prev = -1;
625
626 do {
0345584e 627 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
225c7b1f
RD
628 if (err)
629 return err;
630
0345584e 631 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
deb8b3e8 632 if (*index != hash) {
225c7b1f
RD
633 mlx4_err(dev, "Found zero MGID in AMGM.\n");
634 err = -EINVAL;
635 }
636 return err;
637 }
638
da995a8a 639 if (!memcmp(mgm->gid, gid, 16) &&
0345584e 640 be32_to_cpu(mgm->members_count) >> 30 == prot)
225c7b1f
RD
641 return err;
642
643 *prev = *index;
644 *index = be32_to_cpu(mgm->next_gid_index) >> 6;
645 } while (*index);
646
647 *index = -1;
648 return err;
649}
650
0ff1fb65
HHZ
651struct mlx4_net_trans_rule_hw_ctrl {
652 __be32 ctrl;
653 __be32 vf_vep_port;
654 __be32 qpn;
655 __be32 reserved;
656};
657
658static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
659 struct mlx4_net_trans_rule_hw_ctrl *hw)
660{
661 static const u8 __promisc_mode[] = {
662 [MLX4_FS_PROMISC_NONE] = 0x0,
663 [MLX4_FS_PROMISC_UPLINK] = 0x1,
664 [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
665 [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
666 };
667
668 u32 dw = 0;
669
670 dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
671 dw |= ctrl->exclusive ? (1 << 2) : 0;
672 dw |= ctrl->allow_loopback ? (1 << 3) : 0;
673 dw |= __promisc_mode[ctrl->promisc_mode] << 8;
674 dw |= ctrl->priority << 16;
675
676 hw->ctrl = cpu_to_be32(dw);
677 hw->vf_vep_port = cpu_to_be32(ctrl->port);
678 hw->qpn = cpu_to_be32(ctrl->qpn);
679}
680
681struct mlx4_net_trans_rule_hw_ib {
682 u8 size;
683 u8 rsvd1;
684 __be16 id;
685 u32 rsvd2;
686 __be32 qpn;
687 __be32 qpn_mask;
688 u8 dst_gid[16];
689 u8 dst_gid_msk[16];
690} __packed;
691
692struct mlx4_net_trans_rule_hw_eth {
693 u8 size;
694 u8 rsvd;
695 __be16 id;
696 u8 rsvd1[6];
697 u8 dst_mac[6];
698 u16 rsvd2;
699 u8 dst_mac_msk[6];
700 u16 rsvd3;
701 u8 src_mac[6];
702 u16 rsvd4;
703 u8 src_mac_msk[6];
704 u8 rsvd5;
705 u8 ether_type_enable;
706 __be16 ether_type;
707 __be16 vlan_id_msk;
708 __be16 vlan_id;
709} __packed;
710
711struct mlx4_net_trans_rule_hw_tcp_udp {
712 u8 size;
713 u8 rsvd;
714 __be16 id;
715 __be16 rsvd1[3];
716 __be16 dst_port;
717 __be16 rsvd2;
718 __be16 dst_port_msk;
719 __be16 rsvd3;
720 __be16 src_port;
721 __be16 rsvd4;
722 __be16 src_port_msk;
723} __packed;
724
725struct mlx4_net_trans_rule_hw_ipv4 {
726 u8 size;
727 u8 rsvd;
728 __be16 id;
729 __be32 rsvd1;
730 __be32 dst_ip;
731 __be32 dst_ip_msk;
732 __be32 src_ip;
733 __be32 src_ip_msk;
734} __packed;
735
736struct _rule_hw {
737 union {
738 struct {
739 u8 size;
740 u8 rsvd;
741 __be16 id;
742 };
743 struct mlx4_net_trans_rule_hw_eth eth;
744 struct mlx4_net_trans_rule_hw_ib ib;
745 struct mlx4_net_trans_rule_hw_ipv4 ipv4;
746 struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
747 };
748};
749
750static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
751 struct _rule_hw *rule_hw)
752{
753 static const u16 __sw_id_hw[] = {
754 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
755 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
756 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
757 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
758 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
759 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
760 };
761
762 static const size_t __rule_hw_sz[] = {
763 [MLX4_NET_TRANS_RULE_ID_ETH] =
764 sizeof(struct mlx4_net_trans_rule_hw_eth),
765 [MLX4_NET_TRANS_RULE_ID_IB] =
766 sizeof(struct mlx4_net_trans_rule_hw_ib),
767 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
768 [MLX4_NET_TRANS_RULE_ID_IPV4] =
769 sizeof(struct mlx4_net_trans_rule_hw_ipv4),
770 [MLX4_NET_TRANS_RULE_ID_TCP] =
771 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
772 [MLX4_NET_TRANS_RULE_ID_UDP] =
773 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
774 };
447458c0 775 if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
0ff1fb65
HHZ
776 mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
777 return -EINVAL;
778 }
779 memset(rule_hw, 0, __rule_hw_sz[spec->id]);
780 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
781 rule_hw->size = __rule_hw_sz[spec->id] >> 2;
782
783 switch (spec->id) {
784 case MLX4_NET_TRANS_RULE_ID_ETH:
785 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
786 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
787 ETH_ALEN);
788 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
789 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
790 ETH_ALEN);
791 if (spec->eth.ether_type_enable) {
792 rule_hw->eth.ether_type_enable = 1;
793 rule_hw->eth.ether_type = spec->eth.ether_type;
794 }
795 rule_hw->eth.vlan_id = spec->eth.vlan_id;
796 rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
797 break;
798
799 case MLX4_NET_TRANS_RULE_ID_IB:
800 rule_hw->ib.qpn = spec->ib.r_qpn;
801 rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
802 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
803 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
804 break;
805
806 case MLX4_NET_TRANS_RULE_ID_IPV6:
807 return -EOPNOTSUPP;
808
809 case MLX4_NET_TRANS_RULE_ID_IPV4:
810 rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
811 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
812 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
813 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
814 break;
815
816 case MLX4_NET_TRANS_RULE_ID_TCP:
817 case MLX4_NET_TRANS_RULE_ID_UDP:
818 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
819 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
820 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
821 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
822 break;
823
824 default:
825 return -EINVAL;
826 }
827
828 return __rule_hw_sz[spec->id];
829}
830
831static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
832 struct mlx4_net_trans_rule *rule)
833{
834#define BUF_SIZE 256
835 struct mlx4_spec_list *cur;
836 char buf[BUF_SIZE];
837 int len = 0;
838
839 mlx4_err(dev, "%s", str);
840 len += snprintf(buf + len, BUF_SIZE - len,
841 "port = %d prio = 0x%x qp = 0x%x ",
842 rule->port, rule->priority, rule->qpn);
843
844 list_for_each_entry(cur, &rule->list, list) {
845 switch (cur->id) {
846 case MLX4_NET_TRANS_RULE_ID_ETH:
847 len += snprintf(buf + len, BUF_SIZE - len,
848 "dmac = %pM ", &cur->eth.dst_mac);
849 if (cur->eth.ether_type)
850 len += snprintf(buf + len, BUF_SIZE - len,
851 "ethertype = 0x%x ",
852 be16_to_cpu(cur->eth.ether_type));
853 if (cur->eth.vlan_id)
854 len += snprintf(buf + len, BUF_SIZE - len,
855 "vlan-id = %d ",
856 be16_to_cpu(cur->eth.vlan_id));
857 break;
858
859 case MLX4_NET_TRANS_RULE_ID_IPV4:
860 if (cur->ipv4.src_ip)
861 len += snprintf(buf + len, BUF_SIZE - len,
862 "src-ip = %pI4 ",
863 &cur->ipv4.src_ip);
864 if (cur->ipv4.dst_ip)
865 len += snprintf(buf + len, BUF_SIZE - len,
866 "dst-ip = %pI4 ",
867 &cur->ipv4.dst_ip);
868 break;
869
870 case MLX4_NET_TRANS_RULE_ID_TCP:
871 case MLX4_NET_TRANS_RULE_ID_UDP:
872 if (cur->tcp_udp.src_port)
873 len += snprintf(buf + len, BUF_SIZE - len,
874 "src-port = %d ",
875 be16_to_cpu(cur->tcp_udp.src_port));
876 if (cur->tcp_udp.dst_port)
877 len += snprintf(buf + len, BUF_SIZE - len,
878 "dst-port = %d ",
879 be16_to_cpu(cur->tcp_udp.dst_port));
880 break;
881
882 case MLX4_NET_TRANS_RULE_ID_IB:
883 len += snprintf(buf + len, BUF_SIZE - len,
884 "dst-gid = %pI6\n", cur->ib.dst_gid);
885 len += snprintf(buf + len, BUF_SIZE - len,
886 "dst-gid-mask = %pI6\n",
887 cur->ib.dst_gid_msk);
888 break;
889
890 case MLX4_NET_TRANS_RULE_ID_IPV6:
891 break;
892
893 default:
894 break;
895 }
896 }
897 len += snprintf(buf + len, BUF_SIZE - len, "\n");
898 mlx4_err(dev, "%s", buf);
899
900 if (len >= BUF_SIZE)
901 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
902}
903
904int mlx4_flow_attach(struct mlx4_dev *dev,
905 struct mlx4_net_trans_rule *rule, u64 *reg_id)
906{
907 struct mlx4_cmd_mailbox *mailbox;
908 struct mlx4_spec_list *cur;
909 u32 size = 0;
910 int ret;
911
912 mailbox = mlx4_alloc_cmd_mailbox(dev);
913 if (IS_ERR(mailbox))
914 return PTR_ERR(mailbox);
915
916 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
917 trans_rule_ctrl_to_hw(rule, mailbox->buf);
918
919 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
920
921 list_for_each_entry(cur, &rule->list, list) {
922 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
923 if (ret < 0) {
924 mlx4_free_cmd_mailbox(dev, mailbox);
925 return -EINVAL;
926 }
927 size += ret;
928 }
929
930 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
931 if (ret == -ENOMEM)
932 mlx4_err_rule(dev,
933 "mcg table is full. Fail to register network rule.\n",
934 rule);
935 else if (ret)
936 mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
937
938 mlx4_free_cmd_mailbox(dev, mailbox);
939
940 return ret;
941}
942EXPORT_SYMBOL_GPL(mlx4_flow_attach);
943
944int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
945{
946 int err;
947
948 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
949 if (err)
950 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
951 reg_id);
952 return err;
953}
954EXPORT_SYMBOL_GPL(mlx4_flow_detach);
955
0345584e
YP
956int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
957 int block_mcast_loopback, enum mlx4_protocol prot,
958 enum mlx4_steer_type steer)
225c7b1f
RD
959{
960 struct mlx4_priv *priv = mlx4_priv(dev);
961 struct mlx4_cmd_mailbox *mailbox;
962 struct mlx4_mgm *mgm;
963 u32 members_count;
225c7b1f
RD
964 int index, prev;
965 int link = 0;
966 int i;
967 int err;
0345584e 968 u8 port = gid[5];
b12d93d6 969 u8 new_entry = 0;
225c7b1f
RD
970
971 mailbox = mlx4_alloc_cmd_mailbox(dev);
972 if (IS_ERR(mailbox))
973 return PTR_ERR(mailbox);
974 mgm = mailbox->buf;
975
976 mutex_lock(&priv->mcg_table.mutex);
deb8b3e8
EE
977 err = find_entry(dev, port, gid, prot,
978 mailbox, &prev, &index);
225c7b1f
RD
979 if (err)
980 goto out;
981
982 if (index != -1) {
b12d93d6
YP
983 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
984 new_entry = 1;
225c7b1f 985 memcpy(mgm->gid, gid, 16);
b12d93d6 986 }
225c7b1f
RD
987 } else {
988 link = 1;
989
990 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
991 if (index == -1) {
992 mlx4_err(dev, "No AMGM entries left\n");
993 err = -ENOMEM;
994 goto out;
995 }
996 index += dev->caps.num_mgms;
997
0ec2c0f8 998 new_entry = 1;
225c7b1f
RD
999 memset(mgm, 0, sizeof *mgm);
1000 memcpy(mgm->gid, gid, 16);
1001 }
1002
da995a8a 1003 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
0ec2c0f8 1004 if (members_count == dev->caps.num_qp_per_mgm) {
225c7b1f
RD
1005 mlx4_err(dev, "MGM at index %x is full.\n", index);
1006 err = -ENOMEM;
1007 goto out;
1008 }
1009
1010 for (i = 0; i < members_count; ++i)
521e575b 1011 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
225c7b1f
RD
1012 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
1013 err = 0;
1014 goto out;
1015 }
1016
521e575b
RL
1017 if (block_mcast_loopback)
1018 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
e6a17622 1019 (1U << MGM_BLCK_LB_BIT));
521e575b
RL
1020 else
1021 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
1022
0345584e 1023 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
225c7b1f 1024
0345584e 1025 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
225c7b1f
RD
1026 if (err)
1027 goto out;
1028
1029 if (!link)
1030 goto out;
1031
0345584e 1032 err = mlx4_READ_ENTRY(dev, prev, mailbox);
225c7b1f
RD
1033 if (err)
1034 goto out;
1035
1036 mgm->next_gid_index = cpu_to_be32(index << 6);
1037
0345584e 1038 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
225c7b1f
RD
1039 if (err)
1040 goto out;
1041
1042out:
b12d93d6
YP
1043 if (prot == MLX4_PROT_ETH) {
1044 /* manage the steering entry for promisc mode */
1045 if (new_entry)
0ec2c0f8 1046 new_steering_entry(dev, port, steer, index, qp->qpn);
b12d93d6 1047 else
0ec2c0f8 1048 existing_steering_entry(dev, port, steer,
b12d93d6
YP
1049 index, qp->qpn);
1050 }
225c7b1f
RD
1051 if (err && link && index != -1) {
1052 if (index < dev->caps.num_mgms)
1053 mlx4_warn(dev, "Got AMGM index %d < %d",
1054 index, dev->caps.num_mgms);
1055 else
1056 mlx4_bitmap_free(&priv->mcg_table.bitmap,
1057 index - dev->caps.num_mgms);
1058 }
1059 mutex_unlock(&priv->mcg_table.mutex);
1060
1061 mlx4_free_cmd_mailbox(dev, mailbox);
1062 return err;
1063}
225c7b1f 1064
0345584e
YP
/*
 * Detach a QP from the steering entry that matches @gid for the given
 * protocol and steering type.
 *
 * Looks the GID up in the MGM/AMGM hash chain, removes the QP from the
 * entry's member list and, when the entry becomes empty (and is not kept
 * alive for promiscuous steering), unlinks the entry from the chain and
 * releases its AMGM index back to the bitmap.
 *
 * Returns 0 on success, -EINVAL if the GID or QP is not found, or the
 * error from the firmware READ/WRITE_ENTRY commands.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];	/* port number is encoded in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	/* serialize all MCG table manipulation */
	mutex_lock(&priv->mcg_table.mutex);

	/* on success the mailbox holds the matching MGM entry;
	 * index == -1 means the GID was not found, prev == -1 means the
	 * entry sits in the MGM hash area rather than the AMGM overflow */
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	/* low 24 bits hold the member count; top 2 bits hold the protocol */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}


	/* after the loop i == old member count, so qp[i - 1] is the last
	 * member: move it into the vacated slot and shrink the list */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot <<	30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* entry still has members, or must stay for promisc steering:
	 * just write back the shrunk member list */
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* pull the first chained AMGM entry up into the
			 * MGM hash slot */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			/* chain is empty: clear the GID to free the slot */
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				/* release the AMGM slot we just promoted */
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* make the predecessor skip over the removed entry */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
0345584e 1177
0ec2c0f8
EE
1178static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1179 u8 gid[16], u8 attach, u8 block_loopback,
1180 enum mlx4_protocol prot)
1181{
1182 struct mlx4_cmd_mailbox *mailbox;
1183 int err = 0;
1184 int qpn;
1185
1186 if (!mlx4_is_mfunc(dev))
1187 return -EBADF;
1188
1189 mailbox = mlx4_alloc_cmd_mailbox(dev);
1190 if (IS_ERR(mailbox))
1191 return PTR_ERR(mailbox);
1192
1193 memcpy(mailbox->buf, gid, 16);
1194 qpn = qp->qpn;
1195 qpn |= (prot << 28);
1196 if (attach && block_loopback)
1197 qpn |= (1 << 31);
1198
1199 err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
1200 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
1201 MLX4_CMD_WRAPPED);
1202
1203 mlx4_free_cmd_mailbox(dev, mailbox);
1204 return err;
1205}
0345584e
YP
1206
1207int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
0ff1fb65
HHZ
1208 u8 port, int block_mcast_loopback,
1209 enum mlx4_protocol prot, u64 *reg_id)
0345584e 1210{
0345584e 1211
c96d97f4
HHZ
1212 switch (dev->caps.steering_mode) {
1213 case MLX4_STEERING_MODE_A0:
1214 if (prot == MLX4_PROT_ETH)
1215 return 0;
0345584e 1216
c96d97f4
HHZ
1217 case MLX4_STEERING_MODE_B0:
1218 if (prot == MLX4_PROT_ETH)
1219 gid[7] |= (MLX4_MC_STEER << 1);
0ec2c0f8 1220
c96d97f4
HHZ
1221 if (mlx4_is_mfunc(dev))
1222 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1223 block_mcast_loopback, prot);
1224 return mlx4_qp_attach_common(dev, qp, gid,
1225 block_mcast_loopback, prot,
1226 MLX4_MC_STEER);
1227
0ff1fb65
HHZ
1228 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
1229 struct mlx4_spec_list spec = { {NULL} };
1230 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1231
1232 struct mlx4_net_trans_rule rule = {
1233 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1234 .exclusive = 0,
1235 .promisc_mode = MLX4_FS_PROMISC_NONE,
1236 .priority = MLX4_DOMAIN_NIC,
1237 };
1238
1239 rule.allow_loopback = ~block_mcast_loopback;
1240 rule.port = port;
1241 rule.qpn = qp->qpn;
1242 INIT_LIST_HEAD(&rule.list);
1243
1244 switch (prot) {
1245 case MLX4_PROT_ETH:
1246 spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
1247 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
1248 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
1249 break;
1250
1251 case MLX4_PROT_IB_IPV6:
1252 spec.id = MLX4_NET_TRANS_RULE_ID_IB;
1253 memcpy(spec.ib.dst_gid, gid, 16);
1254 memset(&spec.ib.dst_gid_msk, 0xff, 16);
1255 break;
1256 default:
1257 return -EINVAL;
1258 }
1259 list_add_tail(&spec.list, &rule.list);
1260
1261 return mlx4_flow_attach(dev, &rule, reg_id);
1262 }
1263
c96d97f4
HHZ
1264 default:
1265 return -EINVAL;
1266 }
0345584e
YP
1267}
1268EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
1269
/*
 * Detach a QP from the multicast group identified by @gid; the inverse
 * of mlx4_multicast_attach(), dispatching on the device steering mode.
 *
 * @reg_id is the registration id returned by attach and is only used in
 * device-managed flow-steering mode.  Returns 0 on success or a negative
 * errno.
 */
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		/* ETH attach was a no-op in A0 mode, so nothing to undo */
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through - IB detach is handled like B0 */

	case MLX4_STEERING_MODE_B0:
		/* tag the GID for the multicast steering range, matching
		 * the transformation done on attach */
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		/* flow rules are tracked solely by their registration id */
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
1296
592e49dd
HHZ
1297int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1298 u32 qpn, enum mlx4_net_trans_promisc_mode mode)
1299{
1300 struct mlx4_net_trans_rule rule;
1301 u64 *regid_p;
1302
1303 switch (mode) {
1304 case MLX4_FS_PROMISC_UPLINK:
1305 case MLX4_FS_PROMISC_FUNCTION_PORT:
1306 regid_p = &dev->regid_promisc_array[port];
1307 break;
1308 case MLX4_FS_PROMISC_ALL_MULTI:
1309 regid_p = &dev->regid_allmulti_array[port];
1310 break;
1311 default:
1312 return -1;
1313 }
1314
1315 if (*regid_p != 0)
1316 return -1;
1317
1318 rule.promisc_mode = mode;
1319 rule.port = port;
1320 rule.qpn = qpn;
1321 INIT_LIST_HEAD(&rule.list);
1322 mlx4_err(dev, "going promisc on %x\n", port);
1323
1324 return mlx4_flow_attach(dev, &rule, regid_p);
1325}
1326EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
1327
1328int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1329 enum mlx4_net_trans_promisc_mode mode)
1330{
1331 int ret;
1332 u64 *regid_p;
1333
1334 switch (mode) {
1335 case MLX4_FS_PROMISC_UPLINK:
1336 case MLX4_FS_PROMISC_FUNCTION_PORT:
1337 regid_p = &dev->regid_promisc_array[port];
1338 break;
1339 case MLX4_FS_PROMISC_ALL_MULTI:
1340 regid_p = &dev->regid_allmulti_array[port];
1341 break;
1342 default:
1343 return -1;
1344 }
1345
1346 if (*regid_p == 0)
1347 return -1;
1348
1349 ret = mlx4_flow_detach(dev, *regid_p);
1350 if (ret == 0)
1351 *regid_p = 0;
1352
1353 return ret;
1354}
1355EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
1356
ffe455ad 1357int mlx4_unicast_attach(struct mlx4_dev *dev,
0ec2c0f8
EE
1358 struct mlx4_qp *qp, u8 gid[16],
1359 int block_mcast_loopback, enum mlx4_protocol prot)
1360{
0ec2c0f8
EE
1361 if (prot == MLX4_PROT_ETH)
1362 gid[7] |= (MLX4_UC_STEER << 1);
1363
1364 if (mlx4_is_mfunc(dev))
1365 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1366 block_mcast_loopback, prot);
1367
1368 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
1369 prot, MLX4_UC_STEER);
1370}
1371EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
1372
ffe455ad 1373int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
0ec2c0f8
EE
1374 u8 gid[16], enum mlx4_protocol prot)
1375{
0ec2c0f8
EE
1376 if (prot == MLX4_PROT_ETH)
1377 gid[7] |= (MLX4_UC_STEER << 1);
1378
1379 if (mlx4_is_mfunc(dev))
1380 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1381
1382 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
1383}
1384EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
1385
1386int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1387 struct mlx4_vhcr *vhcr,
1388 struct mlx4_cmd_mailbox *inbox,
1389 struct mlx4_cmd_mailbox *outbox,
1390 struct mlx4_cmd_info *cmd)
1391{
1392 u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1393 u8 port = vhcr->in_param >> 62;
1394 enum mlx4_steer_type steer = vhcr->in_modifier;
1395
1396 /* Promiscuous unicast is not allowed in mfunc */
1397 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1398 return 0;
1399
1400 if (vhcr->op_modifier)
1401 return add_promisc_qp(dev, port, steer, qpn);
1402 else
1403 return remove_promisc_qp(dev, port, steer, qpn);
1404}
1405
1406static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
1407 enum mlx4_steer_type steer, u8 add, u8 port)
1408{
1409 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
1410 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
1411 MLX4_CMD_WRAPPED);
1412}
b12d93d6
YP
1413
1414int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1415{
0ec2c0f8
EE
1416 if (mlx4_is_mfunc(dev))
1417 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
b12d93d6 1418
0ec2c0f8 1419 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
b12d93d6
YP
1420}
1421EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
1422
1423int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1424{
0ec2c0f8
EE
1425 if (mlx4_is_mfunc(dev))
1426 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
b12d93d6 1427
0ec2c0f8 1428 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
b12d93d6
YP
1429}
1430EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
1431
1432int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1433{
0ec2c0f8
EE
1434 if (mlx4_is_mfunc(dev))
1435 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
b12d93d6 1436
0ec2c0f8 1437 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
b12d93d6
YP
1438}
1439EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
1440
1441int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1442{
0ec2c0f8
EE
1443 if (mlx4_is_mfunc(dev))
1444 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
1445
1446 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
b12d93d6
YP
1447}
1448EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
1449
3d73c288 1450int mlx4_init_mcg_table(struct mlx4_dev *dev)
225c7b1f
RD
1451{
1452 struct mlx4_priv *priv = mlx4_priv(dev);
1453 int err;
1454
0ff1fb65
HHZ
1455 /* No need for mcg_table when fw managed the mcg table*/
1456 if (dev->caps.steering_mode ==
1457 MLX4_STEERING_MODE_DEVICE_MANAGED)
1458 return 0;
93fc9e1b
YP
1459 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
1460 dev->caps.num_amgms - 1, 0, 0);
225c7b1f
RD
1461 if (err)
1462 return err;
1463
1464 mutex_init(&priv->mcg_table.mutex);
1465
1466 return 0;
1467}
1468
1469void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
1470{
0ff1fb65
HHZ
1471 if (dev->caps.steering_mode !=
1472 MLX4_STEERING_MODE_DEVICE_MANAGED)
1473 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
225c7b1f 1474}