]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
mlx4: In RoCE allow guests to have multiple GIDS
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
CommitLineData
c82e9aa0
EC
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
e143a1ad 41#include <linux/slab.h>
c82e9aa0
EC
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
af22d9de 44#include <linux/if_ether.h>
7fb40f87 45#include <linux/etherdevice.h>
c82e9aa0
EC
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
c82e9aa0
EC
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
4874080d
JM
58struct vlan_res {
59 struct list_head list;
60 u16 vlan;
61 int ref_count;
62 int vlan_index;
63 u8 port;
64};
65
c82e9aa0
EC
66struct res_common {
67 struct list_head list;
4af1c048 68 struct rb_node node;
aa1ec3dd 69 u64 res_id;
c82e9aa0
EC
70 int owner;
71 int state;
72 int from_state;
73 int to_state;
74 int removing;
75};
76
77enum {
78 RES_ANY_BUSY = 1
79};
80
81struct res_gid {
82 struct list_head list;
83 u8 gid[16];
84 enum mlx4_protocol prot;
9f5b6c63 85 enum mlx4_steer_type steer;
fab1e24a 86 u64 reg_id;
c82e9aa0
EC
87};
88
89enum res_qp_states {
90 RES_QP_BUSY = RES_ANY_BUSY,
91
92 /* QP number was allocated */
93 RES_QP_RESERVED,
94
95 /* ICM memory for QP context was mapped */
96 RES_QP_MAPPED,
97
98 /* QP is in hw ownership */
99 RES_QP_HW
100};
101
c82e9aa0
EC
102struct res_qp {
103 struct res_common com;
104 struct res_mtt *mtt;
105 struct res_cq *rcq;
106 struct res_cq *scq;
107 struct res_srq *srq;
108 struct list_head mcg_list;
109 spinlock_t mcg_spl;
110 int local_qpn;
2c473ae7 111 atomic_t ref_count;
b01978ca 112 u32 qpc_flags;
f0f829bf 113 /* saved qp params before VST enforcement in order to restore on VGT */
b01978ca 114 u8 sched_queue;
f0f829bf
RE
115 __be32 param3;
116 u8 vlan_control;
117 u8 fvl_rx;
118 u8 pri_path_fl;
119 u8 vlan_index;
120 u8 feup;
c82e9aa0
EC
121};
122
123enum res_mtt_states {
124 RES_MTT_BUSY = RES_ANY_BUSY,
125 RES_MTT_ALLOCATED,
126};
127
128static inline const char *mtt_states_str(enum res_mtt_states state)
129{
130 switch (state) {
131 case RES_MTT_BUSY: return "RES_MTT_BUSY";
132 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
133 default: return "Unknown";
134 }
135}
136
137struct res_mtt {
138 struct res_common com;
139 int order;
140 atomic_t ref_count;
141};
142
143enum res_mpt_states {
144 RES_MPT_BUSY = RES_ANY_BUSY,
145 RES_MPT_RESERVED,
146 RES_MPT_MAPPED,
147 RES_MPT_HW,
148};
149
150struct res_mpt {
151 struct res_common com;
152 struct res_mtt *mtt;
153 int key;
154};
155
156enum res_eq_states {
157 RES_EQ_BUSY = RES_ANY_BUSY,
158 RES_EQ_RESERVED,
159 RES_EQ_HW,
160};
161
162struct res_eq {
163 struct res_common com;
164 struct res_mtt *mtt;
165};
166
167enum res_cq_states {
168 RES_CQ_BUSY = RES_ANY_BUSY,
169 RES_CQ_ALLOCATED,
170 RES_CQ_HW,
171};
172
173struct res_cq {
174 struct res_common com;
175 struct res_mtt *mtt;
176 atomic_t ref_count;
177};
178
179enum res_srq_states {
180 RES_SRQ_BUSY = RES_ANY_BUSY,
181 RES_SRQ_ALLOCATED,
182 RES_SRQ_HW,
183};
184
c82e9aa0
EC
185struct res_srq {
186 struct res_common com;
187 struct res_mtt *mtt;
188 struct res_cq *cq;
189 atomic_t ref_count;
190};
191
192enum res_counter_states {
193 RES_COUNTER_BUSY = RES_ANY_BUSY,
194 RES_COUNTER_ALLOCATED,
195};
196
c82e9aa0
EC
197struct res_counter {
198 struct res_common com;
199 int port;
200};
201
ba062d52
JM
202enum res_xrcdn_states {
203 RES_XRCD_BUSY = RES_ANY_BUSY,
204 RES_XRCD_ALLOCATED,
205};
206
207struct res_xrcdn {
208 struct res_common com;
209 int port;
210};
211
1b9c6b06
HHZ
212enum res_fs_rule_states {
213 RES_FS_RULE_BUSY = RES_ANY_BUSY,
214 RES_FS_RULE_ALLOCATED,
215};
216
217struct res_fs_rule {
218 struct res_common com;
2c473ae7 219 int qpn;
1b9c6b06
HHZ
220};
221
b6ffaeff
JM
222static int mlx4_is_eth(struct mlx4_dev *dev, int port)
223{
224 return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
225}
226
4af1c048
HHZ
227static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
228{
229 struct rb_node *node = root->rb_node;
230
231 while (node) {
232 struct res_common *res = container_of(node, struct res_common,
233 node);
234
235 if (res_id < res->res_id)
236 node = node->rb_left;
237 else if (res_id > res->res_id)
238 node = node->rb_right;
239 else
240 return res;
241 }
242 return NULL;
243}
244
245static int res_tracker_insert(struct rb_root *root, struct res_common *res)
246{
247 struct rb_node **new = &(root->rb_node), *parent = NULL;
248
249 /* Figure out where to put new node */
250 while (*new) {
251 struct res_common *this = container_of(*new, struct res_common,
252 node);
253
254 parent = *new;
255 if (res->res_id < this->res_id)
256 new = &((*new)->rb_left);
257 else if (res->res_id > this->res_id)
258 new = &((*new)->rb_right);
259 else
260 return -EEXIST;
261 }
262
263 /* Add new node and rebalance tree. */
264 rb_link_node(&res->node, parent, new);
265 rb_insert_color(&res->node, root);
266
267 return 0;
268}
269
54679e14
JM
270enum qp_transition {
271 QP_TRANS_INIT2RTR,
272 QP_TRANS_RTR2RTS,
273 QP_TRANS_RTS2RTS,
274 QP_TRANS_SQERR2RTS,
275 QP_TRANS_SQD2SQD,
276 QP_TRANS_SQD2RTS
277};
278
c82e9aa0
EC
279/* For Debug uses */
280static const char *ResourceType(enum mlx4_resource rt)
281{
282 switch (rt) {
283 case RES_QP: return "RES_QP";
284 case RES_CQ: return "RES_CQ";
285 case RES_SRQ: return "RES_SRQ";
286 case RES_MPT: return "RES_MPT";
287 case RES_MTT: return "RES_MTT";
288 case RES_MAC: return "RES_MAC";
4874080d 289 case RES_VLAN: return "RES_VLAN";
c82e9aa0
EC
290 case RES_EQ: return "RES_EQ";
291 case RES_COUNTER: return "RES_COUNTER";
1b9c6b06 292 case RES_FS_RULE: return "RES_FS_RULE";
ba062d52 293 case RES_XRCD: return "RES_XRCD";
c82e9aa0
EC
294 default: return "Unknown resource type !!!";
295 };
296}
297
4874080d 298static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
146f3ef4
JM
299static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
300 enum mlx4_resource res_type, int count,
301 int port)
302{
303 struct mlx4_priv *priv = mlx4_priv(dev);
304 struct resource_allocator *res_alloc =
305 &priv->mfunc.master.res_tracker.res_alloc[res_type];
306 int err = -EINVAL;
307 int allocated, free, reserved, guaranteed, from_free;
308
309 if (slave > dev->num_vfs)
310 return -EINVAL;
311
312 spin_lock(&res_alloc->alloc_lock);
313 allocated = (port > 0) ?
314 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
315 res_alloc->allocated[slave];
316 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
317 res_alloc->res_free;
318 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
319 res_alloc->res_reserved;
320 guaranteed = res_alloc->guaranteed[slave];
321
322 if (allocated + count > res_alloc->quota[slave])
323 goto out;
324
325 if (allocated + count <= guaranteed) {
326 err = 0;
327 } else {
328 /* portion may need to be obtained from free area */
329 if (guaranteed - allocated > 0)
330 from_free = count - (guaranteed - allocated);
331 else
332 from_free = count;
333
334 if (free - from_free > reserved)
335 err = 0;
336 }
337
338 if (!err) {
339 /* grant the request */
340 if (port > 0) {
341 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
342 res_alloc->res_port_free[port - 1] -= count;
343 } else {
344 res_alloc->allocated[slave] += count;
345 res_alloc->res_free -= count;
346 }
347 }
348
349out:
350 spin_unlock(&res_alloc->alloc_lock);
351 return err;
352}
353
354static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
355 enum mlx4_resource res_type, int count,
356 int port)
357{
358 struct mlx4_priv *priv = mlx4_priv(dev);
359 struct resource_allocator *res_alloc =
360 &priv->mfunc.master.res_tracker.res_alloc[res_type];
361
362 if (slave > dev->num_vfs)
363 return;
364
365 spin_lock(&res_alloc->alloc_lock);
366 if (port > 0) {
367 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
368 res_alloc->res_port_free[port - 1] += count;
369 } else {
370 res_alloc->allocated[slave] -= count;
371 res_alloc->res_free += count;
372 }
373
374 spin_unlock(&res_alloc->alloc_lock);
375 return;
376}
377
5a0d0a61
JM
378static inline void initialize_res_quotas(struct mlx4_dev *dev,
379 struct resource_allocator *res_alloc,
380 enum mlx4_resource res_type,
381 int vf, int num_instances)
382{
383 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
384 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
385 if (vf == mlx4_master_func_num(dev)) {
386 res_alloc->res_free = num_instances;
387 if (res_type == RES_MTT) {
388 /* reserved mtts will be taken out of the PF allocation */
389 res_alloc->res_free += dev->caps.reserved_mtts;
390 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
391 res_alloc->quota[vf] += dev->caps.reserved_mtts;
392 }
393 }
394}
395
396void mlx4_init_quotas(struct mlx4_dev *dev)
397{
398 struct mlx4_priv *priv = mlx4_priv(dev);
399 int pf;
400
401 /* quotas for VFs are initialized in mlx4_slave_cap */
402 if (mlx4_is_slave(dev))
403 return;
404
405 if (!mlx4_is_mfunc(dev)) {
406 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
407 mlx4_num_reserved_sqps(dev);
408 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
409 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
410 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
411 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
412 return;
413 }
414
415 pf = mlx4_master_func_num(dev);
416 dev->quotas.qp =
417 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
418 dev->quotas.cq =
419 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
420 dev->quotas.srq =
421 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
422 dev->quotas.mtt =
423 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
424 dev->quotas.mpt =
425 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
426}
c82e9aa0
EC
427int mlx4_init_resource_tracker(struct mlx4_dev *dev)
428{
429 struct mlx4_priv *priv = mlx4_priv(dev);
5a0d0a61 430 int i, j;
c82e9aa0
EC
431 int t;
432
433 priv->mfunc.master.res_tracker.slave_list =
434 kzalloc(dev->num_slaves * sizeof(struct slave_list),
435 GFP_KERNEL);
436 if (!priv->mfunc.master.res_tracker.slave_list)
437 return -ENOMEM;
438
439 for (i = 0 ; i < dev->num_slaves; i++) {
440 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
441 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
442 slave_list[i].res_list[t]);
443 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
444 }
445
446 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
447 dev->num_slaves);
448 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
4af1c048 449 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
c82e9aa0 450
5a0d0a61
JM
451 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
452 struct resource_allocator *res_alloc =
453 &priv->mfunc.master.res_tracker.res_alloc[i];
454 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
455 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
456 if (i == RES_MAC || i == RES_VLAN)
457 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
458 (dev->num_vfs + 1) * sizeof(int),
459 GFP_KERNEL);
460 else
461 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
462
463 if (!res_alloc->quota || !res_alloc->guaranteed ||
464 !res_alloc->allocated)
465 goto no_mem_err;
466
146f3ef4 467 spin_lock_init(&res_alloc->alloc_lock);
5a0d0a61
JM
468 for (t = 0; t < dev->num_vfs + 1; t++) {
469 switch (i) {
470 case RES_QP:
471 initialize_res_quotas(dev, res_alloc, RES_QP,
472 t, dev->caps.num_qps -
473 dev->caps.reserved_qps -
474 mlx4_num_reserved_sqps(dev));
475 break;
476 case RES_CQ:
477 initialize_res_quotas(dev, res_alloc, RES_CQ,
478 t, dev->caps.num_cqs -
479 dev->caps.reserved_cqs);
480 break;
481 case RES_SRQ:
482 initialize_res_quotas(dev, res_alloc, RES_SRQ,
483 t, dev->caps.num_srqs -
484 dev->caps.reserved_srqs);
485 break;
486 case RES_MPT:
487 initialize_res_quotas(dev, res_alloc, RES_MPT,
488 t, dev->caps.num_mpts -
489 dev->caps.reserved_mrws);
490 break;
491 case RES_MTT:
492 initialize_res_quotas(dev, res_alloc, RES_MTT,
493 t, dev->caps.num_mtts -
494 dev->caps.reserved_mtts);
495 break;
496 case RES_MAC:
497 if (t == mlx4_master_func_num(dev)) {
498 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
499 res_alloc->guaranteed[t] = 2;
500 for (j = 0; j < MLX4_MAX_PORTS; j++)
501 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
502 } else {
503 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
504 res_alloc->guaranteed[t] = 2;
505 }
506 break;
507 case RES_VLAN:
508 if (t == mlx4_master_func_num(dev)) {
509 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
510 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
511 for (j = 0; j < MLX4_MAX_PORTS; j++)
512 res_alloc->res_port_free[j] =
513 res_alloc->quota[t];
514 } else {
515 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
516 res_alloc->guaranteed[t] = 0;
517 }
518 break;
519 case RES_COUNTER:
520 res_alloc->quota[t] = dev->caps.max_counters;
521 res_alloc->guaranteed[t] = 0;
522 if (t == mlx4_master_func_num(dev))
523 res_alloc->res_free = res_alloc->quota[t];
524 break;
525 default:
526 break;
527 }
528 if (i == RES_MAC || i == RES_VLAN) {
529 for (j = 0; j < MLX4_MAX_PORTS; j++)
530 res_alloc->res_port_rsvd[j] +=
531 res_alloc->guaranteed[t];
532 } else {
533 res_alloc->res_reserved += res_alloc->guaranteed[t];
534 }
535 }
536 }
c82e9aa0 537 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
5a0d0a61
JM
538 return 0;
539
540no_mem_err:
541 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
542 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
543 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
544 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
545 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
546 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
547 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
548 }
549 return -ENOMEM;
c82e9aa0
EC
550}
551
b8924951
JM
552void mlx4_free_resource_tracker(struct mlx4_dev *dev,
553 enum mlx4_res_tracker_free_type type)
c82e9aa0
EC
554{
555 struct mlx4_priv *priv = mlx4_priv(dev);
556 int i;
557
558 if (priv->mfunc.master.res_tracker.slave_list) {
4874080d
JM
559 if (type != RES_TR_FREE_STRUCTS_ONLY) {
560 for (i = 0; i < dev->num_slaves; i++) {
b8924951
JM
561 if (type == RES_TR_FREE_ALL ||
562 dev->caps.function != i)
563 mlx4_delete_all_resources_for_slave(dev, i);
4874080d
JM
564 }
565 /* free master's vlans */
566 i = dev->caps.function;
567 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
568 rem_slave_vlans(dev, i);
569 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
570 }
b8924951
JM
571
572 if (type != RES_TR_FREE_SLAVES_ONLY) {
5a0d0a61
JM
573 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
574 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
575 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
576 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
577 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
578 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
579 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
580 }
b8924951
JM
581 kfree(priv->mfunc.master.res_tracker.slave_list);
582 priv->mfunc.master.res_tracker.slave_list = NULL;
583 }
c82e9aa0
EC
584 }
585}
586
54679e14
JM
587static void update_pkey_index(struct mlx4_dev *dev, int slave,
588 struct mlx4_cmd_mailbox *inbox)
c82e9aa0 589{
54679e14
JM
590 u8 sched = *(u8 *)(inbox->buf + 64);
591 u8 orig_index = *(u8 *)(inbox->buf + 35);
592 u8 new_index;
593 struct mlx4_priv *priv = mlx4_priv(dev);
594 int port;
595
596 port = (sched >> 6 & 1) + 1;
597
598 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
599 *(u8 *)(inbox->buf + 35) = new_index;
54679e14
JM
600}
601
602static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
603 u8 slave)
604{
605 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
606 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
607 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
b6ffaeff 608 int port;
c82e9aa0 609
b6ffaeff
JM
610 if (MLX4_QP_ST_UD == ts) {
611 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
612 if (mlx4_is_eth(dev, port))
613 qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
614 else
615 qp_ctx->pri_path.mgid_index = slave | 0x80;
616
617 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
618 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
619 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
620 if (mlx4_is_eth(dev, port)) {
621 qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
622 qp_ctx->pri_path.mgid_index &= 0x7f;
623 } else {
624 qp_ctx->pri_path.mgid_index = slave & 0x7F;
625 }
626 }
627 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
628 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
629 if (mlx4_is_eth(dev, port)) {
630 qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
631 qp_ctx->alt_path.mgid_index &= 0x7f;
632 } else {
633 qp_ctx->alt_path.mgid_index = slave & 0x7F;
634 }
635 }
54679e14 636 }
c82e9aa0
EC
637}
638
3f7fb021
RE
639static int update_vport_qp_param(struct mlx4_dev *dev,
640 struct mlx4_cmd_mailbox *inbox,
b01978ca 641 u8 slave, u32 qpn)
3f7fb021
RE
642{
643 struct mlx4_qp_context *qpc = inbox->buf + 8;
644 struct mlx4_vport_oper_state *vp_oper;
645 struct mlx4_priv *priv;
646 u32 qp_type;
647 int port;
648
649 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
650 priv = mlx4_priv(dev);
651 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
652
653 if (MLX4_VGT != vp_oper->state.default_vlan) {
654 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
b01978ca
JM
655 if (MLX4_QP_ST_RC == qp_type ||
656 (MLX4_QP_ST_UD == qp_type &&
657 !mlx4_is_qp_reserved(dev, qpn)))
3f7fb021
RE
658 return -EINVAL;
659
b01978ca
JM
660 /* the reserved QPs (special, proxy, tunnel)
661 * do not operate over vlans
662 */
663 if (mlx4_is_qp_reserved(dev, qpn))
664 return 0;
665
7677fc96
RE
666 /* force strip vlan by clear vsd */
667 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
0a6eac24
RE
668
669 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
670 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
671 qpc->pri_path.vlan_control =
672 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
673 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
674 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
675 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
676 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
677 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
678 } else if (0 != vp_oper->state.default_vlan) {
7677fc96
RE
679 qpc->pri_path.vlan_control =
680 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
681 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
682 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
683 } else { /* priority tagged */
684 qpc->pri_path.vlan_control =
685 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
686 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
687 }
688
689 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
3f7fb021 690 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
7677fc96
RE
691 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
692 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
3f7fb021
RE
693 qpc->pri_path.sched_queue &= 0xC7;
694 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
3f7fb021 695 }
e6b6a231 696 if (vp_oper->state.spoofchk) {
7677fc96 697 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
e6b6a231 698 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
e6b6a231 699 }
3f7fb021
RE
700 return 0;
701}
702
c82e9aa0
EC
703static int mpt_mask(struct mlx4_dev *dev)
704{
705 return dev->caps.num_mpts - 1;
706}
707
1e3f7b32 708static void *find_res(struct mlx4_dev *dev, u64 res_id,
c82e9aa0
EC
709 enum mlx4_resource type)
710{
711 struct mlx4_priv *priv = mlx4_priv(dev);
712
4af1c048
HHZ
713 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
714 res_id);
c82e9aa0
EC
715}
716
aa1ec3dd 717static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
c82e9aa0
EC
718 enum mlx4_resource type,
719 void *res)
720{
721 struct res_common *r;
722 int err = 0;
723
724 spin_lock_irq(mlx4_tlock(dev));
725 r = find_res(dev, res_id, type);
726 if (!r) {
727 err = -ENONET;
728 goto exit;
729 }
730
731 if (r->state == RES_ANY_BUSY) {
732 err = -EBUSY;
733 goto exit;
734 }
735
736 if (r->owner != slave) {
737 err = -EPERM;
738 goto exit;
739 }
740
741 r->from_state = r->state;
742 r->state = RES_ANY_BUSY;
c82e9aa0
EC
743
744 if (res)
745 *((struct res_common **)res) = r;
746
747exit:
748 spin_unlock_irq(mlx4_tlock(dev));
749 return err;
750}
751
752int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
753 enum mlx4_resource type,
aa1ec3dd 754 u64 res_id, int *slave)
c82e9aa0
EC
755{
756
757 struct res_common *r;
758 int err = -ENOENT;
759 int id = res_id;
760
761 if (type == RES_QP)
762 id &= 0x7fffff;
996b0541 763 spin_lock(mlx4_tlock(dev));
c82e9aa0
EC
764
765 r = find_res(dev, id, type);
766 if (r) {
767 *slave = r->owner;
768 err = 0;
769 }
996b0541 770 spin_unlock(mlx4_tlock(dev));
c82e9aa0
EC
771
772 return err;
773}
774
aa1ec3dd 775static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
c82e9aa0
EC
776 enum mlx4_resource type)
777{
778 struct res_common *r;
779
780 spin_lock_irq(mlx4_tlock(dev));
781 r = find_res(dev, res_id, type);
782 if (r)
783 r->state = r->from_state;
784 spin_unlock_irq(mlx4_tlock(dev));
785}
786
787static struct res_common *alloc_qp_tr(int id)
788{
789 struct res_qp *ret;
790
791 ret = kzalloc(sizeof *ret, GFP_KERNEL);
792 if (!ret)
793 return NULL;
794
795 ret->com.res_id = id;
796 ret->com.state = RES_QP_RESERVED;
2531188b 797 ret->local_qpn = id;
c82e9aa0
EC
798 INIT_LIST_HEAD(&ret->mcg_list);
799 spin_lock_init(&ret->mcg_spl);
2c473ae7 800 atomic_set(&ret->ref_count, 0);
c82e9aa0
EC
801
802 return &ret->com;
803}
804
805static struct res_common *alloc_mtt_tr(int id, int order)
806{
807 struct res_mtt *ret;
808
809 ret = kzalloc(sizeof *ret, GFP_KERNEL);
810 if (!ret)
811 return NULL;
812
813 ret->com.res_id = id;
814 ret->order = order;
815 ret->com.state = RES_MTT_ALLOCATED;
816 atomic_set(&ret->ref_count, 0);
817
818 return &ret->com;
819}
820
821static struct res_common *alloc_mpt_tr(int id, int key)
822{
823 struct res_mpt *ret;
824
825 ret = kzalloc(sizeof *ret, GFP_KERNEL);
826 if (!ret)
827 return NULL;
828
829 ret->com.res_id = id;
830 ret->com.state = RES_MPT_RESERVED;
831 ret->key = key;
832
833 return &ret->com;
834}
835
836static struct res_common *alloc_eq_tr(int id)
837{
838 struct res_eq *ret;
839
840 ret = kzalloc(sizeof *ret, GFP_KERNEL);
841 if (!ret)
842 return NULL;
843
844 ret->com.res_id = id;
845 ret->com.state = RES_EQ_RESERVED;
846
847 return &ret->com;
848}
849
850static struct res_common *alloc_cq_tr(int id)
851{
852 struct res_cq *ret;
853
854 ret = kzalloc(sizeof *ret, GFP_KERNEL);
855 if (!ret)
856 return NULL;
857
858 ret->com.res_id = id;
859 ret->com.state = RES_CQ_ALLOCATED;
860 atomic_set(&ret->ref_count, 0);
861
862 return &ret->com;
863}
864
865static struct res_common *alloc_srq_tr(int id)
866{
867 struct res_srq *ret;
868
869 ret = kzalloc(sizeof *ret, GFP_KERNEL);
870 if (!ret)
871 return NULL;
872
873 ret->com.res_id = id;
874 ret->com.state = RES_SRQ_ALLOCATED;
875 atomic_set(&ret->ref_count, 0);
876
877 return &ret->com;
878}
879
880static struct res_common *alloc_counter_tr(int id)
881{
882 struct res_counter *ret;
883
884 ret = kzalloc(sizeof *ret, GFP_KERNEL);
885 if (!ret)
886 return NULL;
887
888 ret->com.res_id = id;
889 ret->com.state = RES_COUNTER_ALLOCATED;
890
891 return &ret->com;
892}
893
ba062d52
JM
894static struct res_common *alloc_xrcdn_tr(int id)
895{
896 struct res_xrcdn *ret;
897
898 ret = kzalloc(sizeof *ret, GFP_KERNEL);
899 if (!ret)
900 return NULL;
901
902 ret->com.res_id = id;
903 ret->com.state = RES_XRCD_ALLOCATED;
904
905 return &ret->com;
906}
907
2c473ae7 908static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1b9c6b06
HHZ
909{
910 struct res_fs_rule *ret;
911
912 ret = kzalloc(sizeof *ret, GFP_KERNEL);
913 if (!ret)
914 return NULL;
915
916 ret->com.res_id = id;
917 ret->com.state = RES_FS_RULE_ALLOCATED;
2c473ae7 918 ret->qpn = qpn;
1b9c6b06
HHZ
919 return &ret->com;
920}
921
aa1ec3dd 922static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
c82e9aa0
EC
923 int extra)
924{
925 struct res_common *ret;
926
927 switch (type) {
928 case RES_QP:
929 ret = alloc_qp_tr(id);
930 break;
931 case RES_MPT:
932 ret = alloc_mpt_tr(id, extra);
933 break;
934 case RES_MTT:
935 ret = alloc_mtt_tr(id, extra);
936 break;
937 case RES_EQ:
938 ret = alloc_eq_tr(id);
939 break;
940 case RES_CQ:
941 ret = alloc_cq_tr(id);
942 break;
943 case RES_SRQ:
944 ret = alloc_srq_tr(id);
945 break;
946 case RES_MAC:
947 printk(KERN_ERR "implementation missing\n");
948 return NULL;
949 case RES_COUNTER:
950 ret = alloc_counter_tr(id);
951 break;
ba062d52
JM
952 case RES_XRCD:
953 ret = alloc_xrcdn_tr(id);
954 break;
1b9c6b06 955 case RES_FS_RULE:
2c473ae7 956 ret = alloc_fs_rule_tr(id, extra);
1b9c6b06 957 break;
c82e9aa0
EC
958 default:
959 return NULL;
960 }
961 if (ret)
962 ret->owner = slave;
963
964 return ret;
965}
966
aa1ec3dd 967static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
c82e9aa0
EC
968 enum mlx4_resource type, int extra)
969{
970 int i;
971 int err;
972 struct mlx4_priv *priv = mlx4_priv(dev);
973 struct res_common **res_arr;
974 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4af1c048 975 struct rb_root *root = &tracker->res_tree[type];
c82e9aa0
EC
976
977 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
978 if (!res_arr)
979 return -ENOMEM;
980
981 for (i = 0; i < count; ++i) {
982 res_arr[i] = alloc_tr(base + i, type, slave, extra);
983 if (!res_arr[i]) {
984 for (--i; i >= 0; --i)
985 kfree(res_arr[i]);
986
987 kfree(res_arr);
988 return -ENOMEM;
989 }
990 }
991
992 spin_lock_irq(mlx4_tlock(dev));
993 for (i = 0; i < count; ++i) {
994 if (find_res(dev, base + i, type)) {
995 err = -EEXIST;
996 goto undo;
997 }
4af1c048 998 err = res_tracker_insert(root, res_arr[i]);
c82e9aa0
EC
999 if (err)
1000 goto undo;
1001 list_add_tail(&res_arr[i]->list,
1002 &tracker->slave_list[slave].res_list[type]);
1003 }
1004 spin_unlock_irq(mlx4_tlock(dev));
1005 kfree(res_arr);
1006
1007 return 0;
1008
1009undo:
1010 for (--i; i >= base; --i)
4af1c048 1011 rb_erase(&res_arr[i]->node, root);
c82e9aa0
EC
1012
1013 spin_unlock_irq(mlx4_tlock(dev));
1014
1015 for (i = 0; i < count; ++i)
1016 kfree(res_arr[i]);
1017
1018 kfree(res_arr);
1019
1020 return err;
1021}
1022
1023static int remove_qp_ok(struct res_qp *res)
1024{
2c473ae7
HHZ
1025 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1026 !list_empty(&res->mcg_list)) {
1027 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1028 res->com.state, atomic_read(&res->ref_count));
c82e9aa0 1029 return -EBUSY;
2c473ae7 1030 } else if (res->com.state != RES_QP_RESERVED) {
c82e9aa0 1031 return -EPERM;
2c473ae7 1032 }
c82e9aa0
EC
1033
1034 return 0;
1035}
1036
1037static int remove_mtt_ok(struct res_mtt *res, int order)
1038{
1039 if (res->com.state == RES_MTT_BUSY ||
1040 atomic_read(&res->ref_count)) {
1041 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
1042 __func__, __LINE__,
1043 mtt_states_str(res->com.state),
1044 atomic_read(&res->ref_count));
1045 return -EBUSY;
1046 } else if (res->com.state != RES_MTT_ALLOCATED)
1047 return -EPERM;
1048 else if (res->order != order)
1049 return -EINVAL;
1050
1051 return 0;
1052}
1053
1054static int remove_mpt_ok(struct res_mpt *res)
1055{
1056 if (res->com.state == RES_MPT_BUSY)
1057 return -EBUSY;
1058 else if (res->com.state != RES_MPT_RESERVED)
1059 return -EPERM;
1060
1061 return 0;
1062}
1063
1064static int remove_eq_ok(struct res_eq *res)
1065{
1066 if (res->com.state == RES_MPT_BUSY)
1067 return -EBUSY;
1068 else if (res->com.state != RES_MPT_RESERVED)
1069 return -EPERM;
1070
1071 return 0;
1072}
1073
1074static int remove_counter_ok(struct res_counter *res)
1075{
1076 if (res->com.state == RES_COUNTER_BUSY)
1077 return -EBUSY;
1078 else if (res->com.state != RES_COUNTER_ALLOCATED)
1079 return -EPERM;
1080
1081 return 0;
1082}
1083
ba062d52
JM
1084static int remove_xrcdn_ok(struct res_xrcdn *res)
1085{
1086 if (res->com.state == RES_XRCD_BUSY)
1087 return -EBUSY;
1088 else if (res->com.state != RES_XRCD_ALLOCATED)
1089 return -EPERM;
1090
1091 return 0;
1092}
1093
1b9c6b06
HHZ
1094static int remove_fs_rule_ok(struct res_fs_rule *res)
1095{
1096 if (res->com.state == RES_FS_RULE_BUSY)
1097 return -EBUSY;
1098 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1099 return -EPERM;
1100
1101 return 0;
1102}
1103
c82e9aa0
EC
1104static int remove_cq_ok(struct res_cq *res)
1105{
1106 if (res->com.state == RES_CQ_BUSY)
1107 return -EBUSY;
1108 else if (res->com.state != RES_CQ_ALLOCATED)
1109 return -EPERM;
1110
1111 return 0;
1112}
1113
1114static int remove_srq_ok(struct res_srq *res)
1115{
1116 if (res->com.state == RES_SRQ_BUSY)
1117 return -EBUSY;
1118 else if (res->com.state != RES_SRQ_ALLOCATED)
1119 return -EPERM;
1120
1121 return 0;
1122}
1123
1124static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1125{
1126 switch (type) {
1127 case RES_QP:
1128 return remove_qp_ok((struct res_qp *)res);
1129 case RES_CQ:
1130 return remove_cq_ok((struct res_cq *)res);
1131 case RES_SRQ:
1132 return remove_srq_ok((struct res_srq *)res);
1133 case RES_MPT:
1134 return remove_mpt_ok((struct res_mpt *)res);
1135 case RES_MTT:
1136 return remove_mtt_ok((struct res_mtt *)res, extra);
1137 case RES_MAC:
1138 return -ENOSYS;
1139 case RES_EQ:
1140 return remove_eq_ok((struct res_eq *)res);
1141 case RES_COUNTER:
1142 return remove_counter_ok((struct res_counter *)res);
ba062d52
JM
1143 case RES_XRCD:
1144 return remove_xrcdn_ok((struct res_xrcdn *)res);
1b9c6b06
HHZ
1145 case RES_FS_RULE:
1146 return remove_fs_rule_ok((struct res_fs_rule *)res);
c82e9aa0
EC
1147 default:
1148 return -EINVAL;
1149 }
1150}
1151
aa1ec3dd 1152static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
c82e9aa0
EC
1153 enum mlx4_resource type, int extra)
1154{
aa1ec3dd 1155 u64 i;
c82e9aa0
EC
1156 int err;
1157 struct mlx4_priv *priv = mlx4_priv(dev);
1158 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1159 struct res_common *r;
1160
1161 spin_lock_irq(mlx4_tlock(dev));
1162 for (i = base; i < base + count; ++i) {
4af1c048 1163 r = res_tracker_lookup(&tracker->res_tree[type], i);
c82e9aa0
EC
1164 if (!r) {
1165 err = -ENOENT;
1166 goto out;
1167 }
1168 if (r->owner != slave) {
1169 err = -EPERM;
1170 goto out;
1171 }
1172 err = remove_ok(r, type, extra);
1173 if (err)
1174 goto out;
1175 }
1176
1177 for (i = base; i < base + count; ++i) {
4af1c048
HHZ
1178 r = res_tracker_lookup(&tracker->res_tree[type], i);
1179 rb_erase(&r->node, &tracker->res_tree[type]);
c82e9aa0
EC
1180 list_del(&r->list);
1181 kfree(r);
1182 }
1183 err = 0;
1184
1185out:
1186 spin_unlock_irq(mlx4_tlock(dev));
1187
1188 return err;
1189}
1190
1191static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1192 enum res_qp_states state, struct res_qp **qp,
1193 int alloc)
1194{
1195 struct mlx4_priv *priv = mlx4_priv(dev);
1196 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1197 struct res_qp *r;
1198 int err = 0;
1199
1200 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1201 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
c82e9aa0
EC
1202 if (!r)
1203 err = -ENOENT;
1204 else if (r->com.owner != slave)
1205 err = -EPERM;
1206 else {
1207 switch (state) {
1208 case RES_QP_BUSY:
aa1ec3dd 1209 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
c82e9aa0
EC
1210 __func__, r->com.res_id);
1211 err = -EBUSY;
1212 break;
1213
1214 case RES_QP_RESERVED:
1215 if (r->com.state == RES_QP_MAPPED && !alloc)
1216 break;
1217
aa1ec3dd 1218 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
c82e9aa0
EC
1219 err = -EINVAL;
1220 break;
1221
1222 case RES_QP_MAPPED:
1223 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1224 r->com.state == RES_QP_HW)
1225 break;
1226 else {
aa1ec3dd 1227 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
c82e9aa0
EC
1228 r->com.res_id);
1229 err = -EINVAL;
1230 }
1231
1232 break;
1233
1234 case RES_QP_HW:
1235 if (r->com.state != RES_QP_MAPPED)
1236 err = -EINVAL;
1237 break;
1238 default:
1239 err = -EINVAL;
1240 }
1241
1242 if (!err) {
1243 r->com.from_state = r->com.state;
1244 r->com.to_state = state;
1245 r->com.state = RES_QP_BUSY;
1246 if (qp)
64699336 1247 *qp = r;
c82e9aa0
EC
1248 }
1249 }
1250
1251 spin_unlock_irq(mlx4_tlock(dev));
1252
1253 return err;
1254}
1255
1256static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1257 enum res_mpt_states state, struct res_mpt **mpt)
1258{
1259 struct mlx4_priv *priv = mlx4_priv(dev);
1260 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1261 struct res_mpt *r;
1262 int err = 0;
1263
1264 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1265 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
c82e9aa0
EC
1266 if (!r)
1267 err = -ENOENT;
1268 else if (r->com.owner != slave)
1269 err = -EPERM;
1270 else {
1271 switch (state) {
1272 case RES_MPT_BUSY:
1273 err = -EINVAL;
1274 break;
1275
1276 case RES_MPT_RESERVED:
1277 if (r->com.state != RES_MPT_MAPPED)
1278 err = -EINVAL;
1279 break;
1280
1281 case RES_MPT_MAPPED:
1282 if (r->com.state != RES_MPT_RESERVED &&
1283 r->com.state != RES_MPT_HW)
1284 err = -EINVAL;
1285 break;
1286
1287 case RES_MPT_HW:
1288 if (r->com.state != RES_MPT_MAPPED)
1289 err = -EINVAL;
1290 break;
1291 default:
1292 err = -EINVAL;
1293 }
1294
1295 if (!err) {
1296 r->com.from_state = r->com.state;
1297 r->com.to_state = state;
1298 r->com.state = RES_MPT_BUSY;
1299 if (mpt)
64699336 1300 *mpt = r;
c82e9aa0
EC
1301 }
1302 }
1303
1304 spin_unlock_irq(mlx4_tlock(dev));
1305
1306 return err;
1307}
1308
1309static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1310 enum res_eq_states state, struct res_eq **eq)
1311{
1312 struct mlx4_priv *priv = mlx4_priv(dev);
1313 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1314 struct res_eq *r;
1315 int err = 0;
1316
1317 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1318 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
c82e9aa0
EC
1319 if (!r)
1320 err = -ENOENT;
1321 else if (r->com.owner != slave)
1322 err = -EPERM;
1323 else {
1324 switch (state) {
1325 case RES_EQ_BUSY:
1326 err = -EINVAL;
1327 break;
1328
1329 case RES_EQ_RESERVED:
1330 if (r->com.state != RES_EQ_HW)
1331 err = -EINVAL;
1332 break;
1333
1334 case RES_EQ_HW:
1335 if (r->com.state != RES_EQ_RESERVED)
1336 err = -EINVAL;
1337 break;
1338
1339 default:
1340 err = -EINVAL;
1341 }
1342
1343 if (!err) {
1344 r->com.from_state = r->com.state;
1345 r->com.to_state = state;
1346 r->com.state = RES_EQ_BUSY;
1347 if (eq)
1348 *eq = r;
1349 }
1350 }
1351
1352 spin_unlock_irq(mlx4_tlock(dev));
1353
1354 return err;
1355}
1356
1357static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1358 enum res_cq_states state, struct res_cq **cq)
1359{
1360 struct mlx4_priv *priv = mlx4_priv(dev);
1361 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1362 struct res_cq *r;
1363 int err;
1364
1365 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1366 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
c9218a9e 1367 if (!r) {
c82e9aa0 1368 err = -ENOENT;
c9218a9e 1369 } else if (r->com.owner != slave) {
c82e9aa0 1370 err = -EPERM;
c9218a9e
PB
1371 } else if (state == RES_CQ_ALLOCATED) {
1372 if (r->com.state != RES_CQ_HW)
c82e9aa0 1373 err = -EINVAL;
c9218a9e
PB
1374 else if (atomic_read(&r->ref_count))
1375 err = -EBUSY;
1376 else
1377 err = 0;
1378 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1379 err = -EINVAL;
1380 } else {
1381 err = 0;
1382 }
c82e9aa0 1383
c9218a9e
PB
1384 if (!err) {
1385 r->com.from_state = r->com.state;
1386 r->com.to_state = state;
1387 r->com.state = RES_CQ_BUSY;
1388 if (cq)
1389 *cq = r;
c82e9aa0
EC
1390 }
1391
1392 spin_unlock_irq(mlx4_tlock(dev));
1393
1394 return err;
1395}
1396
1397static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
f088cbb8 1398 enum res_srq_states state, struct res_srq **srq)
c82e9aa0
EC
1399{
1400 struct mlx4_priv *priv = mlx4_priv(dev);
1401 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1402 struct res_srq *r;
1403 int err = 0;
1404
1405 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1406 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
f088cbb8 1407 if (!r) {
c82e9aa0 1408 err = -ENOENT;
f088cbb8 1409 } else if (r->com.owner != slave) {
c82e9aa0 1410 err = -EPERM;
f088cbb8
PB
1411 } else if (state == RES_SRQ_ALLOCATED) {
1412 if (r->com.state != RES_SRQ_HW)
c82e9aa0 1413 err = -EINVAL;
f088cbb8
PB
1414 else if (atomic_read(&r->ref_count))
1415 err = -EBUSY;
1416 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1417 err = -EINVAL;
1418 }
c82e9aa0 1419
f088cbb8
PB
1420 if (!err) {
1421 r->com.from_state = r->com.state;
1422 r->com.to_state = state;
1423 r->com.state = RES_SRQ_BUSY;
1424 if (srq)
1425 *srq = r;
c82e9aa0
EC
1426 }
1427
1428 spin_unlock_irq(mlx4_tlock(dev));
1429
1430 return err;
1431}
1432
1433static void res_abort_move(struct mlx4_dev *dev, int slave,
1434 enum mlx4_resource type, int id)
1435{
1436 struct mlx4_priv *priv = mlx4_priv(dev);
1437 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1438 struct res_common *r;
1439
1440 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1441 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1442 if (r && (r->owner == slave))
1443 r->state = r->from_state;
1444 spin_unlock_irq(mlx4_tlock(dev));
1445}
1446
1447static void res_end_move(struct mlx4_dev *dev, int slave,
1448 enum mlx4_resource type, int id)
1449{
1450 struct mlx4_priv *priv = mlx4_priv(dev);
1451 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1452 struct res_common *r;
1453
1454 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1455 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1456 if (r && (r->owner == slave))
1457 r->state = r->to_state;
1458 spin_unlock_irq(mlx4_tlock(dev));
1459}
1460
1461static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1462{
e2c76824
JM
1463 return mlx4_is_qp_reserved(dev, qpn) &&
1464 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
c82e9aa0
EC
1465}
1466
54679e14
JM
1467static int fw_reserved(struct mlx4_dev *dev, int qpn)
1468{
1469 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
c82e9aa0
EC
1470}
1471
1472static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1473 u64 in_param, u64 *out_param)
1474{
1475 int err;
1476 int count;
1477 int align;
1478 int base;
1479 int qpn;
1480
1481 switch (op) {
1482 case RES_OP_RESERVE:
1483 count = get_param_l(&in_param);
1484 align = get_param_h(&in_param);
146f3ef4 1485 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1486 if (err)
1487 return err;
1488
146f3ef4
JM
1489 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1490 if (err) {
1491 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1492 return err;
1493 }
1494
c82e9aa0
EC
1495 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1496 if (err) {
146f3ef4 1497 mlx4_release_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1498 __mlx4_qp_release_range(dev, base, count);
1499 return err;
1500 }
1501 set_param_l(out_param, base);
1502 break;
1503 case RES_OP_MAP_ICM:
1504 qpn = get_param_l(&in_param) & 0x7fffff;
1505 if (valid_reserved(dev, slave, qpn)) {
1506 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1507 if (err)
1508 return err;
1509 }
1510
1511 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1512 NULL, 1);
1513 if (err)
1514 return err;
1515
54679e14 1516 if (!fw_reserved(dev, qpn)) {
c82e9aa0
EC
1517 err = __mlx4_qp_alloc_icm(dev, qpn);
1518 if (err) {
1519 res_abort_move(dev, slave, RES_QP, qpn);
1520 return err;
1521 }
1522 }
1523
1524 res_end_move(dev, slave, RES_QP, qpn);
1525 break;
1526
1527 default:
1528 err = -EINVAL;
1529 break;
1530 }
1531 return err;
1532}
1533
1534static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1535 u64 in_param, u64 *out_param)
1536{
1537 int err = -EINVAL;
1538 int base;
1539 int order;
1540
1541 if (op != RES_OP_RESERVE_AND_MAP)
1542 return err;
1543
1544 order = get_param_l(&in_param);
146f3ef4
JM
1545
1546 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1547 if (err)
1548 return err;
1549
c82e9aa0 1550 base = __mlx4_alloc_mtt_range(dev, order);
146f3ef4
JM
1551 if (base == -1) {
1552 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1553 return -ENOMEM;
146f3ef4 1554 }
c82e9aa0
EC
1555
1556 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
146f3ef4
JM
1557 if (err) {
1558 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1559 __mlx4_free_mtt_range(dev, base, order);
146f3ef4 1560 } else {
c82e9aa0 1561 set_param_l(out_param, base);
146f3ef4 1562 }
c82e9aa0
EC
1563
1564 return err;
1565}
1566
1567static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1568 u64 in_param, u64 *out_param)
1569{
1570 int err = -EINVAL;
1571 int index;
1572 int id;
1573 struct res_mpt *mpt;
1574
1575 switch (op) {
1576 case RES_OP_RESERVE:
146f3ef4
JM
1577 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1578 if (err)
1579 break;
1580
b20e519a 1581 index = __mlx4_mpt_reserve(dev);
146f3ef4
JM
1582 if (index == -1) {
1583 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
c82e9aa0 1584 break;
146f3ef4 1585 }
c82e9aa0
EC
1586 id = index & mpt_mask(dev);
1587
1588 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1589 if (err) {
146f3ef4 1590 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
b20e519a 1591 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
1592 break;
1593 }
1594 set_param_l(out_param, index);
1595 break;
1596 case RES_OP_MAP_ICM:
1597 index = get_param_l(&in_param);
1598 id = index & mpt_mask(dev);
1599 err = mr_res_start_move_to(dev, slave, id,
1600 RES_MPT_MAPPED, &mpt);
1601 if (err)
1602 return err;
1603
b20e519a 1604 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
c82e9aa0
EC
1605 if (err) {
1606 res_abort_move(dev, slave, RES_MPT, id);
1607 return err;
1608 }
1609
1610 res_end_move(dev, slave, RES_MPT, id);
1611 break;
1612 }
1613 return err;
1614}
1615
1616static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1617 u64 in_param, u64 *out_param)
1618{
1619 int cqn;
1620 int err;
1621
1622 switch (op) {
1623 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1624 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1625 if (err)
1626 break;
1627
146f3ef4
JM
1628 err = __mlx4_cq_alloc_icm(dev, &cqn);
1629 if (err) {
1630 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1631 break;
1632 }
1633
c82e9aa0
EC
1634 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1635 if (err) {
146f3ef4 1636 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1637 __mlx4_cq_free_icm(dev, cqn);
1638 break;
1639 }
1640
1641 set_param_l(out_param, cqn);
1642 break;
1643
1644 default:
1645 err = -EINVAL;
1646 }
1647
1648 return err;
1649}
1650
1651static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1652 u64 in_param, u64 *out_param)
1653{
1654 int srqn;
1655 int err;
1656
1657 switch (op) {
1658 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1659 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1660 if (err)
1661 break;
1662
146f3ef4
JM
1663 err = __mlx4_srq_alloc_icm(dev, &srqn);
1664 if (err) {
1665 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1666 break;
1667 }
1668
c82e9aa0
EC
1669 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1670 if (err) {
146f3ef4 1671 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1672 __mlx4_srq_free_icm(dev, srqn);
1673 break;
1674 }
1675
1676 set_param_l(out_param, srqn);
1677 break;
1678
1679 default:
1680 err = -EINVAL;
1681 }
1682
1683 return err;
1684}
1685
1686static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1687{
1688 struct mlx4_priv *priv = mlx4_priv(dev);
1689 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1690 struct mac_res *res;
1691
146f3ef4
JM
1692 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1693 return -EINVAL;
c82e9aa0 1694 res = kzalloc(sizeof *res, GFP_KERNEL);
146f3ef4
JM
1695 if (!res) {
1696 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
c82e9aa0 1697 return -ENOMEM;
146f3ef4 1698 }
c82e9aa0
EC
1699 res->mac = mac;
1700 res->port = (u8) port;
1701 list_add_tail(&res->list,
1702 &tracker->slave_list[slave].res_list[RES_MAC]);
1703 return 0;
1704}
1705
1706static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1707 int port)
1708{
1709 struct mlx4_priv *priv = mlx4_priv(dev);
1710 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1711 struct list_head *mac_list =
1712 &tracker->slave_list[slave].res_list[RES_MAC];
1713 struct mac_res *res, *tmp;
1714
1715 list_for_each_entry_safe(res, tmp, mac_list, list) {
1716 if (res->mac == mac && res->port == (u8) port) {
1717 list_del(&res->list);
146f3ef4 1718 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
c82e9aa0
EC
1719 kfree(res);
1720 break;
1721 }
1722 }
1723}
1724
1725static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1726{
1727 struct mlx4_priv *priv = mlx4_priv(dev);
1728 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1729 struct list_head *mac_list =
1730 &tracker->slave_list[slave].res_list[RES_MAC];
1731 struct mac_res *res, *tmp;
1732
1733 list_for_each_entry_safe(res, tmp, mac_list, list) {
1734 list_del(&res->list);
1735 __mlx4_unregister_mac(dev, res->port, res->mac);
146f3ef4 1736 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
c82e9aa0
EC
1737 kfree(res);
1738 }
1739}
1740
1741static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
acddd5dd 1742 u64 in_param, u64 *out_param, int in_port)
c82e9aa0
EC
1743{
1744 int err = -EINVAL;
1745 int port;
1746 u64 mac;
1747
1748 if (op != RES_OP_RESERVE_AND_MAP)
1749 return err;
1750
acddd5dd 1751 port = !in_port ? get_param_l(out_param) : in_port;
c82e9aa0
EC
1752 mac = in_param;
1753
1754 err = __mlx4_register_mac(dev, port, mac);
1755 if (err >= 0) {
1756 set_param_l(out_param, err);
1757 err = 0;
1758 }
1759
1760 if (!err) {
1761 err = mac_add_to_slave(dev, slave, mac, port);
1762 if (err)
1763 __mlx4_unregister_mac(dev, port, mac);
1764 }
1765 return err;
1766}
1767
4874080d
JM
1768static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1769 int port, int vlan_index)
ffe455ad 1770{
4874080d
JM
1771 struct mlx4_priv *priv = mlx4_priv(dev);
1772 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1773 struct list_head *vlan_list =
1774 &tracker->slave_list[slave].res_list[RES_VLAN];
1775 struct vlan_res *res, *tmp;
1776
1777 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1778 if (res->vlan == vlan && res->port == (u8) port) {
1779 /* vlan found. update ref count */
1780 ++res->ref_count;
1781 return 0;
1782 }
1783 }
1784
146f3ef4
JM
1785 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1786 return -EINVAL;
4874080d 1787 res = kzalloc(sizeof(*res), GFP_KERNEL);
146f3ef4
JM
1788 if (!res) {
1789 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
4874080d 1790 return -ENOMEM;
146f3ef4 1791 }
4874080d
JM
1792 res->vlan = vlan;
1793 res->port = (u8) port;
1794 res->vlan_index = vlan_index;
1795 res->ref_count = 1;
1796 list_add_tail(&res->list,
1797 &tracker->slave_list[slave].res_list[RES_VLAN]);
ffe455ad
EE
1798 return 0;
1799}
1800
4874080d
JM
1801
1802static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1803 int port)
1804{
1805 struct mlx4_priv *priv = mlx4_priv(dev);
1806 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1807 struct list_head *vlan_list =
1808 &tracker->slave_list[slave].res_list[RES_VLAN];
1809 struct vlan_res *res, *tmp;
1810
1811 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1812 if (res->vlan == vlan && res->port == (u8) port) {
1813 if (!--res->ref_count) {
1814 list_del(&res->list);
146f3ef4
JM
1815 mlx4_release_resource(dev, slave, RES_VLAN,
1816 1, port);
4874080d
JM
1817 kfree(res);
1818 }
1819 break;
1820 }
1821 }
1822}
1823
1824static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1825{
1826 struct mlx4_priv *priv = mlx4_priv(dev);
1827 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1828 struct list_head *vlan_list =
1829 &tracker->slave_list[slave].res_list[RES_VLAN];
1830 struct vlan_res *res, *tmp;
1831 int i;
1832
1833 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1834 list_del(&res->list);
1835 /* dereference the vlan the num times the slave referenced it */
1836 for (i = 0; i < res->ref_count; i++)
1837 __mlx4_unregister_vlan(dev, res->port, res->vlan);
146f3ef4 1838 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
4874080d
JM
1839 kfree(res);
1840 }
1841}
1842
1843static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2c957ff2 1844 u64 in_param, u64 *out_param, int in_port)
4874080d 1845{
2c957ff2
JM
1846 struct mlx4_priv *priv = mlx4_priv(dev);
1847 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
4874080d
JM
1848 int err;
1849 u16 vlan;
1850 int vlan_index;
2c957ff2
JM
1851 int port;
1852
1853 port = !in_port ? get_param_l(out_param) : in_port;
4874080d
JM
1854
1855 if (!port || op != RES_OP_RESERVE_AND_MAP)
1856 return -EINVAL;
1857
2c957ff2
JM
1858 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1859 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1860 slave_state[slave].old_vlan_api = true;
1861 return 0;
1862 }
1863
4874080d
JM
1864 vlan = (u16) in_param;
1865
1866 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1867 if (!err) {
1868 set_param_l(out_param, (u32) vlan_index);
1869 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1870 if (err)
1871 __mlx4_unregister_vlan(dev, port, vlan);
1872 }
1873 return err;
1874}
1875
ba062d52
JM
1876static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1877 u64 in_param, u64 *out_param)
1878{
1879 u32 index;
1880 int err;
1881
1882 if (op != RES_OP_RESERVE)
1883 return -EINVAL;
1884
146f3ef4 1885 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52
JM
1886 if (err)
1887 return err;
1888
146f3ef4
JM
1889 err = __mlx4_counter_alloc(dev, &index);
1890 if (err) {
1891 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1892 return err;
1893 }
1894
ba062d52 1895 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
146f3ef4 1896 if (err) {
ba062d52 1897 __mlx4_counter_free(dev, index);
146f3ef4
JM
1898 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1899 } else {
ba062d52 1900 set_param_l(out_param, index);
146f3ef4 1901 }
ba062d52
JM
1902
1903 return err;
1904}
1905
1906static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1907 u64 in_param, u64 *out_param)
1908{
1909 u32 xrcdn;
1910 int err;
1911
1912 if (op != RES_OP_RESERVE)
1913 return -EINVAL;
1914
1915 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1916 if (err)
1917 return err;
1918
1919 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1920 if (err)
1921 __mlx4_xrcd_free(dev, xrcdn);
1922 else
1923 set_param_l(out_param, xrcdn);
1924
1925 return err;
1926}
1927
c82e9aa0
EC
1928int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1929 struct mlx4_vhcr *vhcr,
1930 struct mlx4_cmd_mailbox *inbox,
1931 struct mlx4_cmd_mailbox *outbox,
1932 struct mlx4_cmd_info *cmd)
1933{
1934 int err;
1935 int alop = vhcr->op_modifier;
1936
acddd5dd 1937 switch (vhcr->in_modifier & 0xFF) {
c82e9aa0
EC
1938 case RES_QP:
1939 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1940 vhcr->in_param, &vhcr->out_param);
1941 break;
1942
1943 case RES_MTT:
1944 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1945 vhcr->in_param, &vhcr->out_param);
1946 break;
1947
1948 case RES_MPT:
1949 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1950 vhcr->in_param, &vhcr->out_param);
1951 break;
1952
1953 case RES_CQ:
1954 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1955 vhcr->in_param, &vhcr->out_param);
1956 break;
1957
1958 case RES_SRQ:
1959 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1960 vhcr->in_param, &vhcr->out_param);
1961 break;
1962
1963 case RES_MAC:
1964 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
1965 vhcr->in_param, &vhcr->out_param,
1966 (vhcr->in_modifier >> 8) & 0xFF);
c82e9aa0
EC
1967 break;
1968
ffe455ad
EE
1969 case RES_VLAN:
1970 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
1971 vhcr->in_param, &vhcr->out_param,
1972 (vhcr->in_modifier >> 8) & 0xFF);
ffe455ad
EE
1973 break;
1974
ba062d52
JM
1975 case RES_COUNTER:
1976 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1977 vhcr->in_param, &vhcr->out_param);
1978 break;
1979
1980 case RES_XRCD:
1981 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1982 vhcr->in_param, &vhcr->out_param);
1983 break;
1984
c82e9aa0
EC
1985 default:
1986 err = -EINVAL;
1987 break;
1988 }
1989
1990 return err;
1991}
1992
1993static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1994 u64 in_param)
1995{
1996 int err;
1997 int count;
1998 int base;
1999 int qpn;
2000
2001 switch (op) {
2002 case RES_OP_RESERVE:
2003 base = get_param_l(&in_param) & 0x7fffff;
2004 count = get_param_h(&in_param);
2005 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2006 if (err)
2007 break;
146f3ef4 2008 mlx4_release_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
2009 __mlx4_qp_release_range(dev, base, count);
2010 break;
2011 case RES_OP_MAP_ICM:
2012 qpn = get_param_l(&in_param) & 0x7fffff;
2013 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2014 NULL, 0);
2015 if (err)
2016 return err;
2017
54679e14 2018 if (!fw_reserved(dev, qpn))
c82e9aa0
EC
2019 __mlx4_qp_free_icm(dev, qpn);
2020
2021 res_end_move(dev, slave, RES_QP, qpn);
2022
2023 if (valid_reserved(dev, slave, qpn))
2024 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2025 break;
2026 default:
2027 err = -EINVAL;
2028 break;
2029 }
2030 return err;
2031}
2032
2033static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2034 u64 in_param, u64 *out_param)
2035{
2036 int err = -EINVAL;
2037 int base;
2038 int order;
2039
2040 if (op != RES_OP_RESERVE_AND_MAP)
2041 return err;
2042
2043 base = get_param_l(&in_param);
2044 order = get_param_h(&in_param);
2045 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2046 if (!err) {
2047 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2048 __mlx4_free_mtt_range(dev, base, order);
2049 }
2050 return err;
2051}
2052
2053static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2054 u64 in_param)
2055{
2056 int err = -EINVAL;
2057 int index;
2058 int id;
2059 struct res_mpt *mpt;
2060
2061 switch (op) {
2062 case RES_OP_RESERVE:
2063 index = get_param_l(&in_param);
2064 id = index & mpt_mask(dev);
2065 err = get_res(dev, slave, id, RES_MPT, &mpt);
2066 if (err)
2067 break;
2068 index = mpt->key;
2069 put_res(dev, slave, id, RES_MPT);
2070
2071 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2072 if (err)
2073 break;
2074 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2075 __mlx4_mpt_release(dev, index);
2076 break;
2077 case RES_OP_MAP_ICM:
2078 index = get_param_l(&in_param);
2079 id = index & mpt_mask(dev);
2080 err = mr_res_start_move_to(dev, slave, id,
2081 RES_MPT_RESERVED, &mpt);
2082 if (err)
2083 return err;
2084
2085 __mlx4_mpt_free_icm(dev, mpt->key);
2086 res_end_move(dev, slave, RES_MPT, id);
2087 return err;
2088 break;
2089 default:
2090 err = -EINVAL;
2091 break;
2092 }
2093 return err;
2094}
2095
2096static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2097 u64 in_param, u64 *out_param)
2098{
2099 int cqn;
2100 int err;
2101
2102 switch (op) {
2103 case RES_OP_RESERVE_AND_MAP:
2104 cqn = get_param_l(&in_param);
2105 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2106 if (err)
2107 break;
2108
2109 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2110 __mlx4_cq_free_icm(dev, cqn);
2111 break;
2112
2113 default:
2114 err = -EINVAL;
2115 break;
2116 }
2117
2118 return err;
2119}
2120
2121static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2122 u64 in_param, u64 *out_param)
2123{
2124 int srqn;
2125 int err;
2126
2127 switch (op) {
2128 case RES_OP_RESERVE_AND_MAP:
2129 srqn = get_param_l(&in_param);
2130 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2131 if (err)
2132 break;
2133
2134 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2135 __mlx4_srq_free_icm(dev, srqn);
2136 break;
2137
2138 default:
2139 err = -EINVAL;
2140 break;
2141 }
2142
2143 return err;
2144}
2145
2146static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2147 u64 in_param, u64 *out_param, int in_port)
2148{
2149 int port;
2150 int err = 0;
2151
2152 switch (op) {
2153 case RES_OP_RESERVE_AND_MAP:
2154 port = !in_port ? get_param_l(out_param) : in_port;
2155 mac_del_from_slave(dev, slave, in_param, port);
2156 __mlx4_unregister_mac(dev, port, in_param);
2157 break;
2158 default:
2159 err = -EINVAL;
2160 break;
2161 }
2162
2163 return err;
2164
2165}
2166
2167static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2168 u64 in_param, u64 *out_param, int port)
2169{
2170 struct mlx4_priv *priv = mlx4_priv(dev);
2171 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2172 int err = 0;
2173
2174 switch (op) {
2175 case RES_OP_RESERVE_AND_MAP:
2176 if (slave_state[slave].old_vlan_api)
2177 return 0;
2178 if (!port)
2179 return -EINVAL;
2180 vlan_del_from_slave(dev, slave, in_param, port);
2181 __mlx4_unregister_vlan(dev, port, in_param);
2182 break;
2183 default:
2184 err = -EINVAL;
2185 break;
2186 }
2187
2188 return err;
2189}
2190
2191static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2192 u64 in_param, u64 *out_param)
2193{
2194 int index;
2195 int err;
2196
2197 if (op != RES_OP_RESERVE)
2198 return -EINVAL;
2199
2200 index = get_param_l(&in_param);
2201 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2202 if (err)
2203 return err;
2204
2205 __mlx4_counter_free(dev, index);
2206 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2207
2208 return err;
2209}
2210
2211static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2212 u64 in_param, u64 *out_param)
2213{
2214 int xrcdn;
2215 int err;
2216
2217 if (op != RES_OP_RESERVE)
2218 return -EINVAL;
2219
2220 xrcdn = get_param_l(&in_param);
2221 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2222 if (err)
2223 return err;
2224
2225 __mlx4_xrcd_free(dev, xrcdn);
2226
2227 return err;
2228}
2229
2230int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2231 struct mlx4_vhcr *vhcr,
2232 struct mlx4_cmd_mailbox *inbox,
2233 struct mlx4_cmd_mailbox *outbox,
2234 struct mlx4_cmd_info *cmd)
2235{
2236 int err = -EINVAL;
2237 int alop = vhcr->op_modifier;
2238
2239 switch (vhcr->in_modifier & 0xFF) {
2240 case RES_QP:
2241 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2242 vhcr->in_param);
2243 break;
2244
2245 case RES_MTT:
2246 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2247 vhcr->in_param, &vhcr->out_param);
2248 break;
2249
2250 case RES_MPT:
2251 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2252 vhcr->in_param);
2253 break;
2254
2255 case RES_CQ:
2256 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2257 vhcr->in_param, &vhcr->out_param);
2258 break;
2259
2260 case RES_SRQ:
2261 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2262 vhcr->in_param, &vhcr->out_param);
2263 break;
2264
2265 case RES_MAC:
2266 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2267 vhcr->in_param, &vhcr->out_param,
2268 (vhcr->in_modifier >> 8) & 0xFF);
2269 break;
2270
2271 case RES_VLAN:
2272 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2273 vhcr->in_param, &vhcr->out_param,
2274 (vhcr->in_modifier >> 8) & 0xFF);
2275 break;
2276
2277 case RES_COUNTER:
2278 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2279 vhcr->in_param, &vhcr->out_param);
2280 break;
2281
2282 case RES_XRCD:
2283 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2284 vhcr->in_param, &vhcr->out_param);
2285
2286 default:
2287 break;
2288 }
2289 return err;
2290}
2291
2292/* ugly but other choices are uglier */
2293static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2294{
2295 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2296}
2297
2298static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2299{
2300 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2301}
2302
2303static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2304{
2305 return be32_to_cpu(mpt->mtt_sz);
2306}
2307
2308static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2309{
2310 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2311}
2312
2313static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2314{
2315 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2316}
2317
2318static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2319{
2320 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2321}
2322
2323static int mr_is_region(struct mlx4_mpt_entry *mpt)
2324{
2325 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2326}
2327
2328static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2329{
2330 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2331}
2332
2333static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2334{
2335 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2336}
2337
2338static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2339{
2340 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2341 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2342 int log_sq_stride = qpc->sq_size_stride & 7;
2343 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2344 int log_rq_stride = qpc->rq_size_stride & 7;
2345 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2346 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2347 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2348 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2349 int sq_size;
2350 int rq_size;
2351 int total_pages;
2352 int total_mem;
2353 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2354
2355 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2356 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2357 total_mem = sq_size + rq_size;
2358 total_pages =
2359 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2360 page_shift);
2361
2362 return total_pages;
2363}
2364
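/*
 * Worked example for qp_get_mtt_size() above (illustration only,
 * assuming 4KB pages, i.e. log_page_size = 0, and page_offset = 0):
 * with log_sq_size = 8, log_sq_stride = 3, log_rq_size = 8,
 * log_rq_stride = 2 and no SRQ/RSS/XRC,
 * sq_size = 1 << (8 + 3 + 4) = 32768 and rq_size = 1 << (8 + 2 + 4) = 16384,
 * so total_pages = roundup_pow_of_two(49152 >> 12) = 16 MTT entries.
 */
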
2365static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2366 int size, struct res_mtt *mtt)
2367{
2368 int res_start = mtt->com.res_id;
2369 int res_size = (1 << mtt->order);
2370
2371 if (start < res_start || start + size > res_start + res_size)
2372 return -EPERM;
2373 return 0;
2374}
2375
2376int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2377 struct mlx4_vhcr *vhcr,
2378 struct mlx4_cmd_mailbox *inbox,
2379 struct mlx4_cmd_mailbox *outbox,
2380 struct mlx4_cmd_info *cmd)
2381{
2382 int err;
2383 int index = vhcr->in_modifier;
2384 struct res_mtt *mtt;
2385 struct res_mpt *mpt;
2386 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2387 int phys;
2388 int id;
2389 u32 pd;
2390 int pd_slave;
2391
2392 id = index & mpt_mask(dev);
2393 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2394 if (err)
2395 return err;
2396
2397 /* Disable memory windows for VFs. */
2398 if (!mr_is_region(inbox->buf)) {
2399 err = -EPERM;
2400 goto ex_abort;
2401 }
2402
2403 /* Make sure that the PD bits related to the slave id are zeros. */
2404 pd = mr_get_pd(inbox->buf);
2405 pd_slave = (pd >> 17) & 0x7f;
2406 if (pd_slave != 0 && pd_slave != slave) {
2407 err = -EPERM;
2408 goto ex_abort;
2409 }
2410
2411 if (mr_is_fmr(inbox->buf)) {
2412 /* FMR and Bind Enable are forbidden in slave devices. */
2413 if (mr_is_bind_enabled(inbox->buf)) {
2414 err = -EPERM;
2415 goto ex_abort;
2416 }
2417 /* FMR and Memory Windows are also forbidden. */
2418 if (!mr_is_region(inbox->buf)) {
2419 err = -EPERM;
2420 goto ex_abort;
2421 }
2422 }
2423
2424 phys = mr_phys_mpt(inbox->buf);
2425 if (!phys) {
2426 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2427 if (err)
2428 goto ex_abort;
2429
2430 err = check_mtt_range(dev, slave, mtt_base,
2431 mr_get_mtt_size(inbox->buf), mtt);
2432 if (err)
2433 goto ex_put;
2434
2435 mpt->mtt = mtt;
2436 }
2437
2438 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2439 if (err)
2440 goto ex_put;
2441
2442 if (!phys) {
2443 atomic_inc(&mtt->ref_count);
2444 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2445 }
2446
2447 res_end_move(dev, slave, RES_MPT, id);
2448 return 0;
2449
2450ex_put:
2451 if (!phys)
2452 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2453ex_abort:
2454 res_abort_move(dev, slave, RES_MPT, id);
2455
2456 return err;
2457}
2458
2459int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2460 struct mlx4_vhcr *vhcr,
2461 struct mlx4_cmd_mailbox *inbox,
2462 struct mlx4_cmd_mailbox *outbox,
2463 struct mlx4_cmd_info *cmd)
2464{
2465 int err;
2466 int index = vhcr->in_modifier;
2467 struct res_mpt *mpt;
2468 int id;
2469
2470 id = index & mpt_mask(dev);
2471 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2472 if (err)
2473 return err;
2474
2475 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2476 if (err)
2477 goto ex_abort;
2478
2479 if (mpt->mtt)
2480 atomic_dec(&mpt->mtt->ref_count);
2481
2482 res_end_move(dev, slave, RES_MPT, id);
2483 return 0;
2484
2485ex_abort:
2486 res_abort_move(dev, slave, RES_MPT, id);
2487
2488 return err;
2489}
2490
2491int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2492 struct mlx4_vhcr *vhcr,
2493 struct mlx4_cmd_mailbox *inbox,
2494 struct mlx4_cmd_mailbox *outbox,
2495 struct mlx4_cmd_info *cmd)
2496{
2497 int err;
2498 int index = vhcr->in_modifier;
2499 struct res_mpt *mpt;
2500 int id;
2501
2502 id = index & mpt_mask(dev);
2503 err = get_res(dev, slave, id, RES_MPT, &mpt);
2504 if (err)
2505 return err;
2506
2507 if (mpt->com.from_state != RES_MPT_HW) {
2508 err = -EBUSY;
2509 goto out;
2510 }
2511
2512 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2513
2514out:
2515 put_res(dev, slave, id, RES_MPT);
2516 return err;
2517}
2518
2519static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2520{
2521 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2522}
2523
2524static int qp_get_scqn(struct mlx4_qp_context *qpc)
2525{
2526 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2527}
2528
2529static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2530{
2531 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2532}
2533
2534static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2535 struct mlx4_qp_context *context)
2536{
2537 u32 qpn = vhcr->in_modifier & 0xffffff;
2538 u32 qkey = 0;
2539
2540 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2541 return;
2542
2543 /* adjust qkey in qp context */
2544 context->qkey = cpu_to_be32(qkey);
2545}
2546
2547int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2548 struct mlx4_vhcr *vhcr,
2549 struct mlx4_cmd_mailbox *inbox,
2550 struct mlx4_cmd_mailbox *outbox,
2551 struct mlx4_cmd_info *cmd)
2552{
2553 int err;
2554 int qpn = vhcr->in_modifier & 0x7fffff;
2555 struct res_mtt *mtt;
2556 struct res_qp *qp;
2557 struct mlx4_qp_context *qpc = inbox->buf + 8;
2558 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2559 int mtt_size = qp_get_mtt_size(qpc);
2560 struct res_cq *rcq;
2561 struct res_cq *scq;
2562 int rcqn = qp_get_rcqn(qpc);
2563 int scqn = qp_get_scqn(qpc);
2564 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2565 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2566 struct res_srq *srq;
2567 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2568
2569 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2570 if (err)
2571 return err;
2572 qp->local_qpn = local_qpn;
2573 qp->sched_queue = 0;
2574 qp->param3 = 0;
2575 qp->vlan_control = 0;
2576 qp->fvl_rx = 0;
2577 qp->pri_path_fl = 0;
2578 qp->vlan_index = 0;
2579 qp->feup = 0;
2580 qp->qpc_flags = be32_to_cpu(qpc->flags);
2581
2582 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2583 if (err)
2584 goto ex_abort;
2585
2586 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2587 if (err)
2588 goto ex_put_mtt;
2589
2590 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2591 if (err)
2592 goto ex_put_mtt;
2593
2594 if (scqn != rcqn) {
2595 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2596 if (err)
2597 goto ex_put_rcq;
2598 } else
2599 scq = rcq;
2600
2601 if (use_srq) {
2602 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2603 if (err)
2604 goto ex_put_scq;
2605 }
2606
2607 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2608 update_pkey_index(dev, slave, inbox);
2609 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2610 if (err)
2611 goto ex_put_srq;
2612 atomic_inc(&mtt->ref_count);
2613 qp->mtt = mtt;
2614 atomic_inc(&rcq->ref_count);
2615 qp->rcq = rcq;
2616 atomic_inc(&scq->ref_count);
2617 qp->scq = scq;
2618
2619 if (scqn != rcqn)
2620 put_res(dev, slave, scqn, RES_CQ);
2621
2622 if (use_srq) {
2623 atomic_inc(&srq->ref_count);
2624 put_res(dev, slave, srqn, RES_SRQ);
2625 qp->srq = srq;
2626 }
2627 put_res(dev, slave, rcqn, RES_CQ);
2628 put_res(dev, slave, mtt_base, RES_MTT);
2629 res_end_move(dev, slave, RES_QP, qpn);
2630
2631 return 0;
2632
2633ex_put_srq:
2634 if (use_srq)
2635 put_res(dev, slave, srqn, RES_SRQ);
2636ex_put_scq:
2637 if (scqn != rcqn)
2638 put_res(dev, slave, scqn, RES_CQ);
2639ex_put_rcq:
2640 put_res(dev, slave, rcqn, RES_CQ);
2641ex_put_mtt:
2642 put_res(dev, slave, mtt_base, RES_MTT);
2643ex_abort:
2644 res_abort_move(dev, slave, RES_QP, qpn);
2645
2646 return err;
2647}
2648
2649static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2650{
2651 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2652}
2653
2654static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2655{
2656 int log_eq_size = eqc->log_eq_size & 0x1f;
2657 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2658
2659 if (log_eq_size + 5 < page_shift)
2660 return 1;
2661
2662 return 1 << (log_eq_size + 5 - page_shift);
2663}
2664
2665static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2666{
2667 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2668}
2669
2670static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2671{
2672 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2673 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2674
2675 if (log_cq_size + 5 < page_shift)
2676 return 1;
2677
2678 return 1 << (log_cq_size + 5 - page_shift);
2679}
2680
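/*
 * Both size helpers above follow the same pattern: EQEs and CQEs are
 * 32 bytes each (hence the "+ 5"), so a queue with 2^n entries and a
 * page shift of p needs 1 << (n + 5 - p) MTT entries, with a minimum
 * of one entry when the queue fits in a single page.  For example
 * (illustration only), log_eq_size = 16 with 4KB pages gives
 * 1 << (16 + 5 - 12) = 512 MTT entries.
 */
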
2681int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2682 struct mlx4_vhcr *vhcr,
2683 struct mlx4_cmd_mailbox *inbox,
2684 struct mlx4_cmd_mailbox *outbox,
2685 struct mlx4_cmd_info *cmd)
2686{
2687 int err;
2688 int eqn = vhcr->in_modifier;
2689 int res_id = (slave << 8) | eqn;
2690 struct mlx4_eq_context *eqc = inbox->buf;
2691 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2692 int mtt_size = eq_get_mtt_size(eqc);
2693 struct res_eq *eq;
2694 struct res_mtt *mtt;
2695
2696 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2697 if (err)
2698 return err;
2699 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2700 if (err)
2701 goto out_add;
2702
2703 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2704 if (err)
2705 goto out_move;
2706
2707 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2708 if (err)
2709 goto out_put;
2710
2711 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2712 if (err)
2713 goto out_put;
2714
2715 atomic_inc(&mtt->ref_count);
2716 eq->mtt = mtt;
2717 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2718 res_end_move(dev, slave, RES_EQ, res_id);
2719 return 0;
2720
2721out_put:
2722 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2723out_move:
2724 res_abort_move(dev, slave, RES_EQ, res_id);
2725out_add:
2726 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2727 return err;
2728}
2729
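/*
 * Slave EQs are tracked under a composite resource id,
 * res_id = (slave << 8) | eqn, in the SW2HW/HW2SW/QUERY EQ wrappers
 * and in mlx4_GEN_EQE().  For example (illustration only), eqn 5
 * owned by slave 3 is tracked as res_id 0x305.
 */
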
2730static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2731 int len, struct res_mtt **res)
2732{
2733 struct mlx4_priv *priv = mlx4_priv(dev);
2734 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2735 struct res_mtt *mtt;
2736 int err = -EINVAL;
2737
2738 spin_lock_irq(mlx4_tlock(dev));
2739 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2740 com.list) {
2741 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2742 *res = mtt;
2743 mtt->com.from_state = mtt->com.state;
2744 mtt->com.state = RES_MTT_BUSY;
2745 err = 0;
2746 break;
2747 }
2748 }
2749 spin_unlock_irq(mlx4_tlock(dev));
2750
2751 return err;
2752}
2753
2754static int verify_qp_parameters(struct mlx4_dev *dev,
2755 struct mlx4_cmd_mailbox *inbox,
2756 enum qp_transition transition, u8 slave)
2757{
2758 u32 qp_type;
2759 struct mlx4_qp_context *qp_ctx;
2760 enum mlx4_qp_optpar optpar;
2761 int port;
2762 int num_gids;
2763
2764 qp_ctx = inbox->buf + 8;
2765 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2766 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2767
2768 switch (qp_type) {
2769 case MLX4_QP_ST_RC:
2770 case MLX4_QP_ST_XRC:
2771 case MLX4_QP_ST_UC:
2772 switch (transition) {
2773 case QP_TRANS_INIT2RTR:
2774 case QP_TRANS_RTR2RTS:
2775 case QP_TRANS_RTS2RTS:
2776 case QP_TRANS_SQD2SQD:
2777 case QP_TRANS_SQD2RTS:
2778 if (slave != mlx4_master_func_num(dev))
2779 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2780 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2781 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2782 num_gids = mlx4_get_slave_num_gids(dev, slave);
2783 else
2784 num_gids = 1;
2785 if (qp_ctx->pri_path.mgid_index >= num_gids)
2786 return -EINVAL;
2787 }
2788 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2789 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2790 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2791 num_gids = mlx4_get_slave_num_gids(dev, slave);
2792 else
2793 num_gids = 1;
2794 if (qp_ctx->alt_path.mgid_index >= num_gids)
2795 return -EINVAL;
2796 }
2797 break;
2798 default:
2799 break;
2800 }
2801
2802 break;
2803 default:
2804 break;
2805 }
2806
2807 return 0;
2808}
2809
2810int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2811 struct mlx4_vhcr *vhcr,
2812 struct mlx4_cmd_mailbox *inbox,
2813 struct mlx4_cmd_mailbox *outbox,
2814 struct mlx4_cmd_info *cmd)
2815{
2816 struct mlx4_mtt mtt;
2817 __be64 *page_list = inbox->buf;
2818 u64 *pg_list = (u64 *)page_list;
2819 int i;
2820 struct res_mtt *rmtt = NULL;
2821 int start = be64_to_cpu(page_list[0]);
2822 int npages = vhcr->in_modifier;
2823 int err;
2824
2825 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2826 if (err)
2827 return err;
2828
2829 /* Call the SW implementation of write_mtt:
2830 * - Prepare a dummy mtt struct
2831 * - Translate inbox contents to simple addresses in host endianness */
2832 mtt.offset = 0; /* TBD: the offset is not handled here since
2833 it is not currently used */
2834 mtt.order = 0;
2835 mtt.page_shift = 0;
2836 for (i = 0; i < npages; ++i)
2837 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2838
2839 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2840 ((u64 *)page_list + 2));
2841
2842 if (rmtt)
2843 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2844
2845 return err;
2846}
2847
2848int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2849 struct mlx4_vhcr *vhcr,
2850 struct mlx4_cmd_mailbox *inbox,
2851 struct mlx4_cmd_mailbox *outbox,
2852 struct mlx4_cmd_info *cmd)
2853{
2854 int eqn = vhcr->in_modifier;
2855 int res_id = eqn | (slave << 8);
2856 struct res_eq *eq;
2857 int err;
2858
2859 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2860 if (err)
2861 return err;
2862
2863 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2864 if (err)
2865 goto ex_abort;
2866
2867 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2868 if (err)
2869 goto ex_put;
2870
2871 atomic_dec(&eq->mtt->ref_count);
2872 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2873 res_end_move(dev, slave, RES_EQ, res_id);
2874 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2875
2876 return 0;
2877
2878ex_put:
2879 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2880ex_abort:
2881 res_abort_move(dev, slave, RES_EQ, res_id);
2882
2883 return err;
2884}
2885
2886int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2887{
2888 struct mlx4_priv *priv = mlx4_priv(dev);
2889 struct mlx4_slave_event_eq_info *event_eq;
2890 struct mlx4_cmd_mailbox *mailbox;
2891 u32 in_modifier = 0;
2892 int err;
2893 int res_id;
2894 struct res_eq *req;
2895
2896 if (!priv->mfunc.master.slave_state)
2897 return -EINVAL;
2898
2899 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2900
2901 /* Create the event only if the slave is registered */
2902 if (event_eq->eqn < 0)
2903 return 0;
2904
2905 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2906 res_id = (slave << 8) | event_eq->eqn;
2907 err = get_res(dev, slave, res_id, RES_EQ, &req);
2908 if (err)
2909 goto unlock;
2910
2911 if (req->com.from_state != RES_EQ_HW) {
2912 err = -EINVAL;
2913 goto put;
2914 }
2915
2916 mailbox = mlx4_alloc_cmd_mailbox(dev);
2917 if (IS_ERR(mailbox)) {
2918 err = PTR_ERR(mailbox);
2919 goto put;
2920 }
2921
2922 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2923 ++event_eq->token;
2924 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2925 }
2926
2927 memcpy(mailbox->buf, (u8 *) eqe, 28);
2928
2929 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2930
2931 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2932 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2933 MLX4_CMD_NATIVE);
2934
2935 put_res(dev, slave, res_id, RES_EQ);
2936 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2937 mlx4_free_cmd_mailbox(dev, mailbox);
2938 return err;
2939
2940put:
2941 put_res(dev, slave, res_id, RES_EQ);
2942
2943unlock:
2944 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2945 return err;
2946}
2947
2948int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2949 struct mlx4_vhcr *vhcr,
2950 struct mlx4_cmd_mailbox *inbox,
2951 struct mlx4_cmd_mailbox *outbox,
2952 struct mlx4_cmd_info *cmd)
2953{
2954 int eqn = vhcr->in_modifier;
2955 int res_id = eqn | (slave << 8);
2956 struct res_eq *eq;
2957 int err;
2958
2959 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2960 if (err)
2961 return err;
2962
2963 if (eq->com.from_state != RES_EQ_HW) {
2964 err = -EINVAL;
2965 goto ex_put;
2966 }
2967
2968 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2969
2970ex_put:
2971 put_res(dev, slave, res_id, RES_EQ);
2972 return err;
2973}
2974
2975int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2976 struct mlx4_vhcr *vhcr,
2977 struct mlx4_cmd_mailbox *inbox,
2978 struct mlx4_cmd_mailbox *outbox,
2979 struct mlx4_cmd_info *cmd)
2980{
2981 int err;
2982 int cqn = vhcr->in_modifier;
2983 struct mlx4_cq_context *cqc = inbox->buf;
2984 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2985 struct res_cq *cq;
2986 struct res_mtt *mtt;
2987
2988 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2989 if (err)
2990 return err;
2991 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2992 if (err)
2993 goto out_move;
2994 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2995 if (err)
2996 goto out_put;
2997 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2998 if (err)
2999 goto out_put;
3000 atomic_inc(&mtt->ref_count);
3001 cq->mtt = mtt;
3002 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3003 res_end_move(dev, slave, RES_CQ, cqn);
3004 return 0;
3005
3006out_put:
3007 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3008out_move:
3009 res_abort_move(dev, slave, RES_CQ, cqn);
3010 return err;
3011}
3012
3013int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3014 struct mlx4_vhcr *vhcr,
3015 struct mlx4_cmd_mailbox *inbox,
3016 struct mlx4_cmd_mailbox *outbox,
3017 struct mlx4_cmd_info *cmd)
3018{
3019 int err;
3020 int cqn = vhcr->in_modifier;
3021 struct res_cq *cq;
3022
3023 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3024 if (err)
3025 return err;
3026 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3027 if (err)
3028 goto out_move;
3029 atomic_dec(&cq->mtt->ref_count);
3030 res_end_move(dev, slave, RES_CQ, cqn);
3031 return 0;
3032
3033out_move:
3034 res_abort_move(dev, slave, RES_CQ, cqn);
3035 return err;
3036}
3037
3038int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3039 struct mlx4_vhcr *vhcr,
3040 struct mlx4_cmd_mailbox *inbox,
3041 struct mlx4_cmd_mailbox *outbox,
3042 struct mlx4_cmd_info *cmd)
3043{
3044 int cqn = vhcr->in_modifier;
3045 struct res_cq *cq;
3046 int err;
3047
3048 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3049 if (err)
3050 return err;
3051
3052 if (cq->com.from_state != RES_CQ_HW)
3053 goto ex_put;
3054
3055 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3056ex_put:
3057 put_res(dev, slave, cqn, RES_CQ);
3058
3059 return err;
3060}
3061
3062static int handle_resize(struct mlx4_dev *dev, int slave,
3063 struct mlx4_vhcr *vhcr,
3064 struct mlx4_cmd_mailbox *inbox,
3065 struct mlx4_cmd_mailbox *outbox,
3066 struct mlx4_cmd_info *cmd,
3067 struct res_cq *cq)
3068{
3069 int err;
3070 struct res_mtt *orig_mtt;
3071 struct res_mtt *mtt;
3072 struct mlx4_cq_context *cqc = inbox->buf;
3073 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3074
3075 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3076 if (err)
3077 return err;
3078
3079 if (orig_mtt != cq->mtt) {
3080 err = -EINVAL;
3081 goto ex_put;
3082 }
3083
3084 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3085 if (err)
3086 goto ex_put;
3087
3088 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3089 if (err)
3090 goto ex_put1;
3091 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3092 if (err)
3093 goto ex_put1;
3094 atomic_dec(&orig_mtt->ref_count);
3095 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3096 atomic_inc(&mtt->ref_count);
3097 cq->mtt = mtt;
3098 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3099 return 0;
3100
3101ex_put1:
3102 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3103ex_put:
3104 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3105
3106 return err;
3107
3108}
3109
3110int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3111 struct mlx4_vhcr *vhcr,
3112 struct mlx4_cmd_mailbox *inbox,
3113 struct mlx4_cmd_mailbox *outbox,
3114 struct mlx4_cmd_info *cmd)
3115{
3116 int cqn = vhcr->in_modifier;
3117 struct res_cq *cq;
3118 int err;
3119
3120 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3121 if (err)
3122 return err;
3123
3124 if (cq->com.from_state != RES_CQ_HW)
3125 goto ex_put;
3126
3127 if (vhcr->op_modifier == 0) {
3128 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3129 goto ex_put;
3130 }
3131
3132 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3133ex_put:
3134 put_res(dev, slave, cqn, RES_CQ);
3135
3136 return err;
3137}
3138
3139static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3140{
3141 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3142 int log_rq_stride = srqc->logstride & 7;
3143 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3144
3145 if (log_srq_size + log_rq_stride + 4 < page_shift)
3146 return 1;
3147
3148 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3149}
3150
3151int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3152 struct mlx4_vhcr *vhcr,
3153 struct mlx4_cmd_mailbox *inbox,
3154 struct mlx4_cmd_mailbox *outbox,
3155 struct mlx4_cmd_info *cmd)
3156{
3157 int err;
3158 int srqn = vhcr->in_modifier;
3159 struct res_mtt *mtt;
3160 struct res_srq *srq;
3161 struct mlx4_srq_context *srqc = inbox->buf;
3162 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3163
3164 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3165 return -EINVAL;
3166
3167 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3168 if (err)
3169 return err;
3170 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3171 if (err)
3172 goto ex_abort;
3173 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3174 mtt);
3175 if (err)
3176 goto ex_put_mtt;
3177
3178 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3179 if (err)
3180 goto ex_put_mtt;
3181
3182 atomic_inc(&mtt->ref_count);
3183 srq->mtt = mtt;
3184 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3185 res_end_move(dev, slave, RES_SRQ, srqn);
3186 return 0;
3187
3188ex_put_mtt:
3189 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3190ex_abort:
3191 res_abort_move(dev, slave, RES_SRQ, srqn);
3192
3193 return err;
3194}
3195
3196int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3197 struct mlx4_vhcr *vhcr,
3198 struct mlx4_cmd_mailbox *inbox,
3199 struct mlx4_cmd_mailbox *outbox,
3200 struct mlx4_cmd_info *cmd)
3201{
3202 int err;
3203 int srqn = vhcr->in_modifier;
3204 struct res_srq *srq;
3205
3206 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3207 if (err)
3208 return err;
3209 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3210 if (err)
3211 goto ex_abort;
3212 atomic_dec(&srq->mtt->ref_count);
3213 if (srq->cq)
3214 atomic_dec(&srq->cq->ref_count);
3215 res_end_move(dev, slave, RES_SRQ, srqn);
3216
3217 return 0;
3218
3219ex_abort:
3220 res_abort_move(dev, slave, RES_SRQ, srqn);
3221
3222 return err;
3223}
3224
3225int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3226 struct mlx4_vhcr *vhcr,
3227 struct mlx4_cmd_mailbox *inbox,
3228 struct mlx4_cmd_mailbox *outbox,
3229 struct mlx4_cmd_info *cmd)
3230{
3231 int err;
3232 int srqn = vhcr->in_modifier;
3233 struct res_srq *srq;
3234
3235 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3236 if (err)
3237 return err;
3238 if (srq->com.from_state != RES_SRQ_HW) {
3239 err = -EBUSY;
3240 goto out;
3241 }
3242 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3243out:
3244 put_res(dev, slave, srqn, RES_SRQ);
3245 return err;
3246}
3247
3248int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3249 struct mlx4_vhcr *vhcr,
3250 struct mlx4_cmd_mailbox *inbox,
3251 struct mlx4_cmd_mailbox *outbox,
3252 struct mlx4_cmd_info *cmd)
3253{
3254 int err;
3255 int srqn = vhcr->in_modifier;
3256 struct res_srq *srq;
3257
3258 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3259 if (err)
3260 return err;
3261
3262 if (srq->com.from_state != RES_SRQ_HW) {
3263 err = -EBUSY;
3264 goto out;
3265 }
3266
3267 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3268out:
3269 put_res(dev, slave, srqn, RES_SRQ);
3270 return err;
3271}
3272
3273int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3274 struct mlx4_vhcr *vhcr,
3275 struct mlx4_cmd_mailbox *inbox,
3276 struct mlx4_cmd_mailbox *outbox,
3277 struct mlx4_cmd_info *cmd)
3278{
3279 int err;
3280 int qpn = vhcr->in_modifier & 0x7fffff;
3281 struct res_qp *qp;
3282
3283 err = get_res(dev, slave, qpn, RES_QP, &qp);
3284 if (err)
3285 return err;
3286 if (qp->com.from_state != RES_QP_HW) {
3287 err = -EBUSY;
3288 goto out;
3289 }
3290
3291 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3292out:
3293 put_res(dev, slave, qpn, RES_QP);
3294 return err;
3295}
3296
3297int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3298 struct mlx4_vhcr *vhcr,
3299 struct mlx4_cmd_mailbox *inbox,
3300 struct mlx4_cmd_mailbox *outbox,
3301 struct mlx4_cmd_info *cmd)
3302{
3303 struct mlx4_qp_context *context = inbox->buf + 8;
3304 adjust_proxy_tun_qkey(dev, vhcr, context);
3305 update_pkey_index(dev, slave, inbox);
3306 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3307}
3308
3309int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3310 struct mlx4_vhcr *vhcr,
3311 struct mlx4_cmd_mailbox *inbox,
3312 struct mlx4_cmd_mailbox *outbox,
3313 struct mlx4_cmd_info *cmd)
3314{
3315 int err;
3316 struct mlx4_qp_context *qpc = inbox->buf + 8;
3317 int qpn = vhcr->in_modifier & 0x7fffff;
3318 struct res_qp *qp;
3319 u8 orig_sched_queue;
3320 __be32 orig_param3 = qpc->param3;
3321 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3322 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3323 u8 orig_pri_path_fl = qpc->pri_path.fl;
3324 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3325 u8 orig_feup = qpc->pri_path.feup;
3326
3327 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3328 if (err)
3329 return err;
3330
3331 update_pkey_index(dev, slave, inbox);
3332 update_gid(dev, inbox, (u8)slave);
3333 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3334 orig_sched_queue = qpc->pri_path.sched_queue;
3335 err = update_vport_qp_param(dev, inbox, slave, qpn);
3336 if (err)
3337 return err;
3338
3339 err = get_res(dev, slave, qpn, RES_QP, &qp);
3340 if (err)
3341 return err;
3342 if (qp->com.from_state != RES_QP_HW) {
3343 err = -EBUSY;
3344 goto out;
3345 }
3346
3347 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3348out:
3349 /* if no error, save sched queue value passed in by VF. This is
3350 * essentially the QOS value provided by the VF. This will be useful
3351 * if we allow dynamic changes from VST back to VGT
3352 */
3353 if (!err) {
3354 qp->sched_queue = orig_sched_queue;
3355 qp->param3 = orig_param3;
3356 qp->vlan_control = orig_vlan_control;
3357 qp->fvl_rx = orig_fvl_rx;
3358 qp->pri_path_fl = orig_pri_path_fl;
3359 qp->vlan_index = orig_vlan_index;
3360 qp->feup = orig_feup;
3361 }
3362 put_res(dev, slave, qpn, RES_QP);
3363 return err;
3364}
3365
3366int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3367 struct mlx4_vhcr *vhcr,
3368 struct mlx4_cmd_mailbox *inbox,
3369 struct mlx4_cmd_mailbox *outbox,
3370 struct mlx4_cmd_info *cmd)
3371{
3372 int err;
3373 struct mlx4_qp_context *context = inbox->buf + 8;
3374
3375 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3376 if (err)
3377 return err;
3378
3379 update_pkey_index(dev, slave, inbox);
3380 update_gid(dev, inbox, (u8)slave);
3381 adjust_proxy_tun_qkey(dev, vhcr, context);
3382 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3383}
3384
3385int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3386 struct mlx4_vhcr *vhcr,
3387 struct mlx4_cmd_mailbox *inbox,
3388 struct mlx4_cmd_mailbox *outbox,
3389 struct mlx4_cmd_info *cmd)
3390{
3391 int err;
3392 struct mlx4_qp_context *context = inbox->buf + 8;
3393
3394 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3395 if (err)
3396 return err;
3397
3398 update_pkey_index(dev, slave, inbox);
3399 update_gid(dev, inbox, (u8)slave);
3400 adjust_proxy_tun_qkey(dev, vhcr, context);
3401 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3402}
3403
3404
3405int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3406 struct mlx4_vhcr *vhcr,
3407 struct mlx4_cmd_mailbox *inbox,
3408 struct mlx4_cmd_mailbox *outbox,
3409 struct mlx4_cmd_info *cmd)
3410{
3411 struct mlx4_qp_context *context = inbox->buf + 8;
3412 adjust_proxy_tun_qkey(dev, vhcr, context);
3413 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3414}
3415
3416int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3417 struct mlx4_vhcr *vhcr,
3418 struct mlx4_cmd_mailbox *inbox,
3419 struct mlx4_cmd_mailbox *outbox,
3420 struct mlx4_cmd_info *cmd)
3421{
3422 int err;
3423 struct mlx4_qp_context *context = inbox->buf + 8;
3424
3425 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3426 if (err)
3427 return err;
3428
3429 adjust_proxy_tun_qkey(dev, vhcr, context);
3430 update_gid(dev, inbox, (u8)slave);
3431 update_pkey_index(dev, slave, inbox);
3432 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3433}
3434
3435int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3436 struct mlx4_vhcr *vhcr,
3437 struct mlx4_cmd_mailbox *inbox,
3438 struct mlx4_cmd_mailbox *outbox,
3439 struct mlx4_cmd_info *cmd)
3440{
3441 int err;
3442 struct mlx4_qp_context *context = inbox->buf + 8;
3443
3444 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3445 if (err)
3446 return err;
3447
3448 adjust_proxy_tun_qkey(dev, vhcr, context);
3449 update_gid(dev, inbox, (u8)slave);
3450 update_pkey_index(dev, slave, inbox);
3451 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3452}
3453
3454int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3455 struct mlx4_vhcr *vhcr,
3456 struct mlx4_cmd_mailbox *inbox,
3457 struct mlx4_cmd_mailbox *outbox,
3458 struct mlx4_cmd_info *cmd)
3459{
3460 int err;
3461 int qpn = vhcr->in_modifier & 0x7fffff;
3462 struct res_qp *qp;
3463
3464 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3465 if (err)
3466 return err;
3467 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3468 if (err)
3469 goto ex_abort;
3470
3471 atomic_dec(&qp->mtt->ref_count);
3472 atomic_dec(&qp->rcq->ref_count);
3473 atomic_dec(&qp->scq->ref_count);
3474 if (qp->srq)
3475 atomic_dec(&qp->srq->ref_count);
3476 res_end_move(dev, slave, RES_QP, qpn);
3477 return 0;
3478
3479ex_abort:
3480 res_abort_move(dev, slave, RES_QP, qpn);
3481
3482 return err;
3483}
3484
3485static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3486 struct res_qp *rqp, u8 *gid)
3487{
3488 struct res_gid *res;
3489
3490 list_for_each_entry(res, &rqp->mcg_list, list) {
3491 if (!memcmp(res->gid, gid, 16))
3492 return res;
3493 }
3494 return NULL;
3495}
3496
3497static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3498 u8 *gid, enum mlx4_protocol prot,
3499 enum mlx4_steer_type steer, u64 reg_id)
3500{
3501 struct res_gid *res;
3502 int err;
3503
3504 res = kzalloc(sizeof *res, GFP_KERNEL);
3505 if (!res)
3506 return -ENOMEM;
3507
3508 spin_lock_irq(&rqp->mcg_spl);
3509 if (find_gid(dev, slave, rqp, gid)) {
3510 kfree(res);
3511 err = -EEXIST;
3512 } else {
3513 memcpy(res->gid, gid, 16);
3514 res->prot = prot;
3515 res->steer = steer;
3516 res->reg_id = reg_id;
3517 list_add_tail(&res->list, &rqp->mcg_list);
3518 err = 0;
3519 }
3520 spin_unlock_irq(&rqp->mcg_spl);
3521
3522 return err;
3523}
3524
3525static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3526 u8 *gid, enum mlx4_protocol prot,
3527 enum mlx4_steer_type steer, u64 *reg_id)
3528{
3529 struct res_gid *res;
3530 int err;
3531
3532 spin_lock_irq(&rqp->mcg_spl);
3533 res = find_gid(dev, slave, rqp, gid);
3534 if (!res || res->prot != prot || res->steer != steer)
3535 err = -EINVAL;
3536 else {
3537 *reg_id = res->reg_id;
3538 list_del(&res->list);
3539 kfree(res);
3540 err = 0;
3541 }
3542 spin_unlock_irq(&rqp->mcg_spl);
3543
3544 return err;
3545}
3546
3547static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3548 int block_loopback, enum mlx4_protocol prot,
3549 enum mlx4_steer_type type, u64 *reg_id)
3550{
3551 switch (dev->caps.steering_mode) {
3552 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3553 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3554 block_loopback, prot,
3555 reg_id);
3556 case MLX4_STEERING_MODE_B0:
3557 return mlx4_qp_attach_common(dev, qp, gid,
3558 block_loopback, prot, type);
3559 default:
3560 return -EINVAL;
3561 }
3562}
3563
3564static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3565 enum mlx4_protocol prot, enum mlx4_steer_type type,
3566 u64 reg_id)
3567{
3568 switch (dev->caps.steering_mode) {
3569 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3570 return mlx4_flow_detach(dev, reg_id);
3571 case MLX4_STEERING_MODE_B0:
3572 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3573 default:
3574 return -EINVAL;
3575 }
3576}
3577
3578int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3579 struct mlx4_vhcr *vhcr,
3580 struct mlx4_cmd_mailbox *inbox,
3581 struct mlx4_cmd_mailbox *outbox,
3582 struct mlx4_cmd_info *cmd)
3583{
3584 struct mlx4_qp qp; /* dummy for calling attach/detach */
3585 u8 *gid = inbox->buf;
3586 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3587 int err;
3588 int qpn;
3589 struct res_qp *rqp;
3590 u64 reg_id = 0;
3591 int attach = vhcr->op_modifier;
3592 int block_loopback = vhcr->in_modifier >> 31;
3593 u8 steer_type_mask = 2;
3594 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3595
3596 qpn = vhcr->in_modifier & 0xffffff;
3597 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3598 if (err)
3599 return err;
3600
3601 qp.qpn = qpn;
3602 if (attach) {
3603 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3604 type, &reg_id);
3605 if (err) {
3606 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
c82e9aa0 3607 goto ex_put;
fab1e24a
HHZ
3608 }
3609 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
c82e9aa0 3610 if (err)
fab1e24a 3611 goto ex_detach;
c82e9aa0 3612 } else {
fab1e24a 3613 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
c82e9aa0
EC
3614 if (err)
3615 goto ex_put;
3616
3617 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3618 if (err)
3619 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3620 qpn, reg_id);
3621 }
3622 put_res(dev, slave, qpn, RES_QP);
3623 return err;
3624
3625ex_detach:
3626 qp_detach(dev, &qp, gid, prot, type, reg_id);
3627ex_put:
3628 put_res(dev, slave, qpn, RES_QP);
3629 return err;
3630}
3631
3632/*
3633 * MAC validation for Flow Steering rules.
3634 * A VF can attach rules only with a MAC address that is assigned to it.
3635 */
3636static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3637 struct list_head *rlist)
3638{
3639 struct mac_res *res, *tmp;
3640 __be64 be_mac;
3641
3642 /* make sure it isn't a multicast or broadcast mac */
3643 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3644 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3645 list_for_each_entry_safe(res, tmp, rlist, list) {
3646 be_mac = cpu_to_be64(res->mac << 16);
3647 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3648 return 0;
3649 }
3650 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3651 eth_header->eth.dst_mac, slave);
3652 return -EINVAL;
3653 }
3654 return 0;
3655}
3656
3657/*
3658 * In case of missing eth header, append eth header with a MAC address
3659 * assigned to the VF.
3660 */
3661static int add_eth_header(struct mlx4_dev *dev, int slave,
3662 struct mlx4_cmd_mailbox *inbox,
3663 struct list_head *rlist, int header_id)
3664{
3665 struct mac_res *res, *tmp;
3666 u8 port;
3667 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3668 struct mlx4_net_trans_rule_hw_eth *eth_header;
3669 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3670 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3671 __be64 be_mac = 0;
3672 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3673
3674 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3675 port = ctrl->port;
3676 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3677
3678 /* Clear a space in the inbox for eth header */
3679 switch (header_id) {
3680 case MLX4_NET_TRANS_RULE_ID_IPV4:
3681 ip_header =
3682 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3683 memmove(ip_header, eth_header,
3684 sizeof(*ip_header) + sizeof(*l4_header));
3685 break;
3686 case MLX4_NET_TRANS_RULE_ID_TCP:
3687 case MLX4_NET_TRANS_RULE_ID_UDP:
3688 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3689 (eth_header + 1);
3690 memmove(l4_header, eth_header, sizeof(*l4_header));
3691 break;
3692 default:
3693 return -EINVAL;
3694 }
3695 list_for_each_entry_safe(res, tmp, rlist, list) {
3696 if (port == res->port) {
3697 be_mac = cpu_to_be64(res->mac << 16);
3698 break;
3699 }
3700 }
3701 if (!be_mac) {
3702 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3703 port);
3704 return -EINVAL;
3705 }
3706
3707 memset(eth_header, 0, sizeof(*eth_header));
3708 eth_header->size = sizeof(*eth_header) >> 2;
3709 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3710 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3711 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3712
3713 return 0;
3714
3715}
3716
3717int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3718 struct mlx4_vhcr *vhcr,
3719 struct mlx4_cmd_mailbox *inbox,
3720 struct mlx4_cmd_mailbox *outbox,
3721 struct mlx4_cmd_info *cmd)
3722{
3723
3724 struct mlx4_priv *priv = mlx4_priv(dev);
3725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3726 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3727 int err;
3728 int qpn;
3729 struct res_qp *rqp;
3730 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3731 struct _rule_hw *rule_header;
3732 int header_id;
3733
3734 if (dev->caps.steering_mode !=
3735 MLX4_STEERING_MODE_DEVICE_MANAGED)
3736 return -EOPNOTSUPP;
3737
3738 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3739 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3740 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3741 if (err) {
3742 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3743 return err;
3744 }
3745 rule_header = (struct _rule_hw *)(ctrl + 1);
3746 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3747
3748 switch (header_id) {
3749 case MLX4_NET_TRANS_RULE_ID_ETH:
3750 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3751 err = -EINVAL;
3752 goto err_put;
3753 }
3754 break;
3755 case MLX4_NET_TRANS_RULE_ID_IB:
3756 break;
3757 case MLX4_NET_TRANS_RULE_ID_IPV4:
3758 case MLX4_NET_TRANS_RULE_ID_TCP:
3759 case MLX4_NET_TRANS_RULE_ID_UDP:
3760 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3761 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3762 err = -EINVAL;
3763 goto err_put;
3764 }
3765 vhcr->in_modifier +=
3766 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3767 break;
3768 default:
3769 pr_err("Corrupted mailbox.\n");
3770 err = -EINVAL;
3771 goto err_put;
3772 }
3773
3774 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3775 vhcr->in_modifier, 0,
3776 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3777 MLX4_CMD_NATIVE);
3778 if (err)
3779 goto err_put;
3780
3781 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3782 if (err) {
3783 mlx4_err(dev, "Fail to add flow steering resources.\n ");
3784 /* detach rule*/
3785 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3786 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3787 MLX4_CMD_NATIVE);
3788 goto err_put;
3789 }
3790 atomic_inc(&rqp->ref_count);
3791err_put:
3792 put_res(dev, slave, qpn, RES_QP);
3793 return err;
3794}
3795
3796int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3797 struct mlx4_vhcr *vhcr,
3798 struct mlx4_cmd_mailbox *inbox,
3799 struct mlx4_cmd_mailbox *outbox,
3800 struct mlx4_cmd_info *cmd)
3801{
3802 int err;
3803 struct res_qp *rqp;
3804 struct res_fs_rule *rrule;
3805
3806 if (dev->caps.steering_mode !=
3807 MLX4_STEERING_MODE_DEVICE_MANAGED)
3808 return -EOPNOTSUPP;
3809
3810 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3811 if (err)
3812 return err;
3813 /* Release the rule from busy state before removal */
3814 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3815 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3816 if (err)
3817 return err;
3818
3819 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3820 if (err) {
3821 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
2c473ae7 3822 goto out;
1b9c6b06
HHZ
3823 }
3824
3825 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3826 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3827 MLX4_CMD_NATIVE);
3828 if (!err)
3829 atomic_dec(&rqp->ref_count);
3830out:
3831 put_res(dev, slave, rrule->qpn, RES_QP);
3832 return err;
3833}
3834
3835enum {
3836 BUSY_MAX_RETRIES = 10
3837};
3838
3839int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3840 struct mlx4_vhcr *vhcr,
3841 struct mlx4_cmd_mailbox *inbox,
3842 struct mlx4_cmd_mailbox *outbox,
3843 struct mlx4_cmd_info *cmd)
3844{
3845 int err;
3846 int index = vhcr->in_modifier & 0xffff;
3847
3848 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3849 if (err)
3850 return err;
3851
3852 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3853 put_res(dev, slave, index, RES_COUNTER);
3854 return err;
3855}
3856
3857int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
3858 struct mlx4_vhcr *vhcr,
3859 struct mlx4_cmd_mailbox *inbox,
3860 struct mlx4_cmd_mailbox *outbox,
3861 struct mlx4_cmd_info *cmd)
3862{
3863 return -EPERM;
3864}
3865
3866
3867static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3868{
3869 struct res_gid *rgid;
3870 struct res_gid *tmp;
3871 struct mlx4_qp qp; /* dummy for calling attach/detach */
3872
3873 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3874 switch (dev->caps.steering_mode) {
3875 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3876 mlx4_flow_detach(dev, rgid->reg_id);
3877 break;
3878 case MLX4_STEERING_MODE_B0:
3879 qp.qpn = rqp->local_qpn;
3880 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3881 rgid->prot, rgid->steer);
3882 break;
3883 }
3884 list_del(&rgid->list);
3885 kfree(rgid);
3886 }
3887}
3888
3889static int _move_all_busy(struct mlx4_dev *dev, int slave,
3890 enum mlx4_resource type, int print)
3891{
3892 struct mlx4_priv *priv = mlx4_priv(dev);
3893 struct mlx4_resource_tracker *tracker =
3894 &priv->mfunc.master.res_tracker;
3895 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3896 struct res_common *r;
3897 struct res_common *tmp;
3898 int busy;
3899
3900 busy = 0;
3901 spin_lock_irq(mlx4_tlock(dev));
3902 list_for_each_entry_safe(r, tmp, rlist, list) {
3903 if (r->owner == slave) {
3904 if (!r->removing) {
3905 if (r->state == RES_ANY_BUSY) {
3906 if (print)
3907 mlx4_dbg(dev,
3908 "%s id 0x%llx is busy\n",
3909 ResourceType(type),
3910 r->res_id);
3911 ++busy;
3912 } else {
3913 r->from_state = r->state;
3914 r->state = RES_ANY_BUSY;
3915 r->removing = 1;
3916 }
3917 }
3918 }
3919 }
3920 spin_unlock_irq(mlx4_tlock(dev));
3921
3922 return busy;
3923}
3924
3925static int move_all_busy(struct mlx4_dev *dev, int slave,
3926 enum mlx4_resource type)
3927{
3928 unsigned long begin;
3929 int busy;
3930
3931 begin = jiffies;
3932 do {
3933 busy = _move_all_busy(dev, slave, type, 0);
3934 if (time_after(jiffies, begin + 5 * HZ))
3935 break;
3936 if (busy)
3937 cond_resched();
3938 } while (busy);
3939
3940 if (busy)
3941 busy = _move_all_busy(dev, slave, type, 1);
3942
3943 return busy;
3944}
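/*
 * Note on the helper above: move_all_busy() polls _move_all_busy() for
 * up to five seconds (5 * HZ jiffies), calling cond_resched() between
 * attempts, and only then makes one final pass with printing enabled so
 * that any resources still marked RES_ANY_BUSY are logged.
 */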
3945static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3946{
3947 struct mlx4_priv *priv = mlx4_priv(dev);
3948 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3949 struct list_head *qp_list =
3950 &tracker->slave_list[slave].res_list[RES_QP];
3951 struct res_qp *qp;
3952 struct res_qp *tmp;
3953 int state;
3954 u64 in_param;
3955 int qpn;
3956 int err;
3957
3958 err = move_all_busy(dev, slave, RES_QP);
3959 if (err)
3960 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
3961 "for slave %d\n", slave);
3962
3963 spin_lock_irq(mlx4_tlock(dev));
3964 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3965 spin_unlock_irq(mlx4_tlock(dev));
3966 if (qp->com.owner == slave) {
3967 qpn = qp->com.res_id;
3968 detach_qp(dev, slave, qp);
3969 state = qp->com.from_state;
3970 while (state != 0) {
3971 switch (state) {
3972 case RES_QP_RESERVED:
3973 spin_lock_irq(mlx4_tlock(dev));
3974 rb_erase(&qp->com.node,
3975 &tracker->res_tree[RES_QP]);
3976 list_del(&qp->com.list);
3977 spin_unlock_irq(mlx4_tlock(dev));
3978 if (!valid_reserved(dev, slave, qpn)) {
3979 __mlx4_qp_release_range(dev, qpn, 1);
3980 mlx4_release_resource(dev, slave,
3981 RES_QP, 1, 0);
3982 }
3983 kfree(qp);
3984 state = 0;
3985 break;
3986 case RES_QP_MAPPED:
3987 if (!valid_reserved(dev, slave, qpn))
3988 __mlx4_qp_free_icm(dev, qpn);
3989 state = RES_QP_RESERVED;
3990 break;
3991 case RES_QP_HW:
3992 in_param = slave;
3993 err = mlx4_cmd(dev, in_param,
3994 qp->local_qpn, 2,
3995 MLX4_CMD_2RST_QP,
3996 MLX4_CMD_TIME_CLASS_A,
3997 MLX4_CMD_NATIVE);
3998 if (err)
3999 mlx4_dbg(dev, "rem_slave_qps: failed"
4000 " to move slave %d qpn %d to"
4001 " reset\n", slave,
4002 qp->local_qpn);
4003 atomic_dec(&qp->rcq->ref_count);
4004 atomic_dec(&qp->scq->ref_count);
4005 atomic_dec(&qp->mtt->ref_count);
4006 if (qp->srq)
4007 atomic_dec(&qp->srq->ref_count);
4008 state = RES_QP_MAPPED;
4009 break;
4010 default:
4011 state = 0;
4012 }
4013 }
4014 }
4015 spin_lock_irq(mlx4_tlock(dev));
4016 }
4017 spin_unlock_irq(mlx4_tlock(dev));
4018}
4019
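/* Free every SRQ still owned by @slave: RES_SRQ_HW moves it back to SW
 * ownership with HW2SW_SRQ and drops the MTT/CQ references, then
 * RES_SRQ_ALLOCATED frees the ICM, returns the quota and deletes the
 * tracker entry.
 */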
4020static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4021{
4022 struct mlx4_priv *priv = mlx4_priv(dev);
4023 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4024 struct list_head *srq_list =
4025 &tracker->slave_list[slave].res_list[RES_SRQ];
4026 struct res_srq *srq;
4027 struct res_srq *tmp;
4028 int state;
4029 u64 in_param;
4030 LIST_HEAD(tlist);
4031 int srqn;
4032 int err;
4033
4034 err = move_all_busy(dev, slave, RES_SRQ);
4035 if (err)
 4036 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
 4037 slave);
4038
4039 spin_lock_irq(mlx4_tlock(dev));
4040 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4041 spin_unlock_irq(mlx4_tlock(dev));
4042 if (srq->com.owner == slave) {
4043 srqn = srq->com.res_id;
4044 state = srq->com.from_state;
4045 while (state != 0) {
4046 switch (state) {
4047 case RES_SRQ_ALLOCATED:
4048 __mlx4_srq_free_icm(dev, srqn);
4049 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4050 rb_erase(&srq->com.node,
4051 &tracker->res_tree[RES_SRQ]);
c82e9aa0
EC
4052 list_del(&srq->com.list);
4053 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4054 mlx4_release_resource(dev, slave,
4055 RES_SRQ, 1, 0);
c82e9aa0
EC
4056 kfree(srq);
4057 state = 0;
4058 break;
4059
4060 case RES_SRQ_HW:
4061 in_param = slave;
4062 err = mlx4_cmd(dev, in_param, srqn, 1,
4063 MLX4_CMD_HW2SW_SRQ,
4064 MLX4_CMD_TIME_CLASS_A,
4065 MLX4_CMD_NATIVE);
4066 if (err)
 4067 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
 4068 slave, srqn);
4071
4072 atomic_dec(&srq->mtt->ref_count);
4073 if (srq->cq)
4074 atomic_dec(&srq->cq->ref_count);
4075 state = RES_SRQ_ALLOCATED;
4076 break;
4077
4078 default:
4079 state = 0;
4080 }
4081 }
4082 }
4083 spin_lock_irq(mlx4_tlock(dev));
4084 }
4085 spin_unlock_irq(mlx4_tlock(dev));
4086}
4087
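/* Free every CQ owned by @slave that is no longer referenced by a QP or
 * SRQ: RES_CQ_HW issues HW2SW_CQ and drops the MTT reference, then
 * RES_CQ_ALLOCATED frees the ICM, returns the quota and deletes the
 * tracker entry.
 */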
4088static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4089{
4090 struct mlx4_priv *priv = mlx4_priv(dev);
4091 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4092 struct list_head *cq_list =
4093 &tracker->slave_list[slave].res_list[RES_CQ];
4094 struct res_cq *cq;
4095 struct res_cq *tmp;
4096 int state;
4097 u64 in_param;
4098 LIST_HEAD(tlist);
4099 int cqn;
4100 int err;
4101
4102 err = move_all_busy(dev, slave, RES_CQ);
4103 if (err)
 4104 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
 4105 slave);
4106
4107 spin_lock_irq(mlx4_tlock(dev));
4108 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4109 spin_unlock_irq(mlx4_tlock(dev));
4110 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4111 cqn = cq->com.res_id;
4112 state = cq->com.from_state;
4113 while (state != 0) {
4114 switch (state) {
4115 case RES_CQ_ALLOCATED:
4116 __mlx4_cq_free_icm(dev, cqn);
4117 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4118 rb_erase(&cq->com.node,
4119 &tracker->res_tree[RES_CQ]);
c82e9aa0
EC
4120 list_del(&cq->com.list);
4121 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4122 mlx4_release_resource(dev, slave,
4123 RES_CQ, 1, 0);
c82e9aa0
EC
4124 kfree(cq);
4125 state = 0;
4126 break;
4127
4128 case RES_CQ_HW:
4129 in_param = slave;
4130 err = mlx4_cmd(dev, in_param, cqn, 1,
4131 MLX4_CMD_HW2SW_CQ,
4132 MLX4_CMD_TIME_CLASS_A,
4133 MLX4_CMD_NATIVE);
4134 if (err)
 4135 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
 4136 slave, cqn);
4139 atomic_dec(&cq->mtt->ref_count);
4140 state = RES_CQ_ALLOCATED;
4141 break;
4142
4143 default:
4144 state = 0;
4145 }
4146 }
4147 }
4148 spin_lock_irq(mlx4_tlock(dev));
4149 }
4150 spin_unlock_irq(mlx4_tlock(dev));
4151}
4152
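/* Free every MPT (memory region) still owned by @slave: RES_MPT_HW issues
 * HW2SW_MPT and drops the MTT reference, RES_MPT_MAPPED frees the ICM, and
 * RES_MPT_RESERVED releases the MPT key, returns the quota and deletes the
 * tracker entry.
 */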
4153static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4154{
4155 struct mlx4_priv *priv = mlx4_priv(dev);
4156 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4157 struct list_head *mpt_list =
4158 &tracker->slave_list[slave].res_list[RES_MPT];
4159 struct res_mpt *mpt;
4160 struct res_mpt *tmp;
4161 int state;
4162 u64 in_param;
4163 LIST_HEAD(tlist);
4164 int mptn;
4165 int err;
4166
4167 err = move_all_busy(dev, slave, RES_MPT);
4168 if (err)
 4169 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
 4170 slave);
4171
4172 spin_lock_irq(mlx4_tlock(dev));
4173 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4174 spin_unlock_irq(mlx4_tlock(dev));
4175 if (mpt->com.owner == slave) {
4176 mptn = mpt->com.res_id;
4177 state = mpt->com.from_state;
4178 while (state != 0) {
4179 switch (state) {
4180 case RES_MPT_RESERVED:
b20e519a 4181 __mlx4_mpt_release(dev, mpt->key);
c82e9aa0 4182 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4183 rb_erase(&mpt->com.node,
4184 &tracker->res_tree[RES_MPT]);
c82e9aa0
EC
4185 list_del(&mpt->com.list);
4186 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4187 mlx4_release_resource(dev, slave,
4188 RES_MPT, 1, 0);
c82e9aa0
EC
4189 kfree(mpt);
4190 state = 0;
4191 break;
4192
4193 case RES_MPT_MAPPED:
b20e519a 4194 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
4195 state = RES_MPT_RESERVED;
4196 break;
4197
4198 case RES_MPT_HW:
4199 in_param = slave;
4200 err = mlx4_cmd(dev, in_param, mptn, 0,
4201 MLX4_CMD_HW2SW_MPT,
4202 MLX4_CMD_TIME_CLASS_A,
4203 MLX4_CMD_NATIVE);
4204 if (err)
 4205 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
 4206 slave, mptn);
4209 if (mpt->mtt)
4210 atomic_dec(&mpt->mtt->ref_count);
4211 state = RES_MPT_MAPPED;
4212 break;
4213 default:
4214 state = 0;
4215 }
4216 }
4217 }
4218 spin_lock_irq(mlx4_tlock(dev));
4219 }
4220 spin_unlock_irq(mlx4_tlock(dev));
4221}
4222
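/* Return every MTT range still owned by @slave to the common allocator and
 * give 1 << order entries back to the slave's quota.
 */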
4223static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4224{
4225 struct mlx4_priv *priv = mlx4_priv(dev);
4226 struct mlx4_resource_tracker *tracker =
4227 &priv->mfunc.master.res_tracker;
4228 struct list_head *mtt_list =
4229 &tracker->slave_list[slave].res_list[RES_MTT];
4230 struct res_mtt *mtt;
4231 struct res_mtt *tmp;
4232 int state;
4233 LIST_HEAD(tlist);
4234 int base;
4235 int err;
4236
4237 err = move_all_busy(dev, slave, RES_MTT);
4238 if (err)
 4239 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
 4240 slave);
4241
4242 spin_lock_irq(mlx4_tlock(dev));
4243 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4244 spin_unlock_irq(mlx4_tlock(dev));
4245 if (mtt->com.owner == slave) {
4246 base = mtt->com.res_id;
4247 state = mtt->com.from_state;
4248 while (state != 0) {
4249 switch (state) {
4250 case RES_MTT_ALLOCATED:
4251 __mlx4_free_mtt_range(dev, base,
4252 mtt->order);
4253 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4254 rb_erase(&mtt->com.node,
4255 &tracker->res_tree[RES_MTT]);
c82e9aa0
EC
4256 list_del(&mtt->com.list);
4257 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4258 mlx4_release_resource(dev, slave, RES_MTT,
4259 1 << mtt->order, 0);
c82e9aa0
EC
4260 kfree(mtt);
4261 state = 0;
4262 break;
4263
4264 default:
4265 state = 0;
4266 }
4267 }
4268 }
4269 spin_lock_irq(mlx4_tlock(dev));
4270 }
4271 spin_unlock_irq(mlx4_tlock(dev));
4272}
4273
1b9c6b06
HHZ
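/* Detach every flow steering rule still registered by @slave and drop it
 * from the resource tracker.
 */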
4274static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4275{
4276 struct mlx4_priv *priv = mlx4_priv(dev);
4277 struct mlx4_resource_tracker *tracker =
4278 &priv->mfunc.master.res_tracker;
4279 struct list_head *fs_rule_list =
4280 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4281 struct res_fs_rule *fs_rule;
4282 struct res_fs_rule *tmp;
4283 int state;
4284 u64 base;
4285 int err;
4286
4287 err = move_all_busy(dev, slave, RES_FS_RULE);
4288 if (err)
 4289 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4290 slave);
4291
4292 spin_lock_irq(mlx4_tlock(dev));
4293 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4294 spin_unlock_irq(mlx4_tlock(dev));
4295 if (fs_rule->com.owner == slave) {
4296 base = fs_rule->com.res_id;
4297 state = fs_rule->com.from_state;
4298 while (state != 0) {
4299 switch (state) {
4300 case RES_FS_RULE_ALLOCATED:
4301 /* detach rule */
4302 err = mlx4_cmd(dev, base, 0, 0,
4303 MLX4_QP_FLOW_STEERING_DETACH,
4304 MLX4_CMD_TIME_CLASS_A,
4305 MLX4_CMD_NATIVE);
4306
4307 spin_lock_irq(mlx4_tlock(dev));
4308 rb_erase(&fs_rule->com.node,
4309 &tracker->res_tree[RES_FS_RULE]);
4310 list_del(&fs_rule->com.list);
4311 spin_unlock_irq(mlx4_tlock(dev));
4312 kfree(fs_rule);
4313 state = 0;
4314 break;
4315
4316 default:
4317 state = 0;
4318 }
4319 }
4320 }
4321 spin_lock_irq(mlx4_tlock(dev));
4322 }
4323 spin_unlock_irq(mlx4_tlock(dev));
4324}
4325
c82e9aa0
EC
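/* Free every EQ still owned by @slave: RES_EQ_HW moves it back to SW
 * ownership with HW2SW_EQ (retrying if the mailbox allocation fails) and
 * drops the MTT reference, then RES_EQ_RESERVED deletes the tracker entry.
 */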
4326static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4327{
4328 struct mlx4_priv *priv = mlx4_priv(dev);
4329 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4330 struct list_head *eq_list =
4331 &tracker->slave_list[slave].res_list[RES_EQ];
4332 struct res_eq *eq;
4333 struct res_eq *tmp;
4334 int err;
4335 int state;
4336 LIST_HEAD(tlist);
4337 int eqn;
4338 struct mlx4_cmd_mailbox *mailbox;
4339
4340 err = move_all_busy(dev, slave, RES_EQ);
4341 if (err)
 4342 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
 4343 slave);
4344
4345 spin_lock_irq(mlx4_tlock(dev));
4346 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4347 spin_unlock_irq(mlx4_tlock(dev));
4348 if (eq->com.owner == slave) {
4349 eqn = eq->com.res_id;
4350 state = eq->com.from_state;
4351 while (state != 0) {
4352 switch (state) {
4353 case RES_EQ_RESERVED:
4354 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4355 rb_erase(&eq->com.node,
4356 &tracker->res_tree[RES_EQ]);
c82e9aa0
EC
4357 list_del(&eq->com.list);
4358 spin_unlock_irq(mlx4_tlock(dev));
4359 kfree(eq);
4360 state = 0;
4361 break;
4362
4363 case RES_EQ_HW:
4364 mailbox = mlx4_alloc_cmd_mailbox(dev);
4365 if (IS_ERR(mailbox)) {
4366 cond_resched();
4367 continue;
4368 }
4369 err = mlx4_cmd_box(dev, slave, 0,
4370 eqn & 0xff, 0,
4371 MLX4_CMD_HW2SW_EQ,
4372 MLX4_CMD_TIME_CLASS_A,
4373 MLX4_CMD_NATIVE);
eb71d0d6
JM
4374 if (err)
 4375 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
 4376 slave, eqn);
c82e9aa0 4378 mlx4_free_cmd_mailbox(dev, mailbox);
eb71d0d6
JM
4379 atomic_dec(&eq->mtt->ref_count);
4380 state = RES_EQ_RESERVED;
c82e9aa0
EC
4381 break;
4382
4383 default:
4384 state = 0;
4385 }
4386 }
4387 }
4388 spin_lock_irq(mlx4_tlock(dev));
4389 }
4390 spin_unlock_irq(mlx4_tlock(dev));
4391}
4392
ba062d52
JM
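/* Free every counter still owned by @slave and return it to the slave's
 * quota.
 */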
4393static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4394{
4395 struct mlx4_priv *priv = mlx4_priv(dev);
4396 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4397 struct list_head *counter_list =
4398 &tracker->slave_list[slave].res_list[RES_COUNTER];
4399 struct res_counter *counter;
4400 struct res_counter *tmp;
4401 int err;
4402 int index;
4403
4404 err = move_all_busy(dev, slave, RES_COUNTER);
4405 if (err)
 4406 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
 4407 slave);
4408
4409 spin_lock_irq(mlx4_tlock(dev));
4410 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4411 if (counter->com.owner == slave) {
4412 index = counter->com.res_id;
4af1c048
HHZ
4413 rb_erase(&counter->com.node,
4414 &tracker->res_tree[RES_COUNTER]);
ba062d52
JM
4415 list_del(&counter->com.list);
4416 kfree(counter);
4417 __mlx4_counter_free(dev, index);
146f3ef4 4418 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52
JM
4419 }
4420 }
4421 spin_unlock_irq(mlx4_tlock(dev));
4422}
4423
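/* Free every XRC domain still owned by @slave. */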
4424static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4425{
4426 struct mlx4_priv *priv = mlx4_priv(dev);
4427 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4428 struct list_head *xrcdn_list =
4429 &tracker->slave_list[slave].res_list[RES_XRCD];
4430 struct res_xrcdn *xrcd;
4431 struct res_xrcdn *tmp;
4432 int err;
4433 int xrcdn;
4434
4435 err = move_all_busy(dev, slave, RES_XRCD);
4436 if (err)
 4437 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
 4438 slave);
4439
4440 spin_lock_irq(mlx4_tlock(dev));
4441 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4442 if (xrcd->com.owner == slave) {
4443 xrcdn = xrcd->com.res_id;
4af1c048 4444 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
ba062d52
JM
4445 list_del(&xrcd->com.list);
4446 kfree(xrcd);
4447 __mlx4_xrcd_free(dev, xrcdn);
4448 }
4449 }
4450 spin_unlock_irq(mlx4_tlock(dev));
4451}
4452
c82e9aa0
EC
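/* Master-side cleanup of everything @slave still owns, done under the
 * slave's resource-tracker mutex. Resources that hold references on others
 * (QPs, SRQs, CQs, MRs, EQs) are torn down before the MTTs they reference.
 */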
4453void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4454{
4455 struct mlx4_priv *priv = mlx4_priv(dev);
4456
4457 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4874080d 4458 rem_slave_vlans(dev, slave);
c82e9aa0 4459 rem_slave_macs(dev, slave);
80cb0021 4460 rem_slave_fs_rule(dev, slave);
c82e9aa0
EC
4461 rem_slave_qps(dev, slave);
4462 rem_slave_srqs(dev, slave);
4463 rem_slave_cqs(dev, slave);
4464 rem_slave_mrs(dev, slave);
4465 rem_slave_eqs(dev, slave);
4466 rem_slave_mtts(dev, slave);
ba062d52
JM
4467 rem_slave_counters(dev, slave);
4468 rem_slave_xrcdns(dev, slave);
c82e9aa0
EC
4469 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4470}
b01978ca
JM
4471
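/* Work-queue handler, run on the master, that pushes a new VST vlan/QoS
 * setting into all of a VF's Ethernet QPs via UPDATE_QP. Switching back to
 * VGT (work->vlan_id == MLX4_VGT) restores the per-QP values saved before
 * VST was enforced; if the updates succeed, the previously registered VLAN
 * is released.
 */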
4472void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4473{
4474 struct mlx4_vf_immed_vlan_work *work =
4475 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4476 struct mlx4_cmd_mailbox *mailbox;
4477 struct mlx4_update_qp_context *upd_context;
4478 struct mlx4_dev *dev = &work->priv->dev;
4479 struct mlx4_resource_tracker *tracker =
4480 &work->priv->mfunc.master.res_tracker;
4481 struct list_head *qp_list =
4482 &tracker->slave_list[work->slave].res_list[RES_QP];
4483 struct res_qp *qp;
4484 struct res_qp *tmp;
f0f829bf
RE
4485 u64 qp_path_mask_vlan_ctrl =
4486 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
b01978ca
JM
4487 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4488 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4489 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4490 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
f0f829bf
RE
4491 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4492
4493 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4494 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4495 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4496 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4497 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4498 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
b01978ca
JM
4499 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4500
4501 int err;
4502 int port, errors = 0;
4503 u8 vlan_control;
4504
4505 if (mlx4_is_slave(dev)) {
4506 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4507 work->slave);
4508 goto out;
4509 }
4510
4511 mailbox = mlx4_alloc_cmd_mailbox(dev);
4512 if (IS_ERR(mailbox))
4513 goto out;
0a6eac24
RE
4514 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4515 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4516 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4517 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4518 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4519 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4520 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4521 else if (!work->vlan_id)
b01978ca
JM
4522 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4523 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4524 else
4525 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4526 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4527 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4528
4529 upd_context = mailbox->buf;
f0f829bf 4530 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
b01978ca
JM
4531
4532 spin_lock_irq(mlx4_tlock(dev));
4533 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4534 spin_unlock_irq(mlx4_tlock(dev));
4535 if (qp->com.owner == work->slave) {
4536 if (qp->com.from_state != RES_QP_HW ||
4537 !qp->sched_queue || /* no INIT2RTR trans yet */
4538 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4539 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4540 spin_lock_irq(mlx4_tlock(dev));
4541 continue;
4542 }
4543 port = (qp->sched_queue >> 6 & 1) + 1;
4544 if (port != work->port) {
4545 spin_lock_irq(mlx4_tlock(dev));
4546 continue;
4547 }
f0f829bf
RE
4548 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4549 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4550 else
4551 upd_context->primary_addr_path_mask =
4552 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4553 if (work->vlan_id == MLX4_VGT) {
4554 upd_context->qp_context.param3 = qp->param3;
4555 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4556 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4557 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4558 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4559 upd_context->qp_context.pri_path.feup = qp->feup;
4560 upd_context->qp_context.pri_path.sched_queue =
4561 qp->sched_queue;
4562 } else {
4563 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4564 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4565 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4566 upd_context->qp_context.pri_path.fvl_rx =
4567 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4568 upd_context->qp_context.pri_path.fl =
4569 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4570 upd_context->qp_context.pri_path.feup =
4571 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4572 upd_context->qp_context.pri_path.sched_queue =
4573 qp->sched_queue & 0xC7;
4574 upd_context->qp_context.pri_path.sched_queue |=
4575 ((work->qos & 0x7) << 3);
4576 }
b01978ca
JM
4577
4578 err = mlx4_cmd(dev, mailbox->dma,
4579 qp->local_qpn & 0xffffff,
4580 0, MLX4_CMD_UPDATE_QP,
4581 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4582 if (err) {
 4583 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
 4584 work->slave, port, qp->local_qpn, err);
4587 errors++;
4588 }
4589 }
4590 spin_lock_irq(mlx4_tlock(dev));
4591 }
4592 spin_unlock_irq(mlx4_tlock(dev));
4593 mlx4_free_cmd_mailbox(dev, mailbox);
4594
4595 if (errors)
4596 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4597 errors, work->slave, work->port);
4598
 4599 /* Unregister the previous vlan_id, if needed, provided there were
 4600 * no errors while updating the QPs.
 4601 */
4602 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4603 NO_INDX != work->orig_vlan_ix)
4604 __mlx4_unregister_vlan(&work->priv->dev, work->port,
2009d005 4605 work->orig_vlan_id);
b01978ca
JM
4606out:
4607 kfree(work);
4608 return;
4609}