/* drivers/net/ethernet/mellanox/mlx5/core/vport.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
                                   u16 vport, u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

        MLX5_SET(query_vport_state_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_STATE);
        MLX5_SET(query_vport_state_in, in, op_mod, opmod);
        MLX5_SET(query_vport_state_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(query_vport_state_in, in, other_vport, 1);

        return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

        _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

        return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
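
/*
 * Usage sketch (illustrative, not part of the original file): read the
 * operational state of the local NIC vport.  Assumes "mdev" is a probed
 * device and "netdev" is its hypothetical netdev; the op_mod constant is
 * from mlx5_ifc.h, and a returned state of 1 means the vport is up.
 *
 *      u8 state = mlx5_query_vport_state(mdev,
 *                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
 *      if (state == 0x1)
 *              netif_carrier_on(netdev);
 */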

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

        _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

        return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
                                  u16 vport, u8 state)
{
        u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

        MLX5_SET(modify_vport_state_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_VPORT_STATE);
        MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
        MLX5_SET(modify_vport_state_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(modify_vport_state_in, in, other_vport, 1);
        MLX5_SET(modify_vport_state_in, in, admin_state, state);

        return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
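
/*
 * Usage sketch (illustrative only): set the e-switch administrative state
 * of VF "vf"'s vport (vport number vf + 1) to UP, as the e-switch code
 * does.  Constant names are assumed from mlx5_ifc.h of this era and may
 * differ across kernel versions.
 *
 *      err = mlx5_modify_vport_admin_state(dev,
 *                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
 *                      vf + 1, MLX5_ESW_VPORT_ADMIN_STATE_UP);
 */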

static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
                                        u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
                                         int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                    u16 vport, u8 *min_inline)
{
        u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
        int err;

        err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
        if (!err)
                *min_inline = MLX5_GET(query_nic_vport_context_out, out,
                                       nic_vport_context.min_wqe_inline_mode);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 min_inline)
{
        u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *nic_vport_ctx;

        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.min_inline, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
        MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 min_wqe_inline_mode, min_inline);

        return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
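
/*
 * Usage sketch (illustrative only): read the PF's minimum WQE inline mode
 * and mirror it onto a VF vport.  Only functions from this file are used;
 * "vf" is a hypothetical VF index (VF n is vport n + 1).
 *
 *      u8 mode;
 *
 *      err = mlx5_query_nic_vport_min_inline(mdev, 0, &mode);
 *      if (!err)
 *              err = mlx5_modify_nic_vport_min_inline(mdev, vf + 1, mode);
 */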

int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 *addr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        u8 *out_addr;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                nic_vport_context.permanent_address);

        err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
        if (!err)
                ether_addr_copy(addr, &out_addr[2]);

        kvfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
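
/*
 * Usage sketch (illustrative only): fetch the permanent MAC of the local
 * vport, e.g. when initializing a netdev ("netdev" is hypothetical here).
 *
 *      u8 mac[ETH_ALEN];
 *
 *      err = mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 *      if (!err)
 *              ether_addr_copy(netdev->dev_addr, mac);
 */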

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                      u16 vport, u8 *addr)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;
        void *nic_vport_ctx;
        u8 *perm_mac;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.permanent_address, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
        perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
                                permanent_address);

        ether_addr_copy(&perm_mac[2], addr);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
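
/*
 * Usage sketch (illustrative only): program a new MAC for a VF vport.
 * Vport 0 is the local (PF) vport; under the e-switch convention VF n
 * uses vport n + 1.  "vf" and "new_mac" are hypothetical.
 *
 *      err = mlx5_modify_nic_vport_mac_address(mdev, vf + 1, new_mac);
 */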

int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        u32 *out;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
        if (!err)
                *mtu = MLX5_GET(query_nic_vport_context_out, out,
                                nic_vport_context.mtu);

        kvfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *in;
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
        MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
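
/*
 * Usage sketch (illustrative only): round-trip the vport MTU.  Note this
 * is the hardware MTU, which includes Ethernet header/FCS overhead on top
 * of the usual software MTU.
 *
 *      u16 hw_mtu;
 *
 *      err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
 *      if (!err)
 *              err = mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
 */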

int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                  u32 vport,
                                  enum mlx5_list_type list_type,
                                  u8 addr_list[][ETH_ALEN],
                                  int *list_size)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
        void *nic_vport_ctx;
        int max_list_size;
        int req_list_size;
        int out_sz;
        void *out;
        int err;
        int i;

        req_list_size = *list_size;

        max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
                1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

        if (req_list_size > max_list_size) {
                mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
                               req_list_size, max_list_size);
                req_list_size = max_list_size;
        }

        out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                        req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
        MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto out;

        nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                     nic_vport_context);
        req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
                                 allowed_list_size);

        *list_size = req_list_size;
        for (i = 0; i < req_list_size; i++) {
                u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
                                            nic_vport_ctx,
                                            current_uc_mac_address[i]) + 2;
                ether_addr_copy(addr_list[i], mac_addr);
        }
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
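
/*
 * Usage sketch (illustrative only): dump the current unicast MAC list of
 * a vport.  *list_size is in/out: it caps the request on entry and holds
 * the number of valid entries on return.
 *
 *      u8 addrs[8][ETH_ALEN];
 *      int size = ARRAY_SIZE(addrs);
 *
 *      err = mlx5_query_nic_vport_mac_list(dev, vport,
 *                                          MLX5_NVPRT_LIST_TYPE_UC,
 *                                          addrs, &size);
 */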

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                   enum mlx5_list_type list_type,
                                   u8 addr_list[][ETH_ALEN],
                                   int list_size)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
        void *nic_vport_ctx;
        int max_list_size;
        int in_sz;
        void *in;
        int err;
        int i;

        max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
                1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

        if (list_size > max_list_size)
                return -ENOSPC;

        in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

        memset(out, 0, sizeof(out));
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.addresses_list, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
                                     nic_vport_context);

        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_type, list_type);
        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_size, list_size);

        for (i = 0; i < list_size; i++) {
                u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
                                            nic_vport_ctx,
                                            current_uc_mac_address[i]) + 2;
                ether_addr_copy(curr_mac, addr_list[i]);
        }

        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
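
/*
 * Usage sketch (illustrative only): replace the vport's multicast filter
 * list with "mc_list", a hypothetical array of "n" addresses.  -ENOSPC is
 * returned when n exceeds the log_max_current_mc_list capability.
 *
 *      err = mlx5_modify_nic_vport_mac_list(dev, MLX5_NVPRT_LIST_TYPE_MC,
 *                                           mc_list, n);
 */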

int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
                               u32 vport,
                               u16 vlans[],
                               int *size)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
        void *nic_vport_ctx;
        int req_list_size;
        int max_list_size;
        int out_sz;
        void *out;
        int err;
        int i;

        req_list_size = *size;
        max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
        if (req_list_size > max_list_size) {
                mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
                               req_list_size, max_list_size);
                req_list_size = max_list_size;
        }

        out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                        req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
        MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
                 MLX5_NVPRT_LIST_TYPE_VLAN);
        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto out;

        nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                     nic_vport_context);
        req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
                                 allowed_list_size);

        *size = req_list_size;
        for (i = 0; i < req_list_size; i++) {
                void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
                                               nic_vport_ctx,
                                               current_uc_mac_address[i]);
                vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
        }
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
                                u16 vlans[],
                                int list_size)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
        void *nic_vport_ctx;
        int max_list_size;
        int in_sz;
        void *in;
        int err;
        int i;

        max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

        if (list_size > max_list_size)
                return -ENOSPC;

        in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                list_size * MLX5_ST_SZ_BYTES(vlan_layout);

        memset(out, 0, sizeof(out));
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.addresses_list, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
                                     nic_vport_context);

        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_size, list_size);

        for (i = 0; i < list_size; i++) {
                void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
                                               nic_vport_ctx,
                                               current_uc_mac_address[i]);
                MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
        }

        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
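
/*
 * Usage sketch (illustrative only): read the local vport's allowed-VLAN
 * list and program the same list back.  "size" is in/out for the query,
 * exactly like the MAC-list helper above.
 *
 *      u16 vlans[16];
 *      int size = ARRAY_SIZE(vlans);
 *
 *      err = mlx5_query_nic_vport_vlans(dev, 0, vlans, &size);
 *      if (!err)
 *              err = mlx5_modify_nic_vport_vlans(dev, vlans, size);
 */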

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, 0, out, outlen);

        *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                        nic_vport_context.system_image_guid);

        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, 0, out, outlen);

        *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                nic_vport_context.node_guid);

        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                                    u32 vport, u64 node_guid)
{
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *nic_vport_context;
        void *in;
        int err;

        if (!vport)
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
        if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
                return -ENOTSUPP;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.node_guid, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
        MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

        nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                         in, nic_vport_context);
        MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, 0, out, outlen);

        *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
                                   nic_vport_context.qkey_violation_counter);

        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
                             u8 port_num, u16 vf_num, u16 gid_index,
                             union ib_gid *gid)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        union ib_gid *tmp;
        int tbsz;
        int nout;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
        mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
                      vf_num, gid_index, tbsz);

        if (gid_index > tbsz && gid_index != 0xffff)
                return -EINVAL;

        if (gid_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * sizeof(*gid);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        gid->global.subnet_prefix = tmp->global.subnet_prefix;
        gid->global.interface_id = tmp->global.interface_id;

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
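
/*
 * Usage sketch (illustrative only): read GID 0 of the local vport on
 * port 1.  A group manager may instead pass other_vport = 1 and the
 * target vport number in vf_num; without vport_group_manager that path
 * returns -EPERM.
 *
 *      union ib_gid gid;
 *
 *      err = mlx5_query_hca_vport_gid(dev, 0, 1, 0, 0, &gid);
 */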

int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
                              u8 port_num, u16 vf_num, u16 pkey_index,
                              u16 *pkey)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        void *pkarr;
        int nout;
        int tbsz;
        int err;
        int i;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
        if (pkey_index > tbsz && pkey_index != 0xffff)
                return -EINVAL;

        if (pkey_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
        for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
                *pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
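
/*
 * Usage sketch (illustrative only): read P_Key 0 of the local vport on
 * port 1.  Passing pkey_index == 0xffff fetches the whole table into a
 * caller-provided array instead of a single entry.
 *
 *      u16 pkey;
 *
 *      err = mlx5_query_hca_vport_pkey(dev, 0, 1, 0, 0, &pkey);
 */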

int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
                                 u8 other_vport, u8 port_num,
                                 u16 vf_num,
                                 struct mlx5_hca_vport_context *rep)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
        int is_group_manager;
        void *out;
        void *ctx;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto ex;

        ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
        rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
        rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
        rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
        rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
        rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
        rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
                                      port_physical_state);
        rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
        rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
                                               port_physical_state);
        rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
        rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
        rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
        rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask1_field_select);
        rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
        rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask2_field_select);
        rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
        rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
                                           init_type_reply);
        rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
        rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
                                          subnet_timeout);
        rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
        rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
        rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  qkey_violation_counter);
        rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  pkey_violation_counter);
        rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
        rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
                                            system_image_guid);

ex:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
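
/*
 * Usage sketch (illustrative only): snapshot the IB attributes of the
 * local vport on port 1, as the two GUID helpers below do internally.
 *
 *      struct mlx5_hca_vport_context rep = {};
 *
 *      err = mlx5_query_hca_vport_context(dev, 0, 1, 0, &rep);
 *      if (!err)
 *              mlx5_core_dbg(dev, "lid %d sm_lid %d\n", rep.lid, rep.sm_lid);
 */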

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
                                           u64 *sys_image_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *sys_image_guid = rep->sys_image_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
                                   u64 *node_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *node_guid = rep->node_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
                                 u32 vport,
                                 int *promisc_uc,
                                 int *promisc_mc,
                                 int *promisc_all)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
        if (err)
                goto out;

        *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
                               nic_vport_context.promisc_uc);
        *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
                               nic_vport_context.promisc_mc);
        *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
                                nic_vport_context.promisc_all);

out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
                                  int promisc_uc,
                                  int promisc_mc,
                                  int promisc_all)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_err(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_uc, promisc_uc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_mc, promisc_mc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_all, promisc_all);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
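
/*
 * Usage sketch (illustrative only): put the local vport into accept-all
 * mode and later restore filtered reception.
 *
 *      err = mlx5_modify_nic_vport_promisc(mdev, 0, 0, 1);
 *      ...
 *      err = mlx5_modify_nic_vport_promisc(mdev, 0, 0, 0);
 */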

enum mlx5_vport_roce_state {
        MLX5_VPORT_ROCE_DISABLED = 0,
        MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
                                            enum mlx5_vport_roce_state state)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
        MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
                 state);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
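
/*
 * Usage sketch (illustrative only): a RoCE consumer (e.g. an IB device
 * layered on this core device) brackets its lifetime with these calls.
 * Note this version writes the state unconditionally; it is not
 * reference counted.
 *
 *      err = mlx5_nic_vport_enable_roce(mdev);
 *      ...
 *      mlx5_nic_vport_disable_roce(mdev);
 */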

int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
                                  int vf, u8 port_num, void *out,
                                  size_t out_sz)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
        int is_group_manager;
        void *in;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        in = mlx5_vzalloc(in_sz);
        if (!in) {
                err = -ENOMEM;
                return err;
        }

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_vport_counter_in, in, other_vport, 1);
                        MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
                } else {
                        err = -EPERM;
                        goto free;
                }
        }
        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_vport_counter_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
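
/*
 * Usage sketch (illustrative only): pull the IB traffic counters of VF
 * "vf" on port 1 (the function translates vf to vport vf + 1 itself).
 * The field name is assumed from query_vport_counter_out in mlx5_ifc.h.
 *
 *      int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 *      void *out = mlx5_vzalloc(out_sz);
 *
 *      if (out) {
 *              err = mlx5_core_query_vport_counter(dev, 1, vf, 1,
 *                                                  out, out_sz);
 *              if (!err)
 *                      rx_octets = MLX5_GET64(query_vport_counter_out, out,
 *                                             received_ib_unicast.octets);
 *              kvfree(out);
 *      }
 */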

int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
                                       u8 other_vport, u8 port_num,
                                       int vf,
                                       struct mlx5_hca_vport_context *req)
{
        int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
        u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
        int is_group_manager;
        void *in;
        int err;
        void *ctx;

        mlx5_core_dbg(dev, "vf %d\n", vf);
        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        memset(out, 0, sizeof(out));
        MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) > 1)
                MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

        ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
        MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
        MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
        MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
        MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
        MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
        MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
        MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
        MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
        MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
        MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
        MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
        MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
        MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
        MLX5_SET(hca_vport_context, ctx, lid, req->lid);
        MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
        MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
        MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
        MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
        MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
        MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
        MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
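
/*
 * Usage sketch (illustrative only): set the node GUID of VF "vf" through
 * the HCA vport context, selecting only that field.  The field-select
 * bits (MLX5_HCA_VPORT_SEL_*) are assumed from linux/mlx5/driver.h; note
 * the caller passes the vport number (vf + 1) here, unlike the counter
 * query above.
 *
 *      struct mlx5_hca_vport_context req = {};
 *
 *      req.field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
 *      req.node_guid = guid;
 *      err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, &req);
 */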