/* drivers/infiniband/hw/mlx5/cmd.c */
/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cmd.h"

int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}

int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

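/*
 * Note: the cong_point argument selects the congestion protocol whose
 * parameters are queried; it is written into the cong_protocol field of
 * the QUERY_CONG_PARAMS command below.
 */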
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}

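/*
 * Allocate @length bytes of device memory (MEMIC) with the requested log2
 * @alignment. On success, *@addr holds the CPU physical address of the
 * allocation inside the device's MEMIC BAR.
 */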
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* The mlx5 device expresses alignment as 64 * 2^driver_value,
	 * so the requested log2 alignment must be normalized.
	 */
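	/* Example (illustrative): assuming MLX5_MEMIC_BASE_ALIGN is 6
	 * (i.e. 64 bytes), a requested alignment of 9 (512 bytes) is
	 * passed to firmware as 9 - 6 = 3, meaning 64 * 2^3 = 512.
	 */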
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

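	/* Scan the MEMIC page bitmap for a free range, tentatively claim
	 * it under dm->lock, then ask firmware to allocate at that
	 * address. If firmware returns -EAGAIN, drop the claim and retry
	 * from the next page; any other error is returned to the caller.
	 */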
	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
	u64 start_page_idx;
	int err;

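	/* The caller holds the CPU physical address returned by
	 * mlx5_cmd_alloc_memic; subtract the BAR base to recover the
	 * device-relative MEMIC address expected by firmware.
	 */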
	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

	if (!err) {
		spin_lock(&dm->lock);
		bitmap_clear(dm->memic_alloc_pages,
			     start_page_idx, num_pages);
		spin_unlock(&dm->lock);
	}

	return err;
}

int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			  u16 uid, phys_addr_t *addr, u32 *obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
	unsigned long *block_map;
	u64 icm_start_addr;
	u32 log_icm_size;
	u32 num_blocks;
	u32 max_blocks;
	u64 block_idx;
	void *sw_icm;
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    steering_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    header_modify_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_header_modify_sw_icm_size);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

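	/* Round the requested length up to whole SW ICM blocks and
	 * reserve the range in the per-type block map before asking
	 * firmware to create the SW_ICM object at the matching address.
	 */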
	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
	spin_lock(&dm->lock);
	block_idx = bitmap_find_next_zero_area(block_map,
					       max_blocks,
					       0,
					       num_blocks, 0);

	if (block_idx < max_blocks)
		bitmap_set(block_map,
			   block_idx, num_blocks);

	spin_unlock(&dm->lock);

	if (block_idx >= max_blocks)
		return -ENOMEM;

	sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
	icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
		   icm_start_addr);
	MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret) {
		spin_lock(&dm->lock);
		bitmap_clear(block_map,
			     block_idx, num_blocks);
		spin_unlock(&dm->lock);

		return ret;
	}

	*addr = icm_start_addr;
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			    u16 uid, phys_addr_t addr, u32 obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	unsigned long *block_map;
	u32 num_blocks;
	u64 start_idx;
	int err;

	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);

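	/* Recover the block index from the address handed out by
	 * mlx5_cmd_alloc_sw_icm, using the same per-type ICM base
	 * address and block map.
	 */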
	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		start_idx =
			(addr - MLX5_CAP64_DEV_MEM(
					dev, steering_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		start_idx =
			(addr -
			 MLX5_CAP64_DEV_MEM(
				 dev, header_modify_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	spin_lock(&dm->lock);
	bitmap_clear(block_map,
		     start_idx, num_blocks);
	spin_unlock(&dm->lock);

	return 0;
}

int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
				    0, 0);
}

void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
			     u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	MLX5_SET(alloc_q_counter_in, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}

int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);

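	/* Copy the request MAD into the command input mailbox, execute
	 * MAD_IFC, and copy the response MAD back to the caller's buffer.
	 */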
	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}