/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

void qed_roce_async_event(struct qed_hwfn *p_hwfn,
			  u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid =
		    (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else {
		struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;

		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
					 fw_event_code,
					 &rdma_data->async_handle);
	}
}

static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			       struct qed_bmap *bmap, u32 max_count)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap) {
		DP_NOTICE(p_hwfn,
			  "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
		return -ENOMEM;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
		   bmap->bitmap);
	return 0;
}

static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
				  struct qed_bmap *bmap, u32 *id_num)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);

	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);

	if (*id_num >= bmap->max_count) {
		DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
			  bmap->max_count);
		return -EINVAL;
	}

	__set_bit(*id_num, bmap->bitmap);

	return 0;
}

static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
			    struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
				struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x\n", id_num);
	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
		return;
	}
}

static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
			    struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

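/* Usage sketch (illustrative, not part of the original flow): the bmap
 * helpers above do no locking of their own; callers in this file wrap
 * them in the per-function rdma_info lock, roughly:
 *
 *	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 *				    &p_hwfn->p_rdma_info->pd_map, &id);
 *	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 */
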
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_info *p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	/* Allocate a struct with current pf rdma info */
	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
			  rc);
		return rc;
	}

	p_hwfn->p_rdma_info = p_rdma_info;
	p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	p_rdma_info->num_qps = num_cons / 2;

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
			  rc);
		goto free_rdma_info;
	}

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
			  rc);
		goto free_rdma_dev;
	}

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded to
	 * twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * The maximum number of CQs is bounded to twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);
free_rdma_info:
	kfree(p_rdma_info);

	return rc;
}

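/* The unwind labels above release resources in the reverse order of their
 * allocation, so a failure at any step frees exactly what was already set
 * up; qed_rdma_resc_free() below repeats the same kfree() sequence for the
 * normal teardown path.
 */
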
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little longer.
	 * We delay for a short while if an async destroy QP is still expected.
	 * Beyond the added delay we clear the bitmap anyway.
	 */
	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}

	kfree(p_rdma_info->cid_map.bitmap);
	kfree(p_rdma_info->tid_map.bitmap);
	kfree(p_rdma_info->toggle_bits.bitmap);
	kfree(p_rdma_info->cq_map.bitmap);
	kfree(p_rdma_info->dpi_map.bitmap);
	kfree(p_rdma_info->pd_map.bitmap);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);

	kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_resc_free(p_hwfn);
}

static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

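/* This is the usual MAC-to-EUI-64 GUID expansion: 0xff/0xfe are inserted
 * into the middle two bytes and the universally/locally administered bit
 * is flipped by the "^ 2" on byte 0. E.g. MAC 00:0e:1e:aa:bb:cc yields
 * GUID 02:0e:1e:ff:fe:aa:bb:cc.
 */
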
static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* We delay writing to this reg until first cid is allocated. See
	 * qed_cxt_dynamic_ilt_alloc function for more details
	 */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}

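/* The even-cid check above matches the QP cid layout used later in this
 * file: each QP occupies a pair of adjacent icids, responder on the even
 * icid and requester on icid + 1 (see qed_roce_alloc_cid()).
 */
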
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
		p_cnq_params->sb_num =
			cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

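/* Usage sketch (illustrative): a TID allocated here backs a single MR and
 * is returned with qed_rdma_free_tid() on the same rdma context, e.g.
 *
 *	u32 itid;
 *
 *	if (!qed_rdma_alloc_tid(rdma_cxt, &itid)) {
 *		... use itid in an MR registration ramrod ...
 *		qed_rdma_free_tid(rdma_cxt, itid);
 *	}
 */
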
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* The first DPI is reserved for the Kernel */
	__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	spin_lock_init(&p_hwfn->p_rdma_info->lock);

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
				     dpi_start_offset +
				     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* Link may have changed */
	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			     QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

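/* The wmb() presumably pairs with whatever the caller does after bumping
 * the producer (e.g. re-arming the CNQ), making sure the REG_WR16() above
 * is globally visible first; the original comment only states that prod
 * updates must stay ordered.
 */
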
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_RDMA_TYPE_ROCE;

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT = %d\n",
		   toggle_bit);

	return toggle_bit;
}

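/* Example: when a CQ on some icid is created, resized and created again,
 * the value handed to the FW alternates 1 -> 0 -> 1. This presumably lets
 * the FW distinguish the current CQ instance from a stale one that reused
 * the same icid.
 */
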
static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn,
							 p_hwfn->
							 p_rdma_info->proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

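/* Worked example: MAC 02:11:22:33:44:55 is packed as
 * p_fw_mac[0] = cpu_to_le16(0x0211), p_fw_mac[1] = cpu_to_le16(0x2233),
 * p_fw_mac[2] = cpu_to_le16(0x4455).
 */
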
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

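/* Worked example: for ROCE_V2_IPV4 with sgid.ipv4_addr holding 192.0.2.1,
 * only src_gid[3] ends up non-zero, i.e. the GID reads 0:0:0:<192.0.2.1>;
 * for RoCE v1/v2-IPv6 the full 128-bit GID is copied dword by dword.
 */
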
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = ROCE_V2_IPV6;
		break;
	default:
		flavor = MAX_ROCE_MODE;
		break;
	}
	return flavor;
}

void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

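/* The pair allocated above is relied on throughout this file: the
 * responder side of a QP lives on the returned (even) icid and the
 * requester side on icid + 1, which is why the create/modify/destroy
 * requester ramrods all use init_data.cid = qp->icid + 1.
 */
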
static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 regular_latency_queue;
	enum protocol_type proto;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
					      &qm_params);

	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "rc = %d regular physical queue = 0x%x\n", rc,
		   regular_latency_queue);

	if (rc)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod = 0;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 regular_latency_queue;
	enum protocol_type proto;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
					      &qm_params);

	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_invalidated_mw,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	*num_invalidated_mw = 0;
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If a responder was never offloaded, we need to free the cids
		 * allocated in create_qp as a FW async event will never arrive
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
			   dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw,
						      &cq_prod);
		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);
		if (rc)
			return rc;

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}
static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
		return NULL;
	}

	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
	qp->qpid = ((0xFF << 16) | qp->icid);

	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);

	if (rc) {
		kfree(qp);
		return NULL;
	}

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}
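
/* Translate a (prev_state, cur_state) pair into the ramrods the FW
 * needs. Responder and requester are separate FW objects, so most
 * transitions issue a create/modify ramrod for each side; the RESET arm
 * instead destroys both sides and cross-checks the memory-window
 * counters.
 */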
static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
			      struct qed_rdma_qp *qp,
			      enum qed_roce_qp_state prev_state,
			      struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR-> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &num_invalidated_mw,
						      &cq_prod);
		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
		return -EINVAL;
	}

	return rc;
}
static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}
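
/* Build and post a REGISTER_MR ramrod for a TID: access rights, page
 * size and PBL layout are packed into the flags words with SET_FIELD,
 * and a zero-based MR carries its FBO in place of the low VA bits.
 */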
static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
		  p_hwfn->p_rdma_info->last_tid);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case QED_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}
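
/* Post a DEREGISTER_MR ramrod. If the FW answers NIG_DRAIN_REQ the TID
 * is still referenced by in-flight traffic, so the NIG is drained via
 * the MCP and the ramrod is sent once more.
 */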
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return -EINVAL;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free then the
	 * "cid" map can be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}
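
/* Recompute the per-PF DPM enable bit in the DORQ: EDPM stays enabled
 * only while neither DCBx nor the doorbell BAR layout forbids it, hence
 * the two override flags checked below.
 */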
static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
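
/* Bring up the RDMA engine on this hwfn: allocate the rdma_info
 * resources and run the setup sequence, unwinding the allocation if
 * setup fails.
 */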
static int qed_rdma_start(void *rdma_cxt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}
static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
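
/* Tx-done handler for a GSI (QP1) packet sent over light L2: the
 * original packet descriptor travels through LL2 as the cookie and is
 * handed back to the upper layer's tx_cb here. The release variant
 * below reuses the same path on teardown.
 */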
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_roce_ll2_packet *packet = cookie;
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;

	roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
}
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				    u8 connection_handle,
				    void *cookie,
				    dma_addr_t first_frag_addr,
				    bool b_last_fragment, bool b_last_packet)
{
	qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
					cookie, first_frag_addr,
					b_last_fragment, b_last_packet);
}
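
/* Rx completion for a GSI packet received over light L2. The buffer
 * address, length and source MAC are repacked into the qed_roce_ll2
 * format before calling the registered rx_cb; a data-length error is
 * reported upward through params.rc rather than dropped silently.
 */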
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     u16 data_length,
				     u8 data_length_error,
				     u16 parse_flags,
				     u16 vlan,
				     u32 src_mac_addr_hi,
				     u16 src_mac_addr_lo, bool b_last_packet)
{
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
	struct qed_roce_ll2_rx_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_roce_ll2_packet pkt;

	DP_VERBOSE(cdev,
		   QED_MSG_LL2,
		   "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
		   (void *)(uintptr_t)rx_buf_addr,
		   data_length, data_length_error);

	memset(&pkt, 0, sizeof(pkt));
	pkt.n_seg = 1;
	pkt.payload[0].baddr = rx_buf_addr;
	pkt.payload[0].len = data_length;

	memset(&params, 0, sizeof(params));
	params.vlan_id = vlan;
	*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
	*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);

	if (data_length_error) {
		DP_ERR(cdev,
		       "roce ll2 rx complete: data length error %d, length=%d\n",
		       data_length_error, data_length);
		params.rc = -EINVAL;
	}

	roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
}
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
				       u8 *old_mac_address,
				       u8 *new_mac_address)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev,
		       "qed roce mac filter failed - roce_info/ll2 NULL\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
		return -EINVAL;
	}

	mutex_lock(&hwfn->ll2->lock);
	if (old_mac_address)
		qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					  old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					    new_mac_address);
	mutex_unlock(&hwfn->ll2->lock);

	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add mac filter\n");

	return rc;
}
static int qed_roce_ll2_start(struct qed_dev *cdev,
			      struct qed_roce_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2;
	struct qed_ll2_conn ll2_params;
	int rc;

	if (!params) {
		DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
		return -EINVAL;
	}
	if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
		       params->cbs.tx_cb, params->cbs.rx_cb);
		return -EINVAL;
	}
	if (!is_valid_ether_addr(params->mac_address)) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
		       params->mac_address);
		return -EINVAL;
	}

	/* Initialize */
	roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
	if (!roce_ll2) {
		DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
		return -ENOMEM;
	}
	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
	roce_ll2->cbs = params->cbs;
	roce_ll2->cb_cookie = params->cb_cookie;
	mutex_init(&roce_ll2->lock);

	memset(&ll2_params, 0, sizeof(ll2_params));
	ll2_params.conn_type = QED_LL2_TYPE_ROCE;
	ll2_params.mtu = params->mtu;
	ll2_params.rx_drop_ttl0_flg = true;
	ll2_params.rx_vlan_removal_en = false;
	ll2_params.tx_dest = CORE_TX_DEST_NW;
	ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
	ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
	ll2_params.gsi_enable = true;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
					params->max_rx_buffers,
					params->max_tx_buffers,
					&roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		goto err;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	hwfn->ll2 = roce_ll2;

	rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
	if (rc) {
		hwfn->ll2 = NULL;
		goto err2;
	}

	ether_addr_copy(roce_ll2->mac_address, params->mac_address);

	return 0;

err2:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err1:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err:
	kfree(roce_ll2);
	return rc;
}
static int qed_roce_ll2_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	int rc;

	if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
		return -EINVAL;
	}

	/* remove LL2 MAC address filter */
	rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
	eth_zero_addr(roce_ll2->mac_address);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
		       rc);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);

	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;

	kfree(roce_ll2);

	return rc;
}
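
/* Transmit a GSI packet over light L2: the header fragment is posted
 * first and each payload segment is then chained onto the same packet.
 * Once the header BD is posted, a failing fragment cannot be unwound,
 * only reported; the buffers are reclaimed on completion.
 */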
static int qed_roce_ll2_tx(struct qed_dev *cdev,
			   struct qed_roce_ll2_packet *pkt,
			   struct qed_roce_ll2_tx_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	enum qed_ll2_roce_flavor_type qed_roce_flavor;
	u8 flags = 0;
	int rc;
	int i;

	if (!pkt || !params) {
		DP_ERR(cdev,
		       "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
		       cdev, pkt, params);
		return -EINVAL;
	}

	qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
						      : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	/* Tx header */
	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
				       1 + pkt->n_seg, 0, flags, 0,
				       QED_LL2_TX_DEST_NW,
				       qed_roce_flavor, pkt->header.baddr,
				       pkt->header.len, pkt, 1);
	if (rc) {
		DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return QED_ROCE_TX_HEAD_FAILURE;
	}

	/* Tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       roce_ll2->handle,
						       pkt->payload[i].baddr,
						       pkt->payload[i].len);
		if (rc) {
			/* If failed not much to do here, partial packet has
			 * been posted; we can't free memory, will need to
			 * wait for completion
			 */
			DP_ERR(cdev,
			       "roce ll2 tx: payload failed (rc=%d)\n", rc);
			return QED_ROCE_TX_FRAG_FAILURE;
		}
	}

	return 0;
}
static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
				       struct qed_roce_ll2_buffer *buf,
				       u64 cookie, u8 notify_fw)
{
	return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
				      QED_LEADING_HWFN(cdev)->ll2->handle,
				      buf->baddr, buf->len,
				      (void *)(uintptr_t)cookie, notify_fw);
}
static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 roce_ll2->handle, stats);
}
static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_port = &qed_rdma_query_port,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
	.rdma_modify_qp = &qed_rdma_modify_qp,
	.rdma_query_qp = &qed_rdma_query_qp,
	.rdma_destroy_qp = &qed_rdma_destroy_qp,
	.rdma_alloc_tid = &qed_rdma_alloc_tid,
	.rdma_free_tid = &qed_rdma_free_tid,
	.rdma_register_tid = &qed_rdma_register_tid,
	.rdma_deregister_tid = &qed_rdma_deregister_tid,
	.roce_ll2_start = &qed_roce_ll2_start,
	.roce_ll2_stop = &qed_roce_ll2_stop,
	.roce_ll2_tx = &qed_roce_ll2_tx,
	.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
	.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
	.roce_ll2_stats = &qed_roce_ll2_stats,
};
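
/* qed_get_rdma_ops() below is the module boundary: a consumer such as
 * the qedr RDMA driver resolves this table once and reaches all qed
 * RDMA services through it. Illustrative sketch only (not code from
 * this file):
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *
 *	rc = ops->rdma_init(cdev, &start_params);
 */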
const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
	return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);