Commit | Line | Data |
---|---|---|
1f4d4ed6 | 1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
f1372ee1 KM |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation | |
f1372ee1 | 4 | */ |
1f4d4ed6 | 5 | |
f1372ee1 KM |
6 | #include <linux/types.h> |
7 | #include <asm/byteorder.h> | |
8 | #include <linux/bitops.h> | |
9 | #include <linux/delay.h> | |
10 | #include <linux/dma-mapping.h> | |
11 | #include <linux/errno.h> | |
12 | #include <linux/io.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/list.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/mutex.h> | |
17 | #include <linux/pci.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/spinlock.h> | |
20 | #include <linux/string.h> | |
21 | #include "qed.h" | |
22 | #include "qed_cxt.h" | |
23 | #include "qed_hsi.h" | |
24 | #include "qed_hw.h" | |
25 | #include "qed_init_ops.h" | |
26 | #include "qed_int.h" | |
27 | #include "qed_ll2.h" | |
28 | #include "qed_mcp.h" | |
29 | #include "qed_reg_addr.h" | |
7003cdd6 | 30 | #include <linux/qed/qed_rdma_if.h> |
b71b9afd KM |
31 | #include "qed_rdma.h" |
32 | #include "qed_roce.h" | |
f1372ee1 KM |
33 | #include "qed_sp.h" |
34 | ||
f1372ee1 | 35 | |
b71b9afd KM |
36 | int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, |
37 | struct qed_bmap *bmap, u32 max_count, char *name) | |
f1372ee1 KM |
38 | { |
39 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); | |
40 | ||
41 | bmap->max_count = max_count; | |
42 | ||
43 | bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long), | |
44 | GFP_KERNEL); | |
45 | if (!bmap->bitmap) | |
46 | return -ENOMEM; | |
47 | ||
48 | snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); | |
49 | ||
50 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); | |
51 | return 0; | |
52 | } | |
53 | ||
b71b9afd KM |
54 | int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, |
55 | struct qed_bmap *bmap, u32 *id_num) | |
f1372ee1 KM |
56 | { |
57 | *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); | |
58 | if (*id_num >= bmap->max_count) | |
59 | return -EINVAL; | |
60 | ||
61 | __set_bit(*id_num, bmap->bitmap); | |
62 | ||
63 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", | |
64 | bmap->name, *id_num); | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
b71b9afd KM |
69 | void qed_bmap_set_id(struct qed_hwfn *p_hwfn, |
70 | struct qed_bmap *bmap, u32 id_num) | |
f1372ee1 KM |
71 | { |
72 | if (id_num >= bmap->max_count) | |
73 | return; | |
74 | ||
75 | __set_bit(id_num, bmap->bitmap); | |
76 | } | |
77 | ||
b71b9afd KM |
78 | void qed_bmap_release_id(struct qed_hwfn *p_hwfn, |
79 | struct qed_bmap *bmap, u32 id_num) | |
f1372ee1 KM |
80 | { |
81 | bool b_acquired; | |
82 | ||
83 | if (id_num >= bmap->max_count) | |
84 | return; | |
85 | ||
86 | b_acquired = test_and_clear_bit(id_num, bmap->bitmap); | |
87 | if (!b_acquired) { | |
88 | DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", | |
89 | bmap->name, id_num); | |
90 | return; | |
91 | } | |
92 | ||
93 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", | |
94 | bmap->name, id_num); | |
95 | } | |
96 | ||
b71b9afd KM |
97 | int qed_bmap_test_id(struct qed_hwfn *p_hwfn, |
98 | struct qed_bmap *bmap, u32 id_num) | |
f1372ee1 KM |
99 | { |
100 | if (id_num >= bmap->max_count) | |
101 | return -1; | |
102 | ||
103 | return test_bit(id_num, bmap->bitmap); | |
104 | } | |
105 | ||
106 | static bool qed_bmap_is_empty(struct qed_bmap *bmap) | |
107 | { | |
108 | return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count); | |
109 | } | |
110 | ||
bf774d14 | 111 | static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) |
f1372ee1 KM |
112 | { |
113 | /* First sb id for RoCE is after all the l2 sb */ | |
114 | return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; | |
115 | } | |
116 | ||
291d57f6 | 117 | int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) |
f1372ee1 KM |
118 | { |
119 | struct qed_rdma_info *p_rdma_info; | |
f1372ee1 | 120 | |
f1372ee1 KM |
121 | p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); |
122 | if (!p_rdma_info) | |
291d57f6 MK |
123 | return -ENOMEM; |
124 | ||
125 | spin_lock_init(&p_rdma_info->lock); | |
f1372ee1 KM |
126 | |
127 | p_hwfn->p_rdma_info = p_rdma_info; | |
291d57f6 MK |
128 | return 0; |
129 | } | |
130 | ||
131 | void qed_rdma_info_free(struct qed_hwfn *p_hwfn) | |
132 | { | |
133 | kfree(p_hwfn->p_rdma_info); | |
134 | p_hwfn->p_rdma_info = NULL; | |
135 | } | |
136 | ||
137 | static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) | |
138 | { | |
139 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | |
140 | u32 num_cons, num_tasks; | |
141 | int rc = -ENOMEM; | |
142 | ||
143 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); | |
144 | ||
e0a8f9de MK |
145 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
146 | p_rdma_info->proto = PROTOCOLID_IWARP; | |
147 | else | |
148 | p_rdma_info->proto = PROTOCOLID_ROCE; | |
f1372ee1 KM |
149 | |
150 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, | |
151 | NULL); | |
152 | ||
67b40dcc KM |
153 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
154 | p_rdma_info->num_qps = num_cons; | |
155 | else | |
156 | p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ | |
f1372ee1 KM |
157 | |
158 | num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); | |
159 | ||
160 | /* Each MR uses a single task */ | |
161 | p_rdma_info->num_mrs = num_tasks; | |
162 | ||
163 | /* Queue zone lines are shared between RoCE and L2 in such a way that | |
164 | * they can be used by each without obstructing the other. | |
165 | */ | |
166 | p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); | |
167 | p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); | |
168 | ||
169 | /* Allocate a struct with device params and fill it */ | |
170 | p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); | |
171 | if (!p_rdma_info->dev) | |
291d57f6 | 172 | return rc; |
f1372ee1 KM |
173 | |
174 | /* Allocate a struct with port params and fill it */ | |
175 | p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); | |
176 | if (!p_rdma_info->port) | |
177 | goto free_rdma_dev; | |
178 | ||
179 | /* Allocate bit map for pd's */ | |
180 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, | |
181 | "PD"); | |
182 | if (rc) { | |
183 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
184 | "Failed to allocate pd_map, rc = %d\n", | |
185 | rc); | |
186 | goto free_rdma_port; | |
187 | } | |
188 | ||
7bfb399e YB |
189 | /* Allocate bit map for XRC Domains */ |
190 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map, | |
191 | QED_RDMA_MAX_XRCDS, "XRCD"); | |
192 | if (rc) { | |
193 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
194 | "Failed to allocate xrcd_map,rc = %d\n", rc); | |
195 | goto free_pd_map; | |
196 | } | |
197 | ||
f1372ee1 KM |
198 | /* Allocate DPI bitmap */ |
199 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, | |
200 | p_hwfn->dpi_count, "DPI"); | |
201 | if (rc) { | |
202 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
203 | "Failed to allocate DPI bitmap, rc = %d\n", rc); | |
7bfb399e | 204 | goto free_xrcd_map; |
f1372ee1 KM |
205 | } |
206 | ||
471115ab MK |
207 | /* Allocate bitmap for cq's. The maximum number of CQs is bound to |
208 | * the number of connections we support. (num_qps in iWARP or | |
209 | * num_qps/2 in RoCE). | |
f1372ee1 | 210 | */ |
471115ab | 211 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ"); |
f1372ee1 KM |
212 | if (rc) { |
213 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
214 | "Failed to allocate cq bitmap, rc = %d\n", rc); | |
215 | goto free_dpi_map; | |
216 | } | |
217 | ||
218 | /* Allocate bitmap for toggle bit for cq icids | |
219 | * We toggle the bit every time we create or resize cq for a given icid. | |
471115ab | 220 | * Size needs to equal the size of the cq bmap. |
f1372ee1 KM |
221 | */ |
222 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, | |
471115ab | 223 | num_cons, "Toggle"); |
f1372ee1 KM |
224 | if (rc) { |
225 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
5a94df70 | 226 | "Failed to allocate toggle bits, rc = %d\n", rc); |
f1372ee1 KM |
227 | goto free_cq_map; |
228 | } | |
229 | ||
230 | /* Allocate bitmap for itids */ | |
231 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, | |
232 | p_rdma_info->num_mrs, "MR"); | |
233 | if (rc) { | |
234 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
235 | "Failed to allocate itids bitmaps, rc = %d\n", rc); | |
236 | goto free_toggle_map; | |
237 | } | |
238 | ||
239 | /* Allocate bitmap for cids used for qps. */ | |
240 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, | |
241 | "CID"); | |
242 | if (rc) { | |
243 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
244 | "Failed to allocate cid bitmap, rc = %d\n", rc); | |
245 | goto free_tid_map; | |
246 | } | |
247 | ||
248 | /* Allocate bitmap for cids used for responders/requesters. */ | |
249 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, | |
250 | "REAL_CID"); | |
251 | if (rc) { | |
252 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
253 | "Failed to allocate real cid bitmap, rc = %d\n", rc); | |
254 | goto free_cid_map; | |
255 | } | |
67b40dcc | 256 | |
7bfb399e YB |
257 | /* The first SRQ follows the last XRC SRQ. This means that the |
258 | * SRQ IDs start from an offset equal to max_xrc_srqs. |
259 | */ | |
260 | p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count; | |
261 | rc = qed_rdma_bmap_alloc(p_hwfn, | |
262 | &p_rdma_info->xrc_srq_map, | |
263 | p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ"); | |
264 | if (rc) { | |
265 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
266 | "Failed to allocate xrc srq bitmap, rc = %d\n", rc); | |
267 | goto free_real_cid_map; | |
268 | } | |
269 | ||
39dbc646 | 270 | /* Allocate bitmap for srqs */ |
b8204ad8 | 271 | p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count; |
39dbc646 YB |
272 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, |
273 | p_rdma_info->num_srqs, "SRQ"); | |
274 | if (rc) { | |
275 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
276 | "Failed to allocate srq bitmap, rc = %d\n", rc); | |
7bfb399e | 277 | goto free_xrc_srq_map; |
39dbc646 YB |
278 | } |
279 | ||
67b40dcc KM |
280 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
281 | rc = qed_iwarp_alloc(p_hwfn); | |
282 | ||
283 | if (rc) | |
39dbc646 | 284 | goto free_srq_map; |
67b40dcc | 285 | |
f1372ee1 KM |
286 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); |
287 | return 0; | |
288 | ||
39dbc646 YB |
289 | free_srq_map: |
290 | kfree(p_rdma_info->srq_map.bitmap); | |
7bfb399e YB |
291 | free_xrc_srq_map: |
292 | kfree(p_rdma_info->xrc_srq_map.bitmap); | |
39dbc646 YB |
293 | free_real_cid_map: |
294 | kfree(p_rdma_info->real_cid_map.bitmap); | |
f1372ee1 KM |
295 | free_cid_map: |
296 | kfree(p_rdma_info->cid_map.bitmap); | |
297 | free_tid_map: | |
298 | kfree(p_rdma_info->tid_map.bitmap); | |
299 | free_toggle_map: | |
300 | kfree(p_rdma_info->toggle_bits.bitmap); | |
301 | free_cq_map: | |
302 | kfree(p_rdma_info->cq_map.bitmap); | |
303 | free_dpi_map: | |
304 | kfree(p_rdma_info->dpi_map.bitmap); | |
7bfb399e YB |
305 | free_xrcd_map: |
306 | kfree(p_rdma_info->xrcd_map.bitmap); | |
f1372ee1 KM |
307 | free_pd_map: |
308 | kfree(p_rdma_info->pd_map.bitmap); | |
309 | free_rdma_port: | |
310 | kfree(p_rdma_info->port); | |
311 | free_rdma_dev: | |
312 | kfree(p_rdma_info->dev); | |
f1372ee1 KM |
313 | |
314 | return rc; | |
315 | } | |
316 | ||
b71b9afd KM |
317 | void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, |
318 | struct qed_bmap *bmap, bool check) | |
f1372ee1 KM |
319 | { |
320 | int weight = bitmap_weight(bmap->bitmap, bmap->max_count); | |
321 | int last_line = bmap->max_count / (64 * 8); | |
322 | int last_item = last_line * 8 + | |
323 | DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); | |
324 | u64 *pmap = (u64 *)bmap->bitmap; | |
325 | int line, item, offset; | |
326 | u8 str_last_line[200] = { 0 }; | |
327 | ||
328 | if (!weight || !check) | |
329 | goto end; | |
330 | ||
331 | DP_NOTICE(p_hwfn, | |
332 | "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", | |
333 | bmap->name, bmap->max_count, weight); | |
334 | ||
335 | /* print aligned non-zero lines, if any */ | |
336 | for (item = 0, line = 0; line < last_line; line++, item += 8) | |
337 | if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) | |
338 | DP_NOTICE(p_hwfn, | |
339 | "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", | |
340 | line, | |
341 | pmap[item], | |
342 | pmap[item + 1], | |
343 | pmap[item + 2], | |
344 | pmap[item + 3], | |
345 | pmap[item + 4], | |
346 | pmap[item + 5], | |
347 | pmap[item + 6], pmap[item + 7]); | |
348 | ||
349 | /* print last unaligned non-zero line, if any */ | |
350 | if ((bmap->max_count % (64 * 8)) && | |
351 | (bitmap_weight((unsigned long *)&pmap[item], | |
352 | bmap->max_count - item * 64))) { | |
353 | offset = sprintf(str_last_line, "line 0x%04x: ", line); | |
354 | for (; item < last_item; item++) | |
355 | offset += sprintf(str_last_line + offset, | |
356 | "0x%016llx ", pmap[item]); | |
357 | DP_NOTICE(p_hwfn, "%s\n", str_last_line); | |
358 | } | |
359 | ||
360 | end: | |
361 | kfree(bmap->bitmap); | |
362 | bmap->bitmap = NULL; | |
363 | } | |
364 | ||
365 | static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) | |
366 | { | |
367 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | |
368 | ||
67b40dcc KM |
369 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
370 | qed_iwarp_resc_free(p_hwfn); | |
371 | ||
f1372ee1 KM |
372 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); |
373 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); | |
374 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); | |
375 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); | |
376 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); | |
377 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); | |
39dbc646 YB |
378 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); |
379 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); | |
7bfb399e | 380 | qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); |
f1372ee1 KM |
381 | |
382 | kfree(p_rdma_info->port); | |
383 | kfree(p_rdma_info->dev); | |
f1372ee1 KM |
384 | } |
385 | ||
1fe280a0 MK |
386 | static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) |
387 | { | |
fdd6d771 | 388 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1fe280a0 | 389 | |
fdd6d771 | 390 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); |
1fe280a0 | 391 | |
fdd6d771 RV |
392 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); |
393 | qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); | |
394 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1fe280a0 MK |
395 | } |
396 | ||
397 | static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn) | |
398 | { | |
399 | qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey); | |
400 | } | |
401 | ||
f1372ee1 KM |
402 | static void qed_rdma_free(struct qed_hwfn *p_hwfn) |
403 | { | |
404 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); | |
405 | ||
1fe280a0 | 406 | qed_rdma_free_reserved_lkey(p_hwfn); |
9de506a5 | 407 | qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); |
f89782c2 | 408 | qed_rdma_resc_free(p_hwfn); |
f1372ee1 KM |
409 | } |
410 | ||
411 | static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) | |
412 | { | |
413 | guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; | |
414 | guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; | |
415 | guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; | |
416 | guid[3] = 0xff; | |
417 | guid[4] = 0xfe; | |
418 | guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; | |
419 | guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; | |
420 | guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; | |
421 | } | |
422 | ||
423 | static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, | |
424 | struct qed_rdma_start_in_params *params) | |
425 | { | |
426 | struct qed_rdma_events *events; | |
427 | ||
428 | events = &p_hwfn->p_rdma_info->events; | |
429 | ||
430 | events->unaffiliated_event = params->events->unaffiliated_event; | |
431 | events->affiliated_event = params->events->affiliated_event; | |
432 | events->context = params->events->context; | |
433 | } | |
434 | ||
435 | static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, | |
436 | struct qed_rdma_start_in_params *params) | |
437 | { | |
438 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | |
439 | struct qed_dev *cdev = p_hwfn->cdev; | |
440 | u32 pci_status_control; | |
441 | u32 num_qps; | |
442 | ||
443 | /* Vendor specific information */ | |
444 | dev->vendor_id = cdev->vendor_id; | |
445 | dev->vendor_part_id = cdev->device_id; | |
81af04b4 | 446 | dev->hw_ver = cdev->chip_rev; |
f1372ee1 KM |
447 | dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | |
448 | (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); | |
449 | ||
450 | qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); | |
451 | dev->node_guid = dev->sys_image_guid; | |
452 | ||
453 | dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, | |
454 | RDMA_MAX_SGE_PER_RQ_WQE); | |
455 | ||
456 | if (cdev->rdma_max_sge) | |
457 | dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); | |
458 | ||
39dbc646 YB |
459 | dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; |
460 | if (p_hwfn->cdev->rdma_max_srq_sge) { | |
461 | dev->max_srq_sge = min_t(u32, | |
462 | p_hwfn->cdev->rdma_max_srq_sge, | |
463 | dev->max_srq_sge); | |
464 | } | |
f1372ee1 KM |
465 | dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; |
466 | ||
467 | dev->max_inline = (cdev->rdma_max_inline) ? | |
468 | min_t(u32, cdev->rdma_max_inline, dev->max_inline) : | |
469 | dev->max_inline; | |
470 | ||
471 | dev->max_wqe = QED_RDMA_MAX_WQE; | |
472 | dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); | |
473 | ||
474 | /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because | |
475 | * it is up-aligned to 16 and then to ILT page size within qed cxt. | |
476 | * This is OK in terms of ILT but we don't want to configure the FW | |
477 | * above its abilities | |
478 | */ | |
479 | num_qps = ROCE_MAX_QPS; | |
480 | num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); | |
481 | dev->max_qp = num_qps; | |
482 | ||
483 | /* CQs use the same icids that QPs use, hence they are limited by the |
484 | * number of icids. There are two icids per QP. | |
485 | */ | |
486 | dev->max_cq = num_qps * 2; | |
487 | ||
488 | /* The number of mrs is smaller by 1 since the first is reserved */ | |
489 | dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; | |
490 | dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; | |
491 | ||
492 | /* The maximum CQE capacity per CQ supported. | |
493 | * The max number of CQEs assumes a two-layer PBL, where |
494 | * 8 is the pointer size in bytes and |
495 | * 32 is the size of a CQ element in bytes. |
496 | */ | |
497 | if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) | |
498 | dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; | |
499 | else | |
500 | dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; | |
501 | ||
502 | dev->max_mw = 0; | |
f1372ee1 KM |
503 | dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); |
504 | dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; | |
505 | dev->max_pkey = QED_RDMA_MAX_P_KEY; | |
506 | ||
39dbc646 YB |
507 | dev->max_srq = p_hwfn->p_rdma_info->num_srqs; |
508 | dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; | |
f1372ee1 KM |
509 | dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / |
510 | (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); | |
511 | dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / | |
512 | RDMA_REQ_RD_ATOMIC_ELM_SIZE; | |
513 | dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * | |
514 | p_hwfn->p_rdma_info->num_qps; | |
515 | dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; | |
516 | dev->dev_ack_delay = QED_RDMA_ACK_DELAY; | |
517 | dev->max_pd = RDMA_MAX_PDS; | |
518 | dev->max_ah = p_hwfn->p_rdma_info->num_qps; | |
519 | dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); | |
520 | ||
521 | /* Set capabilities */ |
522 | dev->dev_caps = 0; | |
523 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); | |
524 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); | |
525 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); | |
526 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); | |
527 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); | |
528 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); | |
529 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); | |
530 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); | |
531 | ||
532 | /* Check atomic operations support in PCI configuration space. */ | |
93428c58 FL |
533 | pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, |
534 | &pci_status_control); | |
f1372ee1 KM |
535 | |
536 | if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) | |
537 | SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); | |
67b40dcc KM |
538 | |
539 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) | |
540 | qed_iwarp_init_devinfo(p_hwfn); | |
f1372ee1 KM |
541 | } |
542 | ||
543 | static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) | |
544 | { | |
545 | struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; | |
546 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | |
547 | ||
548 | port->port_state = p_hwfn->mcp_info->link_output.link_up ? | |
549 | QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; | |
550 | ||
551 | port->max_msg_size = min_t(u64, | |
552 | (dev->max_mr_mw_fmr_size * | |
553 | p_hwfn->cdev->rdma_max_sge), | |
554 | BIT(31)); | |
555 | ||
556 | port->pkey_bad_counter = 0; | |
557 | } | |
558 | ||
559 | static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
560 | { | |
67b40dcc | 561 | int rc = 0; |
f1372ee1 KM |
562 | |
563 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); | |
564 | p_hwfn->b_rdma_enabled_in_prs = false; | |
565 | ||
67b40dcc KM |
566 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
567 | qed_iwarp_init_hw(p_hwfn, p_ptt); | |
568 | else | |
569 | rc = qed_roce_init_hw(p_hwfn, p_ptt); | |
f1372ee1 | 570 | |
67b40dcc | 571 | return rc; |
f1372ee1 KM |
572 | } |
573 | ||
574 | static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, | |
575 | struct qed_rdma_start_in_params *params, | |
576 | struct qed_ptt *p_ptt) | |
577 | { | |
578 | struct rdma_init_func_ramrod_data *p_ramrod; | |
579 | struct qed_rdma_cnq_params *p_cnq_pbl_list; | |
580 | struct rdma_init_func_hdr *p_params_header; | |
581 | struct rdma_cnq_params *p_cnq_params; | |
582 | struct qed_sp_init_data init_data; | |
583 | struct qed_spq_entry *p_ent; | |
584 | u32 cnq_id, sb_id; | |
585 | u16 igu_sb_id; | |
586 | int rc; | |
587 | ||
588 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); | |
589 | ||
590 | /* Save the number of cnqs for the function close ramrod */ | |
591 | p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; | |
592 | ||
593 | /* Get SPQ entry */ | |
594 | memset(&init_data, 0, sizeof(init_data)); | |
595 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
596 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
597 | ||
598 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, | |
599 | p_hwfn->p_rdma_info->proto, &init_data); | |
600 | if (rc) | |
601 | return rc; | |
602 | ||
d1abfd0b MK |
603 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
604 | qed_iwarp_init_fw_ramrod(p_hwfn, | |
da090917 | 605 | &p_ent->ramrod.iwarp_init_func); |
67b40dcc | 606 | p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; |
d1abfd0b | 607 | } else { |
67b40dcc | 608 | p_ramrod = &p_ent->ramrod.roce_init_func.rdma; |
d1abfd0b | 609 | } |
f1372ee1 KM |
610 | |
611 | p_params_header = &p_ramrod->params_header; | |
612 | p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, | |
613 | QED_RDMA_CNQ_RAM); | |
614 | p_params_header->num_cnqs = params->desired_cnq; | |
7bfb399e YB |
615 | p_params_header->first_reg_srq_id = |
616 | cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset); | |
617 | p_params_header->reg_srq_base_addr = | |
618 | cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM)); | |
f1372ee1 KM |
619 | if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) |
620 | p_params_header->cq_ring_mode = 1; | |
621 | else | |
622 | p_params_header->cq_ring_mode = 0; | |
623 | ||
624 | for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { | |
625 | sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); | |
626 | igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); | |
627 | p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); | |
628 | p_cnq_params = &p_ramrod->cnq_params[cnq_id]; | |
629 | p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id]; |
630 | ||
631 | p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; | |
632 | p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; | |
633 | ||
634 | DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, | |
635 | p_cnq_pbl_list->pbl_ptr); | |
636 | ||
637 | /* we assume here that cnq_id and qz_offset are the same */ | |
638 | p_cnq_params->queue_zone_num = | |
639 | cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + | |
640 | cnq_id); | |
641 | } | |
642 | ||
643 | return qed_spq_post(p_hwfn, p_ent, NULL); | |
644 | } | |
645 | ||
646 | static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) | |
647 | { | |
648 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
649 | int rc; | |
650 | ||
651 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); | |
652 | ||
653 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
654 | rc = qed_rdma_bmap_alloc_id(p_hwfn, | |
655 | &p_hwfn->p_rdma_info->tid_map, itid); | |
656 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
657 | if (rc) | |
658 | goto out; | |
659 | ||
660 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); | |
661 | out: | |
662 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); | |
663 | return rc; | |
664 | } | |
665 | ||
666 | static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) | |
667 | { | |
668 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | |
669 | ||
f1372ee1 KM |
670 | /* Tid 0 will be used as the key for "reserved MR". |
671 | * The driver should allocate memory for it so it can be loaded but no | |
672 | * ramrod should be passed on it. | |
673 | */ | |
674 | qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); | |
675 | if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { | |
676 | DP_NOTICE(p_hwfn, | |
677 | "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); | |
678 | return -EINVAL; | |
679 | } | |
680 | ||
681 | return 0; | |
682 | } | |
683 | ||
684 | static int qed_rdma_setup(struct qed_hwfn *p_hwfn, | |
685 | struct qed_ptt *p_ptt, | |
686 | struct qed_rdma_start_in_params *params) | |
687 | { | |
688 | int rc; | |
689 | ||
690 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); | |
691 | ||
f1372ee1 KM |
692 | qed_rdma_init_devinfo(p_hwfn, params); |
693 | qed_rdma_init_port(p_hwfn); | |
694 | qed_rdma_init_events(p_hwfn, params); | |
695 | ||
696 | rc = qed_rdma_reserve_lkey(p_hwfn); | |
697 | if (rc) | |
698 | return rc; | |
699 | ||
700 | rc = qed_rdma_init_hw(p_hwfn, p_ptt); | |
701 | if (rc) | |
702 | return rc; | |
703 | ||
67b40dcc | 704 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
79284ade | 705 | rc = qed_iwarp_setup(p_hwfn, params); |
67b40dcc KM |
706 | if (rc) |
707 | return rc; | |
708 | } else { | |
709 | rc = qed_roce_setup(p_hwfn); | |
710 | if (rc) | |
711 | return rc; | |
712 | } | |
f1372ee1 KM |
713 | |
714 | return qed_rdma_start_fw(p_hwfn, params, p_ptt); | |
715 | } | |
716 | ||
bf774d14 | 717 | static int qed_rdma_stop(void *rdma_cxt) |
f1372ee1 KM |
718 | { |
719 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
720 | struct rdma_close_func_ramrod_data *p_ramrod; | |
721 | struct qed_sp_init_data init_data; | |
722 | struct qed_spq_entry *p_ent; | |
723 | struct qed_ptt *p_ptt; | |
724 | u32 ll2_ethertype_en; | |
725 | int rc = -EBUSY; | |
726 | ||
727 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); | |
728 | ||
729 | p_ptt = qed_ptt_acquire(p_hwfn); | |
730 | if (!p_ptt) { | |
731 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); | |
732 | return rc; | |
733 | } | |
734 | ||
735 | /* Disable RoCE search */ | |
736 | qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); | |
737 | p_hwfn->b_rdma_enabled_in_prs = false; | |
291d57f6 | 738 | p_hwfn->p_rdma_info->active = 0; |
f1372ee1 KM |
739 | qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); |
740 | ||
741 | ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); | |
742 | ||
743 | qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, | |
744 | (ll2_ethertype_en & 0xFFFE)); | |
745 | ||
67b40dcc | 746 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
79284ade | 747 | rc = qed_iwarp_stop(p_hwfn); |
67b40dcc KM |
748 | if (rc) { |
749 | qed_ptt_release(p_hwfn, p_ptt); | |
750 | return rc; | |
751 | } | |
752 | } else { | |
753 | qed_roce_stop(p_hwfn); | |
754 | } | |
755 | ||
f1372ee1 KM |
756 | qed_ptt_release(p_hwfn, p_ptt); |
757 | ||
758 | /* Get SPQ entry */ | |
759 | memset(&init_data, 0, sizeof(init_data)); | |
760 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
761 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
762 | ||
763 | /* Stop RoCE */ | |
764 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, | |
765 | p_hwfn->p_rdma_info->proto, &init_data); | |
766 | if (rc) | |
767 | goto out; | |
768 | ||
769 | p_ramrod = &p_ent->ramrod.rdma_close_func; | |
770 | ||
771 | p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; | |
772 | p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); | |
773 | ||
774 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
775 | ||
776 | out: | |
777 | qed_rdma_free(p_hwfn); | |
778 | ||
779 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); | |
780 | return rc; | |
781 | } | |
782 | ||
783 | static int qed_rdma_add_user(void *rdma_cxt, | |
784 | struct qed_rdma_add_user_out_params *out_params) | |
785 | { | |
786 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
787 | u32 dpi_start_offset; | |
788 | u32 returned_id = 0; | |
789 | int rc; | |
790 | ||
791 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); | |
792 | ||
793 | /* Allocate DPI */ | |
794 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
795 | rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, | |
796 | &returned_id); | |
797 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
798 | ||
799 | out_params->dpi = (u16)returned_id; | |
800 | ||
801 | /* Calculate the corresponding DPI address */ | |
802 | dpi_start_offset = p_hwfn->dpi_start_offset; | |
803 | ||
0058eb58 MK |
804 | out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + |
805 | out_params->dpi * p_hwfn->dpi_size; | |
f1372ee1 | 806 | |
8366d520 | 807 | out_params->dpi_phys_addr = p_hwfn->db_phys_addr + |
f1372ee1 KM |
808 | dpi_start_offset + |
809 | ((out_params->dpi) * p_hwfn->dpi_size); | |
810 | ||
811 | out_params->dpi_size = p_hwfn->dpi_size; | |
812 | out_params->wid_count = p_hwfn->wid_count; | |
813 | ||
814 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); | |
815 | return rc; | |
816 | } | |
817 | ||
818 | static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) | |
819 | { | |
820 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
821 | struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; | |
7e50769c | 822 | struct qed_mcp_link_state *p_link_output; |
f1372ee1 KM |
823 | |
824 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); | |
825 | ||
7e50769c MK |
826 | /* The link state is saved only for the leading hwfn */ |
827 | p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; | |
f1372ee1 | 828 | |
7e50769c MK |
829 | p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP |
830 | : QED_RDMA_PORT_DOWN; | |
831 | ||
832 | p_port->link_speed = p_link_output->speed; | |
f1372ee1 KM |
833 | |
834 | p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; | |
835 | ||
836 | return p_port; | |
837 | } | |
838 | ||
839 | static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) | |
840 | { | |
841 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
842 | ||
843 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); | |
844 | ||
845 | /* Return struct with device parameters */ | |
846 | return p_hwfn->p_rdma_info->dev; | |
847 | } | |
848 | ||
f1372ee1 KM |
849 | static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) |
850 | { | |
851 | struct qed_hwfn *p_hwfn; | |
852 | u16 qz_num; | |
853 | u32 addr; | |
854 | ||
855 | p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
856 | ||
857 | if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { | |
858 | DP_NOTICE(p_hwfn, | |
859 | "queue zone offset %d is too large (max is %d)\n", | |
860 | qz_offset, p_hwfn->p_rdma_info->max_queue_zones); | |
861 | return; | |
862 | } | |
863 | ||
864 | qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; | |
865 | addr = GTT_BAR0_MAP_REG_USDM_RAM + | |
866 | USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); | |
867 | ||
868 | REG_WR16(p_hwfn, addr, prod); | |
869 | ||
870 | /* keep prod updates ordered */ | |
871 | wmb(); | |
872 | } | |
873 | ||
874 | static int qed_fill_rdma_dev_info(struct qed_dev *cdev, | |
875 | struct qed_dev_rdma_info *info) | |
876 | { | |
7e50769c | 877 | struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); |
f1372ee1 KM |
878 | |
879 | memset(info, 0, sizeof(*info)); | |
880 | ||
67b40dcc KM |
881 | info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? |
882 | QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; | |
883 | ||
f1372ee1 KM |
884 | info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); |
885 | ||
886 | qed_fill_dev_info(cdev, &info->common); | |
887 | ||
888 | return 0; | |
889 | } | |
890 | ||
891 | static int qed_rdma_get_sb_start(struct qed_dev *cdev) | |
892 | { | |
893 | int feat_num; | |
894 | ||
895 | if (cdev->num_hwfns > 1) | |
7e50769c | 896 | feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE); |
f1372ee1 | 897 | else |
7e50769c | 898 | feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) * |
f1372ee1 KM |
899 | cdev->num_hwfns; |
900 | ||
901 | return feat_num; | |
902 | } | |
903 | ||
904 | static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) | |
905 | { | |
7e50769c | 906 | int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ); |
f1372ee1 KM |
907 | int n_msix = cdev->int_params.rdma_msix_cnt; |
908 | ||
909 | return min_t(int, n_cnq, n_msix); | |
910 | } | |
911 | ||
912 | static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) | |
913 | { | |
914 | int limit = 0; | |
915 | ||
916 | /* Mark the fastpath as free/used */ | |
917 | cdev->int_params.fp_initialized = cnt ? true : false; | |
918 | ||
919 | if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { | |
920 | DP_ERR(cdev, | |
921 | "qed roce supports only MSI-X interrupts (detected %d).\n", | |
922 | cdev->int_params.out.int_mode); | |
923 | return -EINVAL; | |
924 | } else if (cdev->int_params.fp_msix_cnt) { | |
925 | limit = cdev->int_params.rdma_msix_cnt; | |
926 | } | |
927 | ||
928 | if (!limit) | |
929 | return -ENOMEM; | |
930 | ||
931 | return min_t(int, cnt, limit); | |
932 | } | |
933 | ||
934 | static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) | |
935 | { | |
936 | memset(info, 0, sizeof(*info)); | |
937 | ||
938 | if (!cdev->int_params.fp_initialized) { | |
939 | DP_INFO(cdev, | |
940 | "Protocol driver requested interrupt information, but its support is not yet configured\n"); | |
941 | return -EINVAL; | |
942 | } | |
943 | ||
944 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { | |
945 | int msix_base = cdev->int_params.rdma_msix_base; | |
946 | ||
947 | info->msix_cnt = cdev->int_params.rdma_msix_cnt; | |
948 | info->msix = &cdev->int_params.msix_table[msix_base]; | |
949 | ||
950 | DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", | |
951 | info->msix_cnt, msix_base); | |
952 | } | |
953 | ||
954 | return 0; | |
955 | } | |
956 | ||
957 | static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) | |
958 | { | |
959 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
960 | u32 returned_id; | |
961 | int rc; | |
962 | ||
963 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); | |
964 | ||
965 | /* Allocates an unused protection domain */ | |
966 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
967 | rc = qed_rdma_bmap_alloc_id(p_hwfn, | |
968 | &p_hwfn->p_rdma_info->pd_map, &returned_id); | |
969 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
970 | ||
971 | *pd = (u16)returned_id; | |
972 | ||
973 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); | |
974 | return rc; | |
975 | } | |
976 | ||
977 | static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) | |
978 | { | |
979 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
980 | ||
981 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); | |
982 | ||
983 | /* Returns a previously allocated protection domain for reuse */ | |
984 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
985 | qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); | |
986 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
987 | } | |
988 | ||
7bfb399e YB |
989 | static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id) |
990 | { | |
991 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
992 | u32 returned_id; | |
993 | int rc; | |
994 | ||
995 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n"); | |
996 | ||
997 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
998 | rc = qed_rdma_bmap_alloc_id(p_hwfn, | |
999 | &p_hwfn->p_rdma_info->xrcd_map, | |
1000 | &returned_id); | |
1001 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1002 | if (rc) { | |
1003 | DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n"); | |
1004 | return rc; | |
1005 | } | |
1006 | ||
1007 | *xrcd_id = (u16)returned_id; | |
1008 | ||
1009 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc); | |
1010 | return rc; | |
1011 | } | |
1012 | ||
1013 | static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id) | |
1014 | { | |
1015 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1016 | ||
1017 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id); | |
1018 | ||
1019 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1020 | qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id); | |
1021 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1022 | } | |
1023 | ||
f1372ee1 KM |
1024 | static enum qed_rdma_toggle_bit |
1025 | qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) | |
1026 | { | |
1027 | struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; | |
1028 | enum qed_rdma_toggle_bit toggle_bit; | |
1029 | u32 bmap_id; | |
1030 | ||
1031 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); | |
1032 | ||
1033 | /* the function toggles the bit that is related to a given icid |
1034 | * and returns the new toggle bit's value | |
1035 | */ | |
1036 | bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); | |
1037 | ||
1038 | spin_lock_bh(&p_info->lock); | |
1039 | toggle_bit = !test_and_change_bit(bmap_id, | |
1040 | p_info->toggle_bits.bitmap); | |
1041 | spin_unlock_bh(&p_info->lock); | |
1042 | ||
1043 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n", | |
1044 | toggle_bit); | |
1045 | ||
1046 | return toggle_bit; | |
1047 | } | |
1048 | ||
1049 | static int qed_rdma_create_cq(void *rdma_cxt, | |
1050 | struct qed_rdma_create_cq_in_params *params, | |
1051 | u16 *icid) | |
1052 | { | |
1053 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1054 | struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; | |
1055 | struct rdma_create_cq_ramrod_data *p_ramrod; | |
1056 | enum qed_rdma_toggle_bit toggle_bit; | |
1057 | struct qed_sp_init_data init_data; | |
1058 | struct qed_spq_entry *p_ent; | |
1059 | u32 returned_id, start_cid; | |
1060 | int rc; | |
1061 | ||
1062 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", | |
1063 | params->cq_handle_hi, params->cq_handle_lo); | |
1064 | ||
1065 | /* Allocate icid */ | |
1066 | spin_lock_bh(&p_info->lock); | |
1067 | rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); | |
1068 | spin_unlock_bh(&p_info->lock); | |
1069 | ||
1070 | if (rc) { | |
1071 | DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); | |
1072 | return rc; | |
1073 | } | |
1074 | ||
1075 | start_cid = qed_cxt_get_proto_cid_start(p_hwfn, | |
1076 | p_info->proto); | |
1077 | *icid = returned_id + start_cid; | |
1078 | ||
1079 | /* Check if icid requires a page allocation */ | |
1080 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); | |
1081 | if (rc) | |
1082 | goto err; | |
1083 | ||
1084 | /* Get SPQ entry */ | |
1085 | memset(&init_data, 0, sizeof(init_data)); | |
1086 | init_data.cid = *icid; | |
1087 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1088 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1089 | ||
1090 | /* Send create CQ ramrod */ | |
1091 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1092 | RDMA_RAMROD_CREATE_CQ, | |
1093 | p_info->proto, &init_data); | |
1094 | if (rc) | |
1095 | goto err; | |
1096 | ||
1097 | p_ramrod = &p_ent->ramrod.rdma_create_cq; | |
1098 | ||
1099 | p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); | |
1100 | p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); | |
1101 | p_ramrod->dpi = cpu_to_le16(params->dpi); | |
1102 | p_ramrod->is_two_level_pbl = params->pbl_two_level; | |
1103 | p_ramrod->max_cqes = cpu_to_le32(params->cq_size); | |
1104 | DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); | |
1105 | p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); | |
1106 | p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + | |
1107 | params->cnq_id; | |
1108 | p_ramrod->int_timeout = params->int_timeout; | |
1109 | ||
1110 | /* toggle the bit for every resize or create cq for a given icid */ | |
1111 | toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); | |
1112 | ||
1113 | p_ramrod->toggle_bit = toggle_bit; | |
1114 | ||
1115 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1116 | if (rc) { | |
1117 | /* restore toggle bit */ | |
1118 | qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); | |
1119 | goto err; | |
1120 | } | |
1121 | ||
1122 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); | |
1123 | return rc; | |
1124 | ||
1125 | err: | |
1126 | /* release allocated icid */ | |
1127 | spin_lock_bh(&p_info->lock); | |
1128 | qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); | |
1129 | spin_unlock_bh(&p_info->lock); | |
1130 | DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); | |
1131 | ||
1132 | return rc; | |
1133 | } | |
1134 | ||
1135 | static int | |
1136 | qed_rdma_destroy_cq(void *rdma_cxt, | |
1137 | struct qed_rdma_destroy_cq_in_params *in_params, | |
1138 | struct qed_rdma_destroy_cq_out_params *out_params) | |
1139 | { | |
1140 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1141 | struct rdma_destroy_cq_output_params *p_ramrod_res; | |
1142 | struct rdma_destroy_cq_ramrod_data *p_ramrod; | |
1143 | struct qed_sp_init_data init_data; | |
1144 | struct qed_spq_entry *p_ent; | |
1145 | dma_addr_t ramrod_res_phys; | |
1146 | enum protocol_type proto; | |
1147 | int rc = -ENOMEM; | |
1148 | ||
1149 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); | |
1150 | ||
1151 | p_ramrod_res = | |
1152 | (struct rdma_destroy_cq_output_params *) | |
1153 | dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
1154 | sizeof(struct rdma_destroy_cq_output_params), | |
1155 | &ramrod_res_phys, GFP_KERNEL); | |
1156 | if (!p_ramrod_res) { | |
1157 | DP_NOTICE(p_hwfn, | |
1158 | "qed destroy cq failed: cannot allocate memory (ramrod)\n"); | |
1159 | return rc; | |
1160 | } | |
1161 | ||
1162 | /* Get SPQ entry */ | |
1163 | memset(&init_data, 0, sizeof(init_data)); | |
1164 | init_data.cid = in_params->icid; | |
1165 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1166 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1167 | proto = p_hwfn->p_rdma_info->proto; | |
1168 | /* Send destroy CQ ramrod */ | |
1169 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1170 | RDMA_RAMROD_DESTROY_CQ, | |
1171 | proto, &init_data); | |
1172 | if (rc) | |
1173 | goto err; | |
1174 | ||
1175 | p_ramrod = &p_ent->ramrod.rdma_destroy_cq; | |
1176 | DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); | |
1177 | ||
1178 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1179 | if (rc) | |
1180 | goto err; | |
1181 | ||
1182 | out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num); | |
1183 | ||
1184 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
1185 | sizeof(struct rdma_destroy_cq_output_params), | |
1186 | p_ramrod_res, ramrod_res_phys); | |
1187 | ||
1188 | /* Free icid */ | |
1189 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1190 | ||
1191 | qed_bmap_release_id(p_hwfn, | |
1192 | &p_hwfn->p_rdma_info->cq_map, | |
1193 | (in_params->icid - | |
1194 | qed_cxt_get_proto_cid_start(p_hwfn, proto))); | |
1195 | ||
1196 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1197 | ||
1198 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc); | |
1199 | return rc; | |
1200 | ||
1201 | err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
1202 | sizeof(struct rdma_destroy_cq_output_params), | |
1203 | p_ramrod_res, ramrod_res_phys); | |
1204 | ||
1205 | return rc; | |
1206 | } | |
1207 | ||
b71b9afd | 1208 | void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac) |
f1372ee1 KM |
1209 | { |
1210 | p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); | |
1211 | p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); | |
1212 | p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); | |
1213 | } | |
1214 | ||
f1372ee1 KM |
1215 | static int qed_rdma_query_qp(void *rdma_cxt, |
1216 | struct qed_rdma_qp *qp, | |
1217 | struct qed_rdma_query_qp_out_params *out_params) | |
1218 | { | |
1219 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
67b40dcc | 1220 | int rc = 0; |
f1372ee1 KM |
1221 | |
1222 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); | |
1223 | ||
1224 | /* The following fields are filled in from qp and not FW as they can't | |
1225 | * be modified by FW | |
1226 | */ | |
1227 | out_params->mtu = qp->mtu; | |
1228 | out_params->dest_qp = qp->dest_qp; | |
1229 | out_params->incoming_atomic_en = qp->incoming_atomic_en; | |
1230 | out_params->e2e_flow_control_en = qp->e2e_flow_control_en; | |
1231 | out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; | |
1232 | out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; | |
1233 | out_params->dgid = qp->dgid; | |
1234 | out_params->flow_label = qp->flow_label; | |
1235 | out_params->hop_limit_ttl = qp->hop_limit_ttl; | |
1236 | out_params->traffic_class_tos = qp->traffic_class_tos; | |
1237 | out_params->timeout = qp->ack_timeout; | |
1238 | out_params->rnr_retry = qp->rnr_retry_cnt; | |
1239 | out_params->retry_cnt = qp->retry_cnt; | |
1240 | out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; | |
1241 | out_params->pkey_index = 0; | |
1242 | out_params->max_rd_atomic = qp->max_rd_atomic_req; | |
1243 | out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; | |
1244 | out_params->sqd_async = qp->sqd_async; | |
1245 | ||
67b40dcc KM |
1246 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
1247 | qed_iwarp_query_qp(qp, out_params); | |
1248 | else | |
1249 | rc = qed_roce_query_qp(p_hwfn, qp, out_params); | |
f1372ee1 KM |
1250 | |
1251 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); | |
1252 | return rc; | |
1253 | } | |
1254 | ||
1255 | static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) | |
1256 | { | |
1257 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1258 | int rc = 0; | |
1259 | ||
1260 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); | |
1261 | ||
67b40dcc KM |
1262 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) |
1263 | rc = qed_iwarp_destroy_qp(p_hwfn, qp); | |
1264 | else | |
1265 | rc = qed_roce_destroy_qp(p_hwfn, qp); | |
f1372ee1 KM |
1266 | |
1267 | /* free qp params struct */ | |
1268 | kfree(qp); | |
1269 | ||
1270 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); | |
1271 | return rc; | |
1272 | } | |
1273 | ||
1274 | static struct qed_rdma_qp * | |
1275 | qed_rdma_create_qp(void *rdma_cxt, | |
1276 | struct qed_rdma_create_qp_in_params *in_params, | |
1277 | struct qed_rdma_create_qp_out_params *out_params) | |
1278 | { | |
1279 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1280 | struct qed_rdma_qp *qp; | |
1281 | u8 max_stats_queues; | |
1282 | int rc; | |
1283 | ||
291d57f6 MK |
1284 | if (!rdma_cxt || !in_params || !out_params || |
1285 | !p_hwfn->p_rdma_info->active) { | |
f1372ee1 KM |
1286 | DP_ERR(p_hwfn->cdev, |
1287 | "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", | |
1288 | rdma_cxt, in_params, out_params); | |
1289 | return NULL; | |
1290 | } | |
1291 | ||
1292 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1293 | "qed rdma create qp called with qp_handle = %08x%08x\n", | |
1294 | in_params->qp_handle_hi, in_params->qp_handle_lo); | |
1295 | ||
1296 | /* Some sanity checks... */ | |
1297 | max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; | |
1298 | if (in_params->stats_queue >= max_stats_queues) { | |
1299 | DP_ERR(p_hwfn->cdev, | |
1300 | "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n", | |
1301 | in_params->stats_queue, max_stats_queues); | |
1302 | return NULL; | |
1303 | } | |
1304 | ||
67b40dcc KM |
1305 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
1306 | if (in_params->sq_num_pages * sizeof(struct regpair) > | |
1307 | IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { | |
1308 | DP_NOTICE(p_hwfn->cdev, | |
1309 | "Sq num pages: %d exceeds maximum\n", | |
1310 | in_params->sq_num_pages); | |
1311 | return NULL; | |
1312 | } | |
1313 | if (in_params->rq_num_pages * sizeof(struct regpair) > | |
1314 | IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { | |
1315 | DP_NOTICE(p_hwfn->cdev, | |
1316 | "Rq num pages: %d exceeds maximum\n", | |
1317 | in_params->rq_num_pages); | |
1318 | return NULL; | |
1319 | } | |
1320 | } | |
1321 | ||
f1372ee1 KM |
1322 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
1323 | if (!qp) | |
1324 | return NULL; | |
1325 | ||
f1372ee1 KM |
1326 | qp->cur_state = QED_ROCE_QP_STATE_RESET; |
1327 | qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); | |
1328 | qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); | |
1329 | qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); | |
1330 | qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); | |
1331 | qp->use_srq = in_params->use_srq; | |
1332 | qp->signal_all = in_params->signal_all; | |
1333 | qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; | |
1334 | qp->pd = in_params->pd; | |
1335 | qp->dpi = in_params->dpi; | |
1336 | qp->sq_cq_id = in_params->sq_cq_id; | |
1337 | qp->sq_num_pages = in_params->sq_num_pages; | |
1338 | qp->sq_pbl_ptr = in_params->sq_pbl_ptr; | |
1339 | qp->rq_cq_id = in_params->rq_cq_id; | |
1340 | qp->rq_num_pages = in_params->rq_num_pages; | |
1341 | qp->rq_pbl_ptr = in_params->rq_pbl_ptr; | |
1342 | qp->srq_id = in_params->srq_id; | |
1343 | qp->req_offloaded = false; | |
1344 | qp->resp_offloaded = false; | |
1345 | qp->e2e_flow_control_en = qp->use_srq ? false : true; | |
1346 | qp->stats_queue = in_params->stats_queue; | |
7bfb399e YB |
1347 | qp->qp_type = in_params->qp_type; |
1348 | qp->xrcd_id = in_params->xrcd_id; | |
f1372ee1 | 1349 | |
67b40dcc KM |
1350 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
1351 | rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); | |
1352 | qp->qpid = qp->icid; | |
1353 | } else { | |
ff937b91 | 1354 | qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE); |
67b40dcc KM |
1355 | rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); |
1356 | qp->qpid = ((0xFF << 16) | qp->icid); | |
1357 | } | |
1358 | ||
1359 | if (rc) { | |
1360 | kfree(qp); | |
1361 | return NULL; | |
1362 | } | |
1363 | ||
f1372ee1 KM |
1364 | out_params->icid = qp->icid; |
1365 | out_params->qp_id = qp->qpid; | |
1366 | ||
1367 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); | |
1368 | return qp; | |
1369 | } | |
1370 | ||
f1372ee1 KM |
1371 | static int qed_rdma_modify_qp(void *rdma_cxt, |
1372 | struct qed_rdma_qp *qp, | |
1373 | struct qed_rdma_modify_qp_in_params *params) | |
1374 | { | |
1375 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1376 | enum qed_roce_qp_state prev_state; | |
1377 | int rc = 0; | |
1378 | ||
1379 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", | |
1380 | qp->icid, params->new_state); | |
1381 | ||
1382 | if (rc) { | |
1383 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1384 | return rc; | |
1385 | } | |
1386 | ||
1387 | if (GET_FIELD(params->modify_flags, | |
1388 | QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { | |
1389 | qp->incoming_rdma_read_en = params->incoming_rdma_read_en; | |
1390 | qp->incoming_rdma_write_en = params->incoming_rdma_write_en; | |
1391 | qp->incoming_atomic_en = params->incoming_atomic_en; | |
1392 | } | |
1393 | ||
1394 | /* Update QP structure with the updated values */ | |
1395 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) | |
1396 | qp->roce_mode = params->roce_mode; | |
1397 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) | |
1398 | qp->pkey = params->pkey; | |
1399 | if (GET_FIELD(params->modify_flags, | |
1400 | QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) | |
1401 | qp->e2e_flow_control_en = params->e2e_flow_control_en; | |
1402 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) | |
1403 | qp->dest_qp = params->dest_qp; | |
1404 | if (GET_FIELD(params->modify_flags, | |
1405 | QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { | |
1406 | /* Indicates that the following parameters have changed: | |
1407 | * Traffic class, flow label, hop limit, source GID, | |
1408 | * destination GID, loopback indicator | |
1409 | */ | |
1410 | qp->traffic_class_tos = params->traffic_class_tos; | |
1411 | qp->flow_label = params->flow_label; | |
1412 | qp->hop_limit_ttl = params->hop_limit_ttl; | |
1413 | ||
1414 | qp->sgid = params->sgid; | |
1415 | qp->dgid = params->dgid; | |
1416 | qp->udp_src_port = 0; | |
1417 | qp->vlan_id = params->vlan_id; | |
1418 | qp->mtu = params->mtu; | |
1419 | qp->lb_indication = params->lb_indication; | |
1420 | memcpy((u8 *)&qp->remote_mac_addr[0], | |
1421 | (u8 *)&params->remote_mac_addr[0], ETH_ALEN); |
1422 | if (params->use_local_mac) { | |
1423 | memcpy((u8 *)&qp->local_mac_addr[0], | |
1424 | (u8 *)&params->local_mac_addr[0], ETH_ALEN); |
1425 | } else { | |
1426 | memcpy((u8 *)&qp->local_mac_addr[0], | |
1427 | (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); | |
1428 | } | |
1429 | } | |
1430 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) | |
1431 | qp->rq_psn = params->rq_psn; | |
1432 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) | |
1433 | qp->sq_psn = params->sq_psn; | |
1434 | if (GET_FIELD(params->modify_flags, | |
1435 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) | |
1436 | qp->max_rd_atomic_req = params->max_rd_atomic_req; | |
1437 | if (GET_FIELD(params->modify_flags, | |
1438 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) | |
1439 | qp->max_rd_atomic_resp = params->max_rd_atomic_resp; | |
1440 | if (GET_FIELD(params->modify_flags, | |
1441 | QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) | |
1442 | qp->ack_timeout = params->ack_timeout; | |
1443 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) | |
1444 | qp->retry_cnt = params->retry_cnt; | |
1445 | if (GET_FIELD(params->modify_flags, | |
1446 | QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) | |
1447 | qp->rnr_retry_cnt = params->rnr_retry_cnt; | |
1448 | if (GET_FIELD(params->modify_flags, | |
1449 | QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) | |
1450 | qp->min_rnr_nak_timer = params->min_rnr_nak_timer; | |
1451 | ||
1452 | qp->sqd_async = params->sqd_async; | |
1453 | ||
1454 | prev_state = qp->cur_state; | |
1455 | if (GET_FIELD(params->modify_flags, | |
1456 | QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { | |
1457 | qp->cur_state = params->new_state; | |
1458 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", | |
1459 | qp->cur_state); | |
1460 | } | |
1461 | ||
7bfb399e YB |
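/* XRC initiator QPs carry only a requester and XRC target QPs only a
 * responder; all other QP types carry both.
 */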
1462 | switch (qp->qp_type) { |
1463 | case QED_RDMA_QP_TYPE_XRC_INI: | |
1464 | qp->has_req = 1; | |
1465 | break; | |
1466 | case QED_RDMA_QP_TYPE_XRC_TGT: | |
1467 | qp->has_resp = 1; | |
1468 | break; | |
1469 | default: | |
1470 | qp->has_req = 1; | |
1471 | qp->has_resp = 1; | |
1472 | } | |
1473 | ||
67b40dcc KM |
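/* Apply the transition in the protocol-specific layer; for iWARP the RoCE
 * state is first translated to its iWARP equivalent.
 */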
1474 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
1475 | enum qed_iwarp_qp_state new_state = | |
1476 | qed_roce2iwarp_state(qp->cur_state); | |
1477 | ||
1478 | rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0); | |
1479 | } else { | |
1480 | rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); | |
1481 | } | |
f1372ee1 KM |
1482 | |
1483 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); | |
1484 | return rc; | |
1485 | } | |
1486 | ||
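/* Register a memory region (TID) with the firmware: build a REGISTER_MR
 * ramrod from the supplied attributes (PBL, access rights, page sizes,
 * TID type) and post it in blocking (EBLOCK) mode.  Returns -EINVAL if
 * the firmware completion code is not RDMA_RETURN_OK.
 */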
1487 | static int | |
1488 | qed_rdma_register_tid(void *rdma_cxt, | |
1489 | struct qed_rdma_register_tid_in_params *params) | |
1490 | { | |
1491 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1492 | struct rdma_register_tid_ramrod_data *p_ramrod; | |
1493 | struct qed_sp_init_data init_data; | |
1494 | struct qed_spq_entry *p_ent; | |
1495 | enum rdma_tid_type tid_type; | |
1496 | u8 fw_return_code; | |
1497 | int rc; | |
1498 | ||
1499 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); | |
1500 | ||
1501 | /* Get SPQ entry */ | |
1502 | memset(&init_data, 0, sizeof(init_data)); | |
1503 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1504 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1505 | ||
1506 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, | |
1507 | p_hwfn->p_rdma_info->proto, &init_data); | |
1508 | if (rc) { | |
1509 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1510 | return rc; | |
1511 | } | |
1512 | ||
1513 | if (p_hwfn->p_rdma_info->last_tid < params->itid) | |
1514 | p_hwfn->p_rdma_info->last_tid = params->itid; | |
1515 | ||
1516 | p_ramrod = &p_ent->ramrod.rdma_register_tid; | |
1517 | ||
1518 | p_ramrod->flags = 0; | |
1519 | SET_FIELD(p_ramrod->flags, | |
1520 | RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, | |
1521 | params->pbl_two_level); | |
1522 | ||
1523 | SET_FIELD(p_ramrod->flags, | |
1524 | RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); | |
1525 | ||
1526 | SET_FIELD(p_ramrod->flags, | |
1527 | RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); | |
1528 | ||
1529 | /* Don't initialize D/C field, as it may override other bits. */ | |
1530 | if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr) | |
1531 | SET_FIELD(p_ramrod->flags, | |
1532 | RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, | |
1533 | params->page_size_log - 12); | |
1534 | ||
1535 | SET_FIELD(p_ramrod->flags, | |
1536 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, | |
1537 | params->remote_read); | |
1538 | ||
1539 | SET_FIELD(p_ramrod->flags, | |
1540 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, | |
1541 | params->remote_write); | |
1542 | ||
1543 | SET_FIELD(p_ramrod->flags, | |
1544 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, | |
1545 | params->remote_atomic); | |
1546 | ||
1547 | SET_FIELD(p_ramrod->flags, | |
1548 | RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, | |
1549 | params->local_write); | |
1550 | ||
1551 | SET_FIELD(p_ramrod->flags, | |
1552 | RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); | |
1553 | ||
1554 | SET_FIELD(p_ramrod->flags, | |
1555 | RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, | |
1556 | params->mw_bind); | |
1557 | ||
1558 | SET_FIELD(p_ramrod->flags1, | |
1559 | RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, | |
1560 | params->pbl_page_size_log - 12); | |
1561 | ||
1562 | SET_FIELD(p_ramrod->flags2, | |
1563 | RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); | |
1564 | ||
1565 | switch (params->tid_type) { | |
1566 | case QED_RDMA_TID_REGISTERED_MR: | |
1567 | tid_type = RDMA_TID_REGISTERED_MR; | |
1568 | break; | |
1569 | case QED_RDMA_TID_FMR: | |
1570 | tid_type = RDMA_TID_FMR; | |
1571 | break; | |
d52c89f1 MK |
1572 | case QED_RDMA_TID_MW: |
1573 | tid_type = RDMA_TID_MW; | |
f1372ee1 KM |
1574 | break; |
1575 | default: | |
1576 | rc = -EINVAL; | |
1577 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
fb5e7438 | 1578 | qed_sp_destroy_request(p_hwfn, p_ent); |
f1372ee1 KM |
1579 | return rc; |
1580 | } | |
1581 | SET_FIELD(p_ramrod->flags1, | |
1582 | RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); | |
1583 | ||
1584 | p_ramrod->itid = cpu_to_le32(params->itid); | |
1585 | p_ramrod->key = params->key; | |
1586 | p_ramrod->pd = cpu_to_le16(params->pd); | |
1587 | p_ramrod->length_hi = (u8)(params->length >> 32); | |
1588 | p_ramrod->length_lo = DMA_LO_LE(params->length); | |
1589 | if (params->zbva) { | |
1590 | /* Lower 32 bits of the registered MR address. | |
1591 | * In case of zero based MR, will hold FBO | |
1592 | */ | |
1593 | p_ramrod->va.hi = 0; | |
1594 | p_ramrod->va.lo = cpu_to_le32(params->fbo); | |
1595 | } else { | |
1596 | DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); | |
1597 | } | |
1598 | DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); | |
1599 | ||
1600 | /* DIF */ | |
1601 | if (params->dif_enabled) { | |
1602 | SET_FIELD(p_ramrod->flags2, | |
1603 | RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); | |
1604 | DMA_REGPAIR_LE(p_ramrod->dif_error_addr, | |
1605 | params->dif_error_addr); | |
f1372ee1 KM |
1606 | } |
1607 | ||
1608 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1609 | if (rc) | |
1610 | return rc; | |
1611 | ||
1612 | if (fw_return_code != RDMA_RETURN_OK) { | |
1613 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); | |
1614 | return -EINVAL; | |
1615 | } | |
1616 | ||
1617 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); | |
1618 | return rc; | |
1619 | } | |
1620 | ||
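/* De-register a TID: post a DEREGISTER_MR ramrod in blocking mode.  If the
 * firmware answers RDMA_RETURN_NIG_DRAIN_REQ the TID is still in use, so an
 * MCP drain is performed and the ramrod is sent one more time.
 */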
1621 | static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) | |
1622 | { | |
1623 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1624 | struct rdma_deregister_tid_ramrod_data *p_ramrod; | |
1625 | struct qed_sp_init_data init_data; | |
1626 | struct qed_spq_entry *p_ent; | |
1627 | struct qed_ptt *p_ptt; | |
1628 | u8 fw_return_code; | |
1629 | int rc; | |
1630 | ||
1631 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); | |
1632 | ||
1633 | /* Get SPQ entry */ | |
1634 | memset(&init_data, 0, sizeof(init_data)); | |
1635 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1636 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1637 | ||
1638 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, | |
1639 | p_hwfn->p_rdma_info->proto, &init_data); | |
1640 | if (rc) { | |
1641 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1642 | return rc; | |
1643 | } | |
1644 | ||
1645 | p_ramrod = &p_ent->ramrod.rdma_deregister_tid; | |
1646 | p_ramrod->itid = cpu_to_le32(itid); | |
1647 | ||
1648 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1649 | if (rc) { | |
1650 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1651 | return rc; | |
1652 | } | |
1653 | ||
1654 | if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { | |
1655 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); | |
1656 | return -EINVAL; | |
1657 | } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { | |
1658 | /* The TID is still in use; a NIG drain is | |
1659 | * required before the ramrod can be resent | |
1660 | */ | |
1661 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1662 | if (!p_ptt) { | |
1663 | rc = -EBUSY; | |
1664 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1665 | "Failed to acquire PTT\n"); | |
1666 | return rc; | |
1667 | } | |
1668 | ||
1669 | rc = qed_mcp_drain(p_hwfn, p_ptt); | |
1670 | if (rc) { | |
1671 | qed_ptt_release(p_hwfn, p_ptt); | |
1672 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1673 | "Drain failed\n"); | |
1674 | return rc; | |
1675 | } | |
1676 | ||
1677 | qed_ptt_release(p_hwfn, p_ptt); | |
1678 | ||
1679 | /* Resend the ramrod */ | |
1680 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1681 | RDMA_RAMROD_DEREGISTER_MR, | |
1682 | p_hwfn->p_rdma_info->proto, | |
1683 | &init_data); | |
1684 | if (rc) { | |
1685 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1686 | "Failed to init sp-element\n"); | |
1687 | return rc; | |
1688 | } | |
1689 | ||
1690 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1691 | if (rc) { | |
1692 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1693 | "Ramrod failed\n"); | |
1694 | return rc; | |
1695 | } | |
1696 | ||
1697 | if (fw_return_code != RDMA_RETURN_OK) { | |
1698 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", | |
1699 | fw_return_code); | |
1700 | return -EINVAL; | |
1701 | } | |
1702 | } | |
1703 | ||
1704 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); | |
1705 | return rc; | |
1706 | } | |
1707 | ||
f1372ee1 KM |
1708 | static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) |
1709 | { | |
7e50769c | 1710 | return QED_AFFIN_HWFN(cdev); |
f1372ee1 KM |
1711 | } |
1712 | ||
7bfb399e YB |
1713 | static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, |
1714 | bool is_xrc) | |
1715 | { | |
1716 | if (is_xrc) | |
1717 | return &p_hwfn->p_rdma_info->xrc_srq_map; | |
1718 | ||
1719 | return &p_hwfn->p_rdma_info->srq_map; | |
1720 | } | |
1721 | ||
39dbc646 YB |
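/* Modify an existing SRQ: post a MODIFY_SRQ ramrod that updates the WQE
 * limit of the queue identified by srq_id.
 */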
1722 | static int qed_rdma_modify_srq(void *rdma_cxt, |
1723 | struct qed_rdma_modify_srq_in_params *in_params) | |
1724 | { | |
1725 | struct rdma_srq_modify_ramrod_data *p_ramrod; | |
1726 | struct qed_sp_init_data init_data = {}; | |
1727 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1728 | struct qed_spq_entry *p_ent; | |
1729 | u16 opaque_fid; | |
1730 | int rc; | |
1731 | ||
1732 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1733 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1734 | ||
1735 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1736 | RDMA_RAMROD_MODIFY_SRQ, | |
1737 | p_hwfn->p_rdma_info->proto, &init_data); | |
1738 | if (rc) | |
1739 | return rc; | |
1740 | ||
1741 | p_ramrod = &p_ent->ramrod.rdma_modify_srq; | |
1742 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); | |
1743 | opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1744 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); | |
1745 | p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); | |
1746 | ||
1747 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1748 | if (rc) | |
1749 | return rc; | |
1750 | ||
7bfb399e YB |
1751 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n", |
1752 | in_params->srq_id, in_params->is_xrc); | |
39dbc646 YB |
1753 | |
1754 | return rc; | |
1755 | } | |
1756 | ||
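/* Destroy an SRQ (regular or XRC): post a DESTROY_SRQ ramrod and release the
 * id back to the matching bitmap.  Regular SRQ ids are stored relative to
 * srq_id_offset; XRC SRQ ids are not offset.
 */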
1757 | static int | |
1758 | qed_rdma_destroy_srq(void *rdma_cxt, | |
1759 | struct qed_rdma_destroy_srq_in_params *in_params) | |
1760 | { | |
1761 | struct rdma_srq_destroy_ramrod_data *p_ramrod; | |
1762 | struct qed_sp_init_data init_data = {}; | |
1763 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1764 | struct qed_spq_entry *p_ent; | |
1765 | struct qed_bmap *bmap; | |
1766 | u16 opaque_fid; | |
7bfb399e | 1767 | u16 offset; |
39dbc646 YB |
1768 | int rc; |
1769 | ||
1770 | opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1771 | ||
1772 | init_data.opaque_fid = opaque_fid; | |
1773 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1774 | ||
1775 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1776 | RDMA_RAMROD_DESTROY_SRQ, | |
1777 | p_hwfn->p_rdma_info->proto, &init_data); | |
1778 | if (rc) | |
1779 | return rc; | |
1780 | ||
1781 | p_ramrod = &p_ent->ramrod.rdma_destroy_srq; | |
1782 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); | |
1783 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); | |
1784 | ||
1785 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1786 | if (rc) | |
1787 | return rc; | |
1788 | ||
7bfb399e YB |
1789 | bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); |
1790 | offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; | |
39dbc646 YB |
1791 | |
1792 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
7bfb399e | 1793 | qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset); |
39dbc646 YB |
1794 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); |
1795 | ||
7bfb399e YB |
1796 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1797 | "XRC/SRQ destroyed Id = %x, is_xrc=%u\n", | |
1798 | in_params->srq_id, in_params->is_xrc); | |
39dbc646 YB |
1799 | |
1800 | return rc; | |
1801 | } | |
1802 | ||
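/* Create an SRQ (regular or XRC): allocate an id from the proper bitmap,
 * allocate the ILT line for the element, then post a CREATE_SRQ ramrod.
 * The id returned to the caller includes srq_id_offset for regular SRQs;
 * on failure the id is released back to the bitmap.
 */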
1803 | static int | |
1804 | qed_rdma_create_srq(void *rdma_cxt, | |
1805 | struct qed_rdma_create_srq_in_params *in_params, | |
1806 | struct qed_rdma_create_srq_out_params *out_params) | |
1807 | { | |
1808 | struct rdma_srq_create_ramrod_data *p_ramrod; | |
1809 | struct qed_sp_init_data init_data = {}; | |
1810 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1811 | enum qed_cxt_elem_type elem_type; | |
1812 | struct qed_spq_entry *p_ent; | |
1813 | u16 opaque_fid, srq_id; | |
1814 | struct qed_bmap *bmap; | |
1815 | u32 returned_id; | |
7bfb399e | 1816 | u16 offset; |
39dbc646 YB |
1817 | int rc; |
1818 | ||
7bfb399e | 1819 | bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); |
39dbc646 YB |
1820 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); |
1821 | rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); | |
1822 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1823 | ||
1824 | if (rc) { | |
7bfb399e YB |
1825 | DP_NOTICE(p_hwfn, |
1826 | "failed to allocate xrc/srq id (is_xrc=%u)\n", | |
1827 | in_params->is_xrc); | |
39dbc646 YB |
1828 | return rc; |
1829 | } | |
1830 | ||
7bfb399e | 1831 | elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); |
39dbc646 YB |
1832 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); |
1833 | if (rc) | |
1834 | goto err; | |
7bfb399e | 1835 | |
39dbc646 YB |
1836 | opaque_fid = p_hwfn->hw_info.opaque_fid; |
1839 | init_data.opaque_fid = opaque_fid; | |
1840 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1841 | ||
1842 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1843 | RDMA_RAMROD_CREATE_SRQ, | |
1844 | p_hwfn->p_rdma_info->proto, &init_data); | |
1845 | if (rc) | |
1846 | goto err; | |
1847 | ||
1848 | p_ramrod = &p_ent->ramrod.rdma_create_srq; | |
1849 | DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); | |
1850 | p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); | |
1851 | p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); | |
39dbc646 YB |
1852 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); |
1853 | p_ramrod->page_size = cpu_to_le16(in_params->page_size); | |
1854 | DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); | |
7bfb399e YB |
1855 | offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; |
1856 | srq_id = (u16)returned_id + offset; | |
1857 | p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); | |
39dbc646 | 1858 | |
7bfb399e YB |
1859 | if (in_params->is_xrc) { |
1860 | SET_FIELD(p_ramrod->flags, | |
1861 | RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); | |
1862 | SET_FIELD(p_ramrod->flags, | |
1863 | RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, | |
1864 | in_params->reserved_key_en); | |
1865 | p_ramrod->xrc_srq_cq_cid = | |
1866 | cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | | |
1867 | in_params->cq_cid); | |
1868 | p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); | |
1869 | } | |
39dbc646 YB |
1870 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
1871 | if (rc) | |
1872 | goto err; | |
1873 | ||
1874 | out_params->srq_id = srq_id; | |
1875 | ||
7bfb399e YB |
1876 | DP_VERBOSE(p_hwfn, |
1877 | QED_MSG_RDMA, | |
1878 | "XRC/SRQ created Id = %x (is_xrc=%u)\n", | |
1879 | out_params->srq_id, in_params->is_xrc); | |
39dbc646 YB |
1880 | return rc; |
1881 | ||
1882 | err: | |
1883 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1884 | qed_bmap_release_id(p_hwfn, bmap, returned_id); | |
1885 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1886 | ||
1887 | return rc; | |
1888 | } | |
1889 | ||
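/* Report whether any RDMA CIDs (i.e. QPs) are still allocated.  Returns
 * false if RDMA was never activated or the cid_map is empty.
 */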
b71b9afd | 1890 | bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) |
f1372ee1 KM |
1891 | { |
1892 | bool result; | |
1893 | ||
291d57f6 MK |
1894 | /* If RDMA wasn't activated yet, there are naturally no QPs */ | |
1895 | if (!p_hwfn->p_rdma_info->active) | |
f1372ee1 KM |
1896 | return false; |
1897 | ||
1898 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1899 | if (!p_hwfn->p_rdma_info->cid_map.bitmap) | |
1900 | result = false; | |
1901 | else | |
1902 | result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); | |
1903 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1904 | return result; | |
1905 | } | |
1906 | ||
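/* Enable or disable DPM for this PF in the DORQ block.  DPM is enabled only
 * when neither DCBX nor the doorbell BAR configuration forbids EDPM.
 */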
b71b9afd | 1907 | void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
f1372ee1 KM |
1908 | { |
1909 | u32 val; | |
1910 | ||
1911 | val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; | |
1912 | ||
1913 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); | |
1914 | DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), | |
1915 | "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", | |
1916 | val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); | |
1917 | } | |
1918 | ||
f1372ee1 KM |
1919 | |
1920 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
1921 | { | |
1922 | p_hwfn->db_bar_no_edpm = true; | |
1923 | ||
1924 | qed_rdma_dpm_conf(p_hwfn, p_ptt); | |
1925 | } | |
1926 | ||
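/* Bring up RDMA on this hwfn: allocate the RDMA resources, run the setup
 * sequence under a PTT and mark the instance active.  On failure the
 * resources and the PTT are released in reverse order.
 */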
1927 | static int qed_rdma_start(void *rdma_cxt, | |
1928 | struct qed_rdma_start_in_params *params) | |
1929 | { | |
1930 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1931 | struct qed_ptt *p_ptt; | |
1932 | int rc = -EBUSY; | |
1933 | ||
1934 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1935 | "desired_cnq = %08x\n", params->desired_cnq); | |
1936 | ||
1937 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1938 | if (!p_ptt) | |
1939 | goto err; | |
1940 | ||
291d57f6 | 1941 | rc = qed_rdma_alloc(p_hwfn); |
f1372ee1 KM |
1942 | if (rc) |
1943 | goto err1; | |
1944 | ||
1945 | rc = qed_rdma_setup(p_hwfn, p_ptt, params); | |
1946 | if (rc) | |
1947 | goto err2; | |
1948 | ||
1949 | qed_ptt_release(p_hwfn, p_ptt); | |
291d57f6 | 1950 | p_hwfn->p_rdma_info->active = 1; |
f1372ee1 KM |
1951 | |
1952 | return rc; | |
1953 | ||
1954 | err2: | |
1955 | qed_rdma_free(p_hwfn); | |
1956 | err1: | |
1957 | qed_ptt_release(p_hwfn, p_ptt); | |
1958 | err: | |
1959 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); | |
1960 | return rc; | |
1961 | } | |
1962 | ||
1963 | static int qed_rdma_init(struct qed_dev *cdev, | |
1964 | struct qed_rdma_start_in_params *params) | |
1965 | { | |
7e50769c | 1966 | return qed_rdma_start(QED_AFFIN_HWFN(cdev), params); |
f1372ee1 KM |
1967 | } |
1968 | ||
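/* Release the DPI allocated for a user context back to the dpi_map. */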
1969 | static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) | |
1970 | { | |
1971 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1972 | ||
1973 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); | |
1974 | ||
1975 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1976 | qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); | |
1977 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1978 | } | |
1979 | ||
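/* Swap the LLH MAC filter used for RoCE LL2 traffic: remove the old address
 * (if provided) and add the new one (if provided).
 */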
1980 | static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, | |
1981 | u8 *old_mac_address, | |
1982 | u8 *new_mac_address) | |
1983 | { | |
f1372ee1 KM |
1984 | int rc = 0; |
1985 | ||
f1372ee1 | 1986 | if (old_mac_address) |
79284ade | 1987 | qed_llh_remove_mac_filter(cdev, 0, old_mac_address); |
f1372ee1 | 1988 | if (new_mac_address) |
79284ade | 1989 | rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address); |
f1372ee1 KM |
1990 | |
1991 | if (rc) | |
1992 | DP_ERR(cdev, | |
1993 | "qed roce ll2 mac filter set: failed to add MAC filter\n"); | |
1994 | ||
1995 | return rc; | |
1996 | } | |
1997 | ||
3576e99e MK |
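/* In iWARP CMT mode, set the engine affinity of non-RoCE traffic on ppfid 0:
 * both engines when b_reset is set, otherwise the engine chosen by the L2
 * affinity hint.
 */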
1998 | static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset) |
1999 | { | |
2000 | enum qed_eng eng; | |
2001 | u8 ppfid = 0; | |
2002 | int rc; | |
2003 | ||
2004 | /* Make sure iwarp cmt mode is enabled before setting affinity */ | |
2005 | if (!cdev->iwarp_cmt) | |
2006 | return -EINVAL; | |
2007 | ||
2008 | if (b_reset) | |
2009 | eng = QED_BOTH_ENG; | |
2010 | else | |
2011 | eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0; | |
2012 | ||
2013 | rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); | |
2014 | if (rc) { | |
2015 | DP_NOTICE(cdev, | |
2016 | "Failed to set the engine affinity of ppfid %d\n", | |
2017 | ppfid); | |
2018 | return rc; | |
2019 | } | |
2020 | ||
2021 | DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP), | |
2022 | "LLH: Set the engine affinity of non-RoCE packets as %d\n", | |
2023 | eng); | |
2024 | ||
2025 | return 0; | |
2026 | } | |
2027 | ||
f1372ee1 KM |
2028 | static const struct qed_rdma_ops qed_rdma_ops_pass = { |
2029 | .common = &qed_common_ops_pass, | |
2030 | .fill_dev_info = &qed_fill_rdma_dev_info, | |
2031 | .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, | |
2032 | .rdma_init = &qed_rdma_init, | |
2033 | .rdma_add_user = &qed_rdma_add_user, | |
2034 | .rdma_remove_user = &qed_rdma_remove_user, | |
2035 | .rdma_stop = &qed_rdma_stop, | |
2036 | .rdma_query_port = &qed_rdma_query_port, | |
2037 | .rdma_query_device = &qed_rdma_query_device, | |
2038 | .rdma_get_start_sb = &qed_rdma_get_sb_start, | |
2039 | .rdma_get_rdma_int = &qed_rdma_get_int, | |
2040 | .rdma_set_rdma_int = &qed_rdma_set_int, | |
2041 | .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, | |
2042 | .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, | |
2043 | .rdma_alloc_pd = &qed_rdma_alloc_pd, | |
2044 | .rdma_dealloc_pd = &qed_rdma_free_pd, | |
7bfb399e YB |
2045 | .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, |
2046 | .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, | |
f1372ee1 KM |
2047 | .rdma_create_cq = &qed_rdma_create_cq, |
2048 | .rdma_destroy_cq = &qed_rdma_destroy_cq, | |
2049 | .rdma_create_qp = &qed_rdma_create_qp, | |
2050 | .rdma_modify_qp = &qed_rdma_modify_qp, | |
2051 | .rdma_query_qp = &qed_rdma_query_qp, | |
2052 | .rdma_destroy_qp = &qed_rdma_destroy_qp, | |
2053 | .rdma_alloc_tid = &qed_rdma_alloc_tid, | |
2054 | .rdma_free_tid = &qed_rdma_free_tid, | |
2055 | .rdma_register_tid = &qed_rdma_register_tid, | |
2056 | .rdma_deregister_tid = &qed_rdma_deregister_tid, | |
39dbc646 YB |
2057 | .rdma_create_srq = &qed_rdma_create_srq, |
2058 | .rdma_modify_srq = &qed_rdma_modify_srq, | |
2059 | .rdma_destroy_srq = &qed_rdma_destroy_srq, | |
f1372ee1 KM |
2060 | .ll2_acquire_connection = &qed_ll2_acquire_connection, |
2061 | .ll2_establish_connection = &qed_ll2_establish_connection, | |
2062 | .ll2_terminate_connection = &qed_ll2_terminate_connection, | |
2063 | .ll2_release_connection = &qed_ll2_release_connection, | |
2064 | .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, | |
2065 | .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, | |
2066 | .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, | |
2067 | .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, | |
2068 | .ll2_get_stats = &qed_ll2_get_stats, | |
3576e99e | 2069 | .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin, |
4b0fdd7c | 2070 | .iwarp_connect = &qed_iwarp_connect, |
65a91a6c KM |
2071 | .iwarp_create_listen = &qed_iwarp_create_listen, |
2072 | .iwarp_destroy_listen = &qed_iwarp_destroy_listen, | |
4b0fdd7c KM |
2073 | .iwarp_accept = &qed_iwarp_accept, |
2074 | .iwarp_reject = &qed_iwarp_reject, | |
2075 | .iwarp_send_rtr = &qed_iwarp_send_rtr, | |
f1372ee1 KM |
2076 | }; |
2077 | ||
2078 | const struct qed_rdma_ops *qed_get_rdma_ops(void) | |
2079 | { | |
2080 | return &qed_rdma_ops_pass; | |
2081 | } | |
2082 | EXPORT_SYMBOL(qed_get_rdma_ops); |
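/* Illustrative sketch only (not part of the driver): how an RDMA client such
 * as qedr might obtain this ops table and resolve its per-device RDMA
 * context.  The 'cdev' pointer is assumed to come from the qed core probe
 * flow; error handling is abbreviated.
 */
#if 0
static void *example_get_rdma_ctx(struct qed_dev *cdev)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();

	/* Resolves to the hwfn affined to RDMA (see qed_rdma_get_rdma_ctx) */
	return ops->rdma_get_rdma_ctx(cdev);
}
#endif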