/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

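/* check a requested set of queue pair capabilities against the
 * device limits; returns 0 if they fit, -EINVAL otherwise
 */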
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

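/* validate the init attributes passed to the create qp verb: both
 * completion queues must be present, the capabilities must fit the
 * device, and SMI/GSI QPs may only be created once and only on port 1
 */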
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

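/* the responder keeps one resp_res slot per inbound RDMA read or atomic
 * operation it may have outstanding; allocate an array of n such slots
 */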
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

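/* release whatever a single slot still holds (skb and qp reference for
 * an atomic, mr reference for a read) and mark the slot unused
 */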
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

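/* like free_rd_atomic_resources() but keeps the resources array itself;
 * used when the qp is reset rather than torn down
 */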
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

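/* initialization common to all qp types: default path mtu, qp number
 * (0 for SMI, 1 for GSI, otherwise the pool index), lists, locks and
 * counters
 */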
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

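/* requester side setup: the kernel UDP socket used to send packets, the
 * send queue and its mmap info for user space, the req and comp tasks,
 * and the rnr-nak and retransmit timers
 */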
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	init_timer(&qp->rnr_nak_timer);
	qp->rnr_nak_timer.function = rnr_nak_timer;
	qp->rnr_nak_timer.data = (unsigned long)qp;

	init_timer(&qp->retrans_timer);
	qp->retrans_timer.function = retransmit_timer;
	qp->retrans_timer.data = (unsigned long)qp;
	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	return 0;
}

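/* responder side setup: the receive queue and its mmap info for user
 * space (unless an srq is attached), and the resp task
 */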
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves drain work and packet queues
	 * etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us];
			 * e.g. attr->timeout = 14 gives 4.096 us * 2^14, roughly 67 ms.
			 */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	del_timer_sync(&qp->retrans_timer);
	del_timer_sync(&qp->rnr_nak_timer);

	rxe_cleanup_task(&qp->req.task);
	if (qp_type(qp) == IB_QPT_RC)
		rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
{
	struct rxe_qp *qp = arg;

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
}