/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

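/* Check the requested queue capabilities against the limits the device
 * advertises in rxe->attr. A QP attached to an SRQ has no receive queue
 * of its own, so the receive limits are only checked when has_srq is
 * false.
 */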
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

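/* The responder keeps an array of resp_res entries, one per inbound
 * RDMA read or atomic operation that may be outstanding at a time
 * (i.e. max_dest_rd_atomic entries), so duplicate requests can be
 * answered from saved state.
 */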
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

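/* An atomic resource holds a reference on the QP plus the saved reply
 * skb; a read resource may hold a reference on the MR it was reading
 * from. Drop whichever the entry holds and mark the entry free.
 */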
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
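	/* default to the smallest path MTU (enum value 1 == IB_MTU_256)
	 * until modify_qp sets the real one
	 */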
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

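	/* each QP owns a kernel UDP socket; outgoing skbs are charged to
	 * it and it caches the route to the peer (cf. sk_dst_reset() in
	 * rxe_qp_do_cleanup() below)
	 */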
	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

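	/* a send WQE must have room for either the full scatter/gather
	 * list or the largest inline payload, since inline data is
	 * copied into the space otherwise used by the SGEs
	 */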
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

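/* Creation order matters: take references on the objects the QP points
 * at first, then initialize the common fields, the requester side and
 * finally the responder side; the error path unwinds in reverse.
 */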
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb; this routine checks all the parameters
 * before making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

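/* Move the send queue toward the drained state. The completer task
 * finishes the transition to QP_STATE_DRAINED (and raises the SQ
 * drained event) once all outstanding work has completed; see
 * rxe_completer().
 */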
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

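	/* both rd_atomic depths are rounded up to the next power of two;
	 * changing max_dest_rd_atomic frees the responder's old resource
	 * array and allocates one at the new depth
	 */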
	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
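			/* e.g. attr->timeout = 14 gives 4096 ns << 14,
			 * i.e. about 67 ms per local ACK timeout
			 */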
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that query this state typically
		 * spin on it; yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive WRs or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

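/* Note: teardown can sleep (e.g. sock_release()), but the final
 * rxe_drop_ref() may come from atomic context, so the real work is
 * deferred via execute_in_process_context(), which calls
 * rxe_qp_do_cleanup() directly when already in process context and
 * otherwise queues cleanup_work.
 */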
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}