/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
						qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

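/*
 * Enable each RQ backing this QP group on the forwarding device. If
 * enabling any queue fails, the queues enabled so far are rolled back
 * so the group is left fully disabled.
 */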
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

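/*
 * Build a flow for the custom RoCE transport: reserve the requested
 * port (the transport layer presumably picks a free one when
 * port_num is 0), aim a usnic filter at that port, steer matches to
 * the default RQ, and wrap the forwarding flow in a qp_grp_flow
 * handle that records how to undo the reservation.
 */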
static struct usnic_ib_qp_grp_flow *
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

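/*
 * Build a flow for the UDP transport. Unlike the RoCE-custom case,
 * the port comes from a caller-supplied socket fd: the socket is
 * looked up, verified to actually be UDP, and its bound address and
 * port become the filter. A reference on the socket is held for the
 * lifetime of the flow and dropped in release_udp_flow().
 */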
static struct usnic_ib_qp_grp_flow *
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

static struct usnic_ib_qp_grp_flow *
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

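/*
 * Drive the QP group state machine. Flows (filters) are added on the
 * way into INIT, the underlying queues are enabled on INIT->RTR and
 * disabled on transitions back down, and entry into ERR tears down
 * any installed flows and raises IB_EVENT_QP_FATAL to the consumer's
 * event handler. A typical bring-up from the verbs layer would look
 * roughly like this (illustrative sketch, not a real caller):
 *
 *	usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, trans_spec);
 *	usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
 *	usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
 */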
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

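/*
 * Acquire the vNIC resource chunks named in res_spec and return them
 * as a NULL-terminated array (hence the +1 when sizing the array);
 * free_qp_grp_res() and usnic_ib_qp_grp_get_chunk() rely on that
 * terminator to find the end of the list.
 */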
static struct usnic_vnic_res_chunk **
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	/* Unwind every chunk acquired so far, including index 0 */
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

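/*
 * Bind a QP group to a VF. The first group bound to a VF attaches
 * the PD's IOMMU domain to the VF's PCI device; later groups only
 * bump the reference count, so every group on one VF must share the
 * same PD (hence the WARN_ON below).
 */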
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

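/*
 * Derive the group id, which also becomes the QP number, from the
 * group's flow: the reserved port for custom RoCE, or the socket's
 * bound port for UDP.
 */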
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

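/*
 * Create a QP group: verify res_spec meets the minimum for the
 * transport, acquire the vNIC resources, bind the group to the
 * VF/PD, install the initial flow described by transport_spec, and
 * derive the QP number from that flow. Called with vf->lock held.
 */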
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

struct usnic_vnic_res_chunk *
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}