/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

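/* Map an Ethernet link speed reported by ethtool (in Mb/s) onto an
 * approximate IB speed/width pair; anything above 40G is reported
 * as 4X EDR.
 */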
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

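/* rxe exposes exactly one port.  The active speed is taken from the
 * underlying netdev through its ethtool ops, falling back to 1G when
 * the driver implements neither get_link_ksettings nor get_settings.
 */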
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	/* valid indices are 0..RXE_PORT_GID_TBL_LEN - 1 */
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

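/* Resolve the source GID for an address vector and fill in the IP
 * addressing information derived from it.  ib_get_cached_gid() takes
 * a reference on the GID entry's netdev, which must be dropped here.
 */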
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

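/* Build one receive WQE in the producer slot of the receive queue.
 * The producer index is only advanced after the WQE is fully written,
 * with a write barrier so the consumer never sees a partial WQE.
 */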
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	/* reject unexpected user input before taking a qp reference so
	 * that this error path cannot leak the allocation
	 */
	if (udata && udata->inlen) {
		err = -EINVAL;
		goto err1;
	}

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata)
		qp->is_user = 1;

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err2;

	return &qp->ibqp;

err2:
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

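/* Sanity-check a send WR against the queue limits: the SGE count,
 * the inline data size, and the 8-byte length and alignment rules
 * for atomic operations.
 */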
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

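/* Translate the opcode-dependent fields of an ib_send_wr into the
 * rxe-private rxe_send_wr layout.  UD-style QPs carry the remote
 * QPN and Q_Key; connected QPs decode RDMA, atomic, and memory
 * registration work requests by opcode.
 */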
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - write w/ imm also needs rdma fields */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

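/* Validate and enqueue a single send WR under the SQ lock.  As on
 * the receive side, the producer index is only advanced once the
 * WQE contents are visible.
 */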
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of a GSI QP because ib_send_mad() holds the
	 * irq lock, and the requester calls ip_local_out_sk(), which
	 * takes a spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

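/* CQ setup follows the same pattern as the other verbs objects:
 * check the attributes, allocate from the pool, then initialize the
 * completion queue itself (shared with userspace when udata is given).
 */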
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

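/* Page-list callback for ib_sg_to_pages(): record one page-sized
 * buffer of a fast-registration MR in the MR's map structure.
 */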
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	/* sysfs buffers are PAGE_SIZE; a 16-byte cap could truncate an
	 * IFNAMSIZ-long interface name
	 */
	return snprintf(buf, PAGE_SIZE, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

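/* Fill in the ib_device with rxe's verbs entry points, advertise the
 * supported uverbs commands, register with the IB core, and create
 * the sysfs attribute files.
 */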
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}