/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

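/* rxe_verbs.c implements the ib_device verbs entry points for the
 * soft-RoCE (rxe) driver: device and port queries, PD/AH/SRQ/QP/CQ/MR
 * life cycle, and the post_send/post_recv fast paths that feed the
 * rxe requester and responder tasks.
 */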
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

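/* Map an ethtool link speed in Mb/s onto the nearest IB
 * (active_width, active_speed) pair; e.g. a 10 Gb/s Ethernet link is
 * reported as 1X FDR10 and anything above 40 Gb/s as 4X EDR.
 */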
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

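/* Port attributes are kept in rxe->port; only the active speed and
 * width are derived at query time from the underlying netdev via its
 * ethtool ops, falling back to 1 Gb/s when neither get_link_ksettings
 * nor the legacy get_settings hook is available.
 */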
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

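/* rxe relies on the GID table maintained by the IB core (see the
 * ib_get_cached_gid() call above), so add_gid/del_gid only have to
 * validate that the index fits the table.
 */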
static int rxe_add_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

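/* Resolve the source GID for an address handle and fill in the rxe
 * address vector (av) used to build packet headers. The GID lookup
 * takes a reference on the associated netdev, which is dropped before
 * returning.
 */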
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

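/* Copy one ib_recv_wr into the next free slot of a receive queue
 * ring. Called with the queue's producer_lock held by the callers in
 * rxe_post_recv() and rxe_post_srq_recv().
 */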
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

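/* Check a send work request against the queue pair limits: the SGE
 * count, the 8-byte size and alignment rules for atomics, and the
 * inline data limit.
 */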
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

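/* Translate the opcode-specific fields of an ib_send_wr into the
 * rxe_send_wr kept inside the WQE. UD-style QPs (UD/SMI/GSI) carry
 * the remote QPN and Q_Key; connected QPs dispatch on the opcode
 * instead.
 */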
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - also needs the remote addr/rkey */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

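/* Build a complete send WQE in the slot returned by producer_addr().
 * Inline data is copied into the WQE itself; otherwise the SGE list
 * is copied and the DMA state initialized for the requester to walk.
 */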
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

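/* Post a single validated WR to the send queue under sq_lock. The
 * smp_wmb() pairs with the consumer side in the requester task so the
 * WQE contents are visible before the producer index moves.
 */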
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

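/* Post a chain of WRs from a kernel caller, stopping at (and
 * reporting) the first bad WR, then kick the requester task either
 * inline or via its tasklet depending on must_sched below.
 */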
static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds an irq
	 * lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

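/* Receive WRs are accepted only on QPs that own their receive queue;
 * QPs attached to an SRQ must post through rxe_post_srq_recv()
 * instead. If the responder is in the error state, it is kicked so it
 * can flush the newly posted WQEs with error completions.
 */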
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

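/* Arm the CQ for the next completion event. With
 * IB_CQ_REPORT_MISSED_EVENTS, return 1 when completions are already
 * queued so the caller knows to poll again instead of sleeping.
 */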
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

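/* Callback for ib_sg_to_pages(): record one page-sized buffer of a
 * fast-register MR in the mr->map array, RXE_BUF_PER_MAP buffers per
 * map entry.
 */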
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

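/* Register the rxe device with the IB core: fill in the ib_device
 * fields and verbs function table, route DMA through dma_virt_ops
 * (rxe has no real DMA engine), allocate the crc32 transform used for
 * ICRC generation and checking, then register the device and create
 * the sysfs "parent" attribute.
 */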
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(rxe->tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(rxe->tfm));
		return PTR_ERR(rxe->tfm);
	}

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}