/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

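/*
 * Tear down a user context: free any mmap entries still queued on the
 * context and release the per-context device resources.
 */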
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	pr_debug("context %p\n", context);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);

	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}

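/*
 * Set up a new user context and, when the user library is new enough to
 * understand it, hand back an mmap key for the device status page.
 */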
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	pr_debug("ibdev %p\n", ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}

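/*
 * Map one of three things into user space, selected by the address the
 * mmap key resolves to: the MA_SYNC register in BAR0, user doorbell/OCQP
 * space in BAR2, or contiguous DMA memory backing a work queue or
 * completion queue.
 */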
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		     pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
			    pci_resource_len(rdev->lldi.pdev, 2)))) {

		/*
		 * Map user DB or OCQP memory...
		 */
		if (addr >= rdev->oc_mw_pa)
			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
		else {
			if (!is_t4(rdev->lldi.adapter_type))
				vma->vm_page_prot =
					t4_pgprot_wc(vma->vm_page_prot);
			else
				vma->vm_page_prot =
					pgprot_noncached(vma->vm_page_prot);
		}
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

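/* Return a PD's id to the resource pool and update the usage counters. */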
static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
	return 0;
}

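/*
 * Allocate a PD id from the resource pool and, for user PDs, copy the id
 * back to the user library.
 */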
static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct ib_device *ibdev = pd->device;
	u32 pdid;
	struct c4iw_dev *rhp;

	pr_debug("ibdev %p\n", ibdev);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return -EINVAL;

	php->pdid = pdid;
	php->rhp = rhp;
	if (udata) {
		struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			c4iw_deallocate_pd(&php->ibpd, udata);
			return -EFAULT;
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);
	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
	return 0;
}

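/* iWARP has no real GID table; synthesize a GID from the port's MAC address. */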
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct c4iw_dev *dev;

	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
		 ibdev, port, index, gid);
	if (!port)
		return -EINVAL;
	dev = to_c4iw_dev(ibdev);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
	return 0;
}

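/* Report device capabilities and limits from the LLD info and T4 constants. */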
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct c4iw_dev *dev;

	pr_debug("ibdev %p\n", ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_c4iw_dev(ibdev);
	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
	props->fw_ver = dev->rdev.lldi.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
	props->max_srq = dev->rdev.lldi.vr->srq.size;
	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
	props->max_recv_sge = T4_MAX_RECV_SGE;
	props->max_srq_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
	props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
				    c4iw_max_read_depth);
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->max_cq = dev->rdev.lldi.vr->qp.size;
	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len =
		t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);

	return 0;
}

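/* Derive port attributes (speed, width) from the underlying Ethernet device. */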
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
			   struct ib_port_attr *props)
{
	int ret = 0;

	pr_debug("ibdev %p\n", ibdev);
	ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
			       &props->active_width);

	props->port_cap_flags =
		IB_PORT_CM_SUP |
		IB_PORT_SNMP_TUNNEL_SUP |
		IB_PORT_REINIT_SUP |
		IB_PORT_DEVICE_MGMT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->max_msg_sz = -1;

	return ret;
}

static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

	pr_debug("dev 0x%p\n", dev);
	return sysfs_emit(
		buf, "%d\n",
		CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

	pr_debug("dev 0x%p\n", dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sysfs_emit(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

	pr_debug("dev 0x%p\n", dev);
	return sysfs_emit(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
			  c4iw_dev->rdev.lldi.pdev->device);
}
static DEVICE_ATTR_RO(board_id);

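/*
 * Hardware TCP statistics exported through the rdma_hw_stats interface.
 * The enum indexes the name table below and the values filled in by
 * c4iw_get_mib().
 */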
enum counters {
	IP4INSEGS,
	IP4OUTSEGS,
	IP4RETRANSSEGS,
	IP4OUTRSTS,
	IP6INSEGS,
	IP6OUTSEGS,
	IP6RETRANSSEGS,
	IP6OUTRSTS,
	NR_COUNTERS
};

static const char * const names[] = {
	[IP4INSEGS] = "ip4InSegs",
	[IP4OUTSEGS] = "ip4OutSegs",
	[IP4RETRANSSEGS] = "ip4RetransSegs",
	[IP4OUTRSTS] = "ip4OutRsts",
	[IP6INSEGS] = "ip6InSegs",
	[IP6OUTSEGS] = "ip6OutSegs",
	[IP6RETRANSSEGS] = "ip6RetransSegs",
	[IP6OUTRSTS] = "ip6OutRsts"
};

static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

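/* Pull the adapter's TCP MIB counters from the LLD and fill in the stats. */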
static int c4iw_get_mib(struct ib_device *ibdev,
			struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct tp_tcp_stats v4, v6;
	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
	stats->value[IP4INSEGS] = v4.tcp_in_segs;
	stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
	stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
	stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
	stats->value[IP6INSEGS] = v6.tcp_in_segs;
	stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
	stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
	stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;

	return stats->num_counters;
}

static struct attribute *c4iw_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group c4iw_attr_group = {
	.attrs = c4iw_class_attributes,
};

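/* Advertise iWARP port capabilities and the GID table size to the RDMA core. */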
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void get_dev_fw_str(struct ib_device *dev, char *str)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev);

	pr_debug("dev 0x%p\n", dev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

static const struct ib_device_ops c4iw_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_CXGB4,
	.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,

	.alloc_hw_stats = c4iw_alloc_stats,
	.alloc_mr = c4iw_alloc_mr,
	.alloc_pd = c4iw_allocate_pd,
	.alloc_ucontext = c4iw_alloc_ucontext,
	.create_cq = c4iw_create_cq,
	.create_qp = c4iw_create_qp,
	.create_srq = c4iw_create_srq,
	.dealloc_pd = c4iw_deallocate_pd,
	.dealloc_ucontext = c4iw_dealloc_ucontext,
	.dereg_mr = c4iw_dereg_mr,
	.destroy_cq = c4iw_destroy_cq,
	.destroy_qp = c4iw_destroy_qp,
	.destroy_srq = c4iw_destroy_srq,
	.fill_res_cq_entry = c4iw_fill_res_cq_entry,
	.fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
	.fill_res_mr_entry = c4iw_fill_res_mr_entry,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = c4iw_get_dma_mr,
	.get_hw_stats = c4iw_get_mib,
	.get_port_immutable = c4iw_port_immutable,
	.iw_accept = c4iw_accept_cr,
	.iw_add_ref = c4iw_qp_add_ref,
	.iw_connect = c4iw_connect,
	.iw_create_listen = c4iw_create_listen,
	.iw_destroy_listen = c4iw_destroy_listen,
	.iw_get_qp = c4iw_get_qp,
	.iw_reject = c4iw_reject_cr,
	.iw_rem_ref = c4iw_qp_rem_ref,
	.map_mr_sg = c4iw_map_mr_sg,
	.mmap = c4iw_mmap,
	.modify_qp = c4iw_ib_modify_qp,
	.modify_srq = c4iw_modify_srq,
	.poll_cq = c4iw_poll_cq,
	.post_recv = c4iw_post_receive,
	.post_send = c4iw_post_send,
	.post_srq_recv = c4iw_post_srq_recv,
	.query_device = c4iw_query_device,
	.query_gid = c4iw_query_gid,
	.query_port = c4iw_query_port,
	.query_qp = c4iw_ib_query_qp,
	.reg_user_mr = c4iw_reg_user_mr,
	.req_notify_cq = c4iw_arm_cq,

	INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};

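/* Associate each LLD net_device with the corresponding IB port (1-based). */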
static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
{
	int ret;
	int i;

	for (i = 0; i < rdev->lldi.nports; i++) {
		ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
					   i + 1);
		if (ret)
			return ret;
	}
	return 0;
}

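/*
 * Populate the ib_device from the LLD info and register it with the RDMA
 * core. Runs from a workqueue; on failure the uld context is torn down.
 */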
void c4iw_register_device(struct work_struct *work)
{
	int ret;
	struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
	struct c4iw_dev *dev = ctx->dev;

	pr_debug("c4iw_dev %p\n", dev);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	dev->ibdev.local_dma_lkey = 0;
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
	dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
	dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;

	memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
	       sizeof(dev->ibdev.iw_ifname));

	rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
	ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
	ret = set_netdevs(&dev->ibdev, &dev->rdev);
	if (ret)
		goto err_dealloc_ctx;
	dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX);
	ret = ib_register_device(&dev->ibdev, "cxgb4_%d",
				 &dev->rdev.lldi.pdev->dev);
	if (ret)
		goto err_dealloc_ctx;
	return;

err_dealloc_ctx:
	pr_err("%s - Failed registering iwarp device: %d\n",
	       pci_name(ctx->lldi.pdev), ret);
	c4iw_dealloc(ctx);
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
	pr_debug("c4iw_dev %p\n", dev);
	ib_unregister_device(&dev->ibdev);
}