mirror_ubuntu-bionic-kernel.git: drivers/infiniband/hw/cxgb3/iwch_provider.c
1 /*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/device.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/delay.h>
38 #include <linux/errno.h>
39 #include <linux/list.h>
40 #include <linux/sched.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/inetdevice.h>
45 #include <linux/slab.h>
46
47 #include <asm/io.h>
48 #include <asm/irq.h>
49 #include <asm/byteorder.h>
50
51 #include <rdma/iw_cm.h>
52 #include <rdma/ib_verbs.h>
53 #include <rdma/ib_smi.h>
54 #include <rdma/ib_umem.h>
55 #include <rdma/ib_user_verbs.h>
56
57 #include "cxio_hal.h"
58 #include "iwch.h"
59 #include "iwch_provider.h"
60 #include "iwch_cm.h"
61 #include <rdma/cxgb3-abi.h>
62 #include "common.h"
63
64 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
65 struct ib_ah_attr *ah_attr,
66 struct ib_udata *udata)
67 {
68 return ERR_PTR(-ENOSYS);
69 }
70
71 static int iwch_ah_destroy(struct ib_ah *ah)
72 {
73 return -ENOSYS;
74 }
75
76 static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
77 {
78 return -ENOSYS;
79 }
80
81 static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
82 {
83 return -ENOSYS;
84 }
85
86 static int iwch_process_mad(struct ib_device *ibdev,
87 int mad_flags,
88 u8 port_num,
89 const struct ib_wc *in_wc,
90 const struct ib_grh *in_grh,
91 const struct ib_mad_hdr *in_mad,
92 size_t in_mad_size,
93 struct ib_mad_hdr *out_mad,
94 size_t *out_mad_size,
95 u16 *out_mad_pkey_index)
96 {
97 return -ENOSYS;
98 }
99
100 static int iwch_dealloc_ucontext(struct ib_ucontext *context)
101 {
102 struct iwch_dev *rhp = to_iwch_dev(context->device);
103 struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
104 struct iwch_mm_entry *mm, *tmp;
105
106 PDBG("%s context %p\n", __func__, context);
107 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
108 kfree(mm);
109 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
110 kfree(ucontext);
111 return 0;
112 }
113
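/*
 * Allocate a per-process user context: initialize the HAL ucontext and
 * the list/lock used to hand out mmap keys for queue and doorbell
 * mappings.  Torn down again in iwch_dealloc_ucontext().
 */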
114 static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
115 struct ib_udata *udata)
116 {
117 struct iwch_ucontext *context;
118 struct iwch_dev *rhp = to_iwch_dev(ibdev);
119
120 PDBG("%s ibdev %p\n", __func__, ibdev);
121 context = kzalloc(sizeof(*context), GFP_KERNEL);
122 if (!context)
123 return ERR_PTR(-ENOMEM);
124 cxio_init_ucontext(&rhp->rdev, &context->uctx);
125 INIT_LIST_HEAD(&context->mmaps);
126 spin_lock_init(&context->mmap_lock);
127 return &context->ibucontext;
128 }
129
130 static int iwch_destroy_cq(struct ib_cq *ib_cq)
131 {
132 struct iwch_cq *chp;
133
134 PDBG("%s ib_cq %p\n", __func__, ib_cq);
135 chp = to_iwch_cq(ib_cq);
136
137 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
138 atomic_dec(&chp->refcnt);
139 wait_event(chp->wait, !atomic_read(&chp->refcnt));
140
141 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
142 kfree(chp);
143 return 0;
144 }
145
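/*
 * Create a completion queue.  The requested depth is padded on T3A
 * parts (to absorb extra CQEs the HW may insert on errors) and rounded
 * up to a power of two.  For userspace CQs the response carries the
 * cqid, size and an mmap key so libcxgb3 can map the queue memory;
 * downlevel libraries get the shorter v0 response layout.
 */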
146 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
147 const struct ib_cq_init_attr *attr,
148 struct ib_ucontext *ib_context,
149 struct ib_udata *udata)
150 {
151 int entries = attr->cqe;
152 struct iwch_dev *rhp;
153 struct iwch_cq *chp;
154 struct iwch_create_cq_resp uresp;
155 struct iwch_create_cq_req ureq;
156 struct iwch_ucontext *ucontext = NULL;
157 static int warned;
158 size_t resplen;
159
160 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
161 if (attr->flags)
162 return ERR_PTR(-EINVAL);
163
164 rhp = to_iwch_dev(ibdev);
165 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
166 if (!chp)
167 return ERR_PTR(-ENOMEM);
168
169 if (ib_context) {
170 ucontext = to_iwch_ucontext(ib_context);
171 if (!t3a_device(rhp)) {
172 if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
173 kfree(chp);
174 return ERR_PTR(-EFAULT);
175 }
176 chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
177 }
178 }
179
180 if (t3a_device(rhp)) {
181
182 /*
183 * T3A: Add some fluff to handle extra CQEs inserted
184 * for various errors.
185 * Additional CQE possibilities:
186 * TERMINATE,
187 * incoming RDMA WRITE failures,
188 * incoming RDMA READ REQUEST failures.
189 * NOTE: We cannot ensure the CQ won't overflow.
190 */
191 entries += 16;
192 }
193 entries = roundup_pow_of_two(entries);
194 chp->cq.size_log2 = ilog2(entries);
195
196 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
197 kfree(chp);
198 return ERR_PTR(-ENOMEM);
199 }
200 chp->rhp = rhp;
201 chp->ibcq.cqe = 1 << chp->cq.size_log2;
202 spin_lock_init(&chp->lock);
203 spin_lock_init(&chp->comp_handler_lock);
204 atomic_set(&chp->refcnt, 1);
205 init_waitqueue_head(&chp->wait);
206 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
207 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
208 kfree(chp);
209 return ERR_PTR(-ENOMEM);
210 }
211
212 if (ucontext) {
213 struct iwch_mm_entry *mm;
214
215 mm = kmalloc(sizeof *mm, GFP_KERNEL);
216 if (!mm) {
217 iwch_destroy_cq(&chp->ibcq);
218 return ERR_PTR(-ENOMEM);
219 }
220 uresp.cqid = chp->cq.cqid;
221 uresp.size_log2 = chp->cq.size_log2;
222 spin_lock(&ucontext->mmap_lock);
223 uresp.key = ucontext->key;
224 ucontext->key += PAGE_SIZE;
225 spin_unlock(&ucontext->mmap_lock);
226 mm->key = uresp.key;
227 mm->addr = virt_to_phys(chp->cq.queue);
228 if (udata->outlen < sizeof uresp) {
229 if (!warned++)
230 printk(KERN_WARNING MOD "Warning - "
231 "downlevel libcxgb3 (non-fatal).\n");
232 mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
233 sizeof(struct t3_cqe));
234 resplen = sizeof(struct iwch_create_cq_resp_v0);
235 } else {
236 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
237 sizeof(struct t3_cqe));
238 uresp.memsize = mm->len;
239 uresp.reserved = 0;
240 resplen = sizeof uresp;
241 }
242 if (ib_copy_to_udata(udata, &uresp, resplen)) {
243 kfree(mm);
244 iwch_destroy_cq(&chp->ibcq);
245 return ERR_PTR(-EFAULT);
246 }
247 insert_mmap(ucontext, mm);
248 }
249 PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
250 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
251 (unsigned long long) chp->cq.dma_addr);
252 return &chp->ibcq;
253 }
254
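/*
 * CQ resize is not wired up: the body below is compiled out under
 * "#ifdef notyet" and the verb returns -ENOSYS.  The dead code sketches
 * the intended flow: quiesce the QPs, allocate a larger t3_cq, copy the
 * CQEs over, update the HW context, then resume the QPs.
 */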
255 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
256 {
257 #ifdef notyet
258 struct iwch_cq *chp = to_iwch_cq(cq);
259 struct t3_cq oldcq, newcq;
260 int ret;
261
262 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
263
264 /* We don't downsize... */
265 if (cqe <= cq->cqe)
266 return 0;
267
268 /* create new t3_cq with new size */
269 cqe = roundup_pow_of_two(cqe+1);
270 newcq.size_log2 = ilog2(cqe);
271
272 /* Don't allow resize to less than the current wce count */
273 if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
274 return -ENOMEM;
275 }
276
277 /* Quiesce all QPs using this CQ */
278 ret = iwch_quiesce_qps(chp);
279 if (ret) {
280 return ret;
281 }
282
283 ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
284 if (ret) {
285 return ret;
286 }
287
288 /* copy CQEs */
289 memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
290 sizeof(struct t3_cqe));
291
292 /* old iwch_qp gets new t3_cq but keeps old cqid */
293 oldcq = chp->cq;
294 chp->cq = newcq;
295 chp->cq.cqid = oldcq.cqid;
296
297 /* resize new t3_cq to update the HW context */
298 ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
299 if (ret) {
300 chp->cq = oldcq;
301 return ret;
302 }
303 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;
304
305 /* destroy old t3_cq */
306 oldcq.cqid = newcq.cqid;
307 ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
308 if (ret) {
309 printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
310 __func__, ret);
311 }
312
313 /* add user hooks here */
314
315 /* resume qps */
316 ret = iwch_resume_qps(chp);
317 return ret;
318 #else
319 return -ENOSYS;
320 #endif
321 }
322
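/*
 * Request a completion notification: arm the CQ for solicited-only or
 * any completion, first syncing the HW read pointer from the
 * user-mapped rptr when the CQ belongs to a user context.
 */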
323 static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
324 {
325 struct iwch_dev *rhp;
326 struct iwch_cq *chp;
327 enum t3_cq_opcode cq_op;
328 int err;
329 unsigned long flag;
330 u32 rptr;
331
332 chp = to_iwch_cq(ibcq);
333 rhp = chp->rhp;
334 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
335 cq_op = CQ_ARM_SE;
336 else
337 cq_op = CQ_ARM_AN;
338 if (chp->user_rptr_addr) {
339 if (get_user(rptr, chp->user_rptr_addr))
340 return -EFAULT;
341 spin_lock_irqsave(&chp->lock, flag);
342 chp->cq.rptr = rptr;
343 } else
344 spin_lock_irqsave(&chp->lock, flag);
345 PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
346 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
347 spin_unlock_irqrestore(&chp->lock, flag);
348 if (err < 0)
349 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
350 chp->cq.cqid);
351 if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
352 err = 0;
353 return err;
354 }
355
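/*
 * mmap() handler for userspace verbs.  The page offset is a key handed
 * out in an earlier create_cq/create_qp response and is looked up (and
 * consumed) in the ucontext mmap list.  Offsets inside the user
 * doorbell region are mapped uncached and write-only; everything else
 * is contiguous WQ/CQ DMA memory mapped with remap_pfn_range().
 */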
356 static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
357 {
358 int len = vma->vm_end - vma->vm_start;
359 u32 key = vma->vm_pgoff << PAGE_SHIFT;
360 struct cxio_rdev *rdev_p;
361 int ret = 0;
362 struct iwch_mm_entry *mm;
363 struct iwch_ucontext *ucontext;
364 u64 addr;
365
366 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
367 key, len);
368
369 if (vma->vm_start & (PAGE_SIZE-1)) {
370 return -EINVAL;
371 }
372
373 rdev_p = &(to_iwch_dev(context->device)->rdev);
374 ucontext = to_iwch_ucontext(context);
375
376 mm = remove_mmap(ucontext, key, len);
377 if (!mm)
378 return -EINVAL;
379 addr = mm->addr;
380 kfree(mm);
381
382 if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
383 (addr < (rdev_p->rnic_info.udbell_physbase +
384 rdev_p->rnic_info.udbell_len))) {
385
386 /*
387 * Map T3 DB register.
388 */
389 if (vma->vm_flags & VM_READ) {
390 return -EPERM;
391 }
392
393 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
394 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
395 vma->vm_flags &= ~VM_MAYREAD;
396 ret = io_remap_pfn_range(vma, vma->vm_start,
397 addr >> PAGE_SHIFT,
398 len, vma->vm_page_prot);
399 } else {
400
401 /*
402 * Map WQ or CQ contig dma memory...
403 */
404 ret = remap_pfn_range(vma, vma->vm_start,
405 addr >> PAGE_SHIFT,
406 len, vma->vm_page_prot);
407 }
408
409 return ret;
410 }
411
412 static int iwch_deallocate_pd(struct ib_pd *pd)
413 {
414 struct iwch_dev *rhp;
415 struct iwch_pd *php;
416
417 php = to_iwch_pd(pd);
418 rhp = php->rhp;
419 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
420 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
421 kfree(php);
422 return 0;
423 }
424
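/*
 * Allocate a protection domain: take a PDID from the HAL resource pool
 * and, for userspace callers, return the PDID through udata.
 */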
425 static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
426 struct ib_ucontext *context,
427 struct ib_udata *udata)
428 {
429 struct iwch_pd *php;
430 u32 pdid;
431 struct iwch_dev *rhp;
432
433 PDBG("%s ibdev %p\n", __func__, ibdev);
434 rhp = (struct iwch_dev *) ibdev;
435 pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
436 if (!pdid)
437 return ERR_PTR(-EINVAL);
438 php = kzalloc(sizeof(*php), GFP_KERNEL);
439 if (!php) {
440 cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
441 return ERR_PTR(-ENOMEM);
442 }
443 php->pdid = pdid;
444 php->rhp = rhp;
445 if (context) {
446 if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
447 iwch_deallocate_pd(&php->ibpd);
448 return ERR_PTR(-EFAULT);
449 }
450 }
451 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
452 return &php->ibpd;
453 }
454
455 static int iwch_dereg_mr(struct ib_mr *ib_mr)
456 {
457 struct iwch_dev *rhp;
458 struct iwch_mr *mhp;
459 u32 mmid;
460
461 PDBG("%s ib_mr %p\n", __func__, ib_mr);
462
463 mhp = to_iwch_mr(ib_mr);
464 kfree(mhp->pages);
465 rhp = mhp->rhp;
466 mmid = mhp->attr.stag >> 8;
467 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
468 mhp->attr.pbl_addr);
469 iwch_free_pbl(mhp);
470 remove_handle(rhp, &rhp->mmidr, mmid);
471 if (mhp->kva)
472 kfree((void *) (unsigned long) mhp->kva);
473 if (mhp->umem)
474 ib_umem_release(mhp->umem);
475 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
476 kfree(mhp);
477 return 0;
478 }
479
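/*
 * Register a DMA MR covering the 32-bit address range T3 can express,
 * built from 64MB (1 << 26) pages written into a PBL.  Platforms with
 * physical addresses wider than 32 bits cannot support this and get
 * -ENOTSUPP.
 */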
480 static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
481 {
482 const u64 total_size = 0xffffffff;
483 const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
484 struct iwch_pd *php = to_iwch_pd(pd);
485 struct iwch_dev *rhp = php->rhp;
486 struct iwch_mr *mhp;
487 __be64 *page_list;
488 int shift = 26, npages, ret, i;
489
490 PDBG("%s ib_pd %p\n", __func__, pd);
491
492 /*
493 * T3 only supports 32 bits of size.
494 */
495 if (sizeof(phys_addr_t) > 4) {
496 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
497 return ERR_PTR(-ENOTSUPP);
498 }
499
500 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
501 if (!mhp)
502 return ERR_PTR(-ENOMEM);
503
504 mhp->rhp = rhp;
505
506 npages = (total_size + (1ULL << shift) - 1) >> shift;
507 if (!npages) {
508 ret = -EINVAL;
509 goto err;
510 }
511
512 page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
513 if (!page_list) {
514 ret = -ENOMEM;
515 goto err;
516 }
517
518 for (i = 0; i < npages; i++)
519 page_list[i] = cpu_to_be64((u64)i << shift);
520
521 PDBG("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
522 __func__, mask, shift, total_size, npages);
523
524 ret = iwch_alloc_pbl(mhp, npages);
525 if (ret) {
526 kfree(page_list);
527 goto err_pbl;
528 }
529
530 ret = iwch_write_pbl(mhp, page_list, npages, 0);
531 kfree(page_list);
532 if (ret)
533 goto err_pbl;
534
535 mhp->attr.pdid = php->pdid;
536 mhp->attr.zbva = 0;
537
538 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
539 mhp->attr.va_fbo = 0;
540 mhp->attr.page_size = shift - 12;
541
542 mhp->attr.len = (u32) total_size;
543 mhp->attr.pbl_size = npages;
544 ret = iwch_register_mem(rhp, php, mhp, shift);
545 if (ret)
546 goto err_pbl;
547
548 return &mhp->ibmr;
549
550 err_pbl:
551 iwch_free_pbl(mhp);
552
553 err:
554 kfree(mhp);
555 return ERR_PTR(ret);
556 }
557
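/*
 * Register a userspace memory region: pin the pages with ib_umem_get(),
 * stream their DMA addresses into the PBL one page worth of entries at
 * a time, then write the TPT entry.  On non-T3A devices the adjusted
 * pbl_addr is also returned to userspace via udata.
 */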
558 static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
559 u64 virt, int acc, struct ib_udata *udata)
560 {
561 __be64 *pages;
562 int shift, n, len;
563 int i, k, entry;
564 int err = 0;
565 struct iwch_dev *rhp;
566 struct iwch_pd *php;
567 struct iwch_mr *mhp;
568 struct iwch_reg_user_mr_resp uresp;
569 struct scatterlist *sg;
570 PDBG("%s ib_pd %p\n", __func__, pd);
571
572 php = to_iwch_pd(pd);
573 rhp = php->rhp;
574 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
575 if (!mhp)
576 return ERR_PTR(-ENOMEM);
577
578 mhp->rhp = rhp;
579
580 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
581 if (IS_ERR(mhp->umem)) {
582 err = PTR_ERR(mhp->umem);
583 kfree(mhp);
584 return ERR_PTR(err);
585 }
586
587 shift = ffs(mhp->umem->page_size) - 1;
588
589 n = mhp->umem->nmap;
590
591 err = iwch_alloc_pbl(mhp, n);
592 if (err)
593 goto err;
594
595 pages = (__be64 *) __get_free_page(GFP_KERNEL);
596 if (!pages) {
597 err = -ENOMEM;
598 goto err_pbl;
599 }
600
601 i = n = 0;
602
603 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
604 len = sg_dma_len(sg) >> shift;
605 for (k = 0; k < len; ++k) {
606 pages[i++] = cpu_to_be64(sg_dma_address(sg) +
607 mhp->umem->page_size * k);
608 if (i == PAGE_SIZE / sizeof *pages) {
609 err = iwch_write_pbl(mhp, pages, i, n);
610 if (err)
611 goto pbl_done;
612 n += i;
613 i = 0;
614 }
615 }
616 }
617
618 if (i)
619 err = iwch_write_pbl(mhp, pages, i, n);
620
621 pbl_done:
622 free_page((unsigned long) pages);
623 if (err)
624 goto err_pbl;
625
626 mhp->attr.pdid = php->pdid;
627 mhp->attr.zbva = 0;
628 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
629 mhp->attr.va_fbo = virt;
630 mhp->attr.page_size = shift - 12;
631 mhp->attr.len = (u32) length;
632
633 err = iwch_register_mem(rhp, php, mhp, shift);
634 if (err)
635 goto err_pbl;
636
637 if (udata && !t3a_device(rhp)) {
638 uresp.pbl_addr = (mhp->attr.pbl_addr -
639 rhp->rdev.rnic_info.pbl_base) >> 3;
640 PDBG("%s user resp pbl_addr 0x%x\n", __func__,
641 uresp.pbl_addr);
642
643 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
644 iwch_dereg_mr(&mhp->ibmr);
645 err = -EFAULT;
646 goto err;
647 }
648 }
649
650 return &mhp->ibmr;
651
652 err_pbl:
653 iwch_free_pbl(mhp);
654
655 err:
656 ib_umem_release(mhp->umem);
657 kfree(mhp);
658 return ERR_PTR(err);
659 }
660
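/*
 * Allocate a type 1 memory window: get a window STag from the HAL and
 * publish the MW under its mmid (stag >> 8).  Type 2 windows are not
 * supported.
 */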
661 static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
662 struct ib_udata *udata)
663 {
664 struct iwch_dev *rhp;
665 struct iwch_pd *php;
666 struct iwch_mw *mhp;
667 u32 mmid;
668 u32 stag = 0;
669 int ret;
670
671 if (type != IB_MW_TYPE_1)
672 return ERR_PTR(-EINVAL);
673
674 php = to_iwch_pd(pd);
675 rhp = php->rhp;
676 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
677 if (!mhp)
678 return ERR_PTR(-ENOMEM);
679 ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
680 if (ret) {
681 kfree(mhp);
682 return ERR_PTR(ret);
683 }
684 mhp->rhp = rhp;
685 mhp->attr.pdid = php->pdid;
686 mhp->attr.type = TPT_MW;
687 mhp->attr.stag = stag;
688 mmid = (stag) >> 8;
689 mhp->ibmw.rkey = stag;
690 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
691 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
692 kfree(mhp);
693 return ERR_PTR(-ENOMEM);
694 }
695 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
696 return &(mhp->ibmw);
697 }
698
699 static int iwch_dealloc_mw(struct ib_mw *mw)
700 {
701 struct iwch_dev *rhp;
702 struct iwch_mw *mhp;
703 u32 mmid;
704
705 mhp = to_iwch_mw(mw);
706 rhp = mhp->rhp;
707 mmid = (mw->rkey) >> 8;
708 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
709 remove_handle(rhp, &rhp->mmidr, mmid);
710 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
711 kfree(mhp);
712 return 0;
713 }
714
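/*
 * Allocate a fast-register MR of up to T3_MAX_FASTREG_DEPTH pages:
 * reserve a page array and PBL, allocate a non-shared STag, and expose
 * the MR under its mmid.  The page array is filled later through
 * iwch_map_mr_sg()/iwch_set_page().
 */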
715 static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
716 enum ib_mr_type mr_type,
717 u32 max_num_sg)
718 {
719 struct iwch_dev *rhp;
720 struct iwch_pd *php;
721 struct iwch_mr *mhp;
722 u32 mmid;
723 u32 stag = 0;
724 int ret = 0;
725
726 if (mr_type != IB_MR_TYPE_MEM_REG ||
727 max_num_sg > T3_MAX_FASTREG_DEPTH)
728 return ERR_PTR(-EINVAL);
729
730 php = to_iwch_pd(pd);
731 rhp = php->rhp;
732 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
733 if (!mhp)
734 goto err;
735
736 mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
737 if (!mhp->pages) {
738 ret = -ENOMEM;
739 goto pl_err;
740 }
741
742 mhp->rhp = rhp;
743 ret = iwch_alloc_pbl(mhp, max_num_sg);
744 if (ret)
745 goto err1;
746 mhp->attr.pbl_size = max_num_sg;
747 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
748 mhp->attr.pbl_size, mhp->attr.pbl_addr);
749 if (ret)
750 goto err2;
751 mhp->attr.pdid = php->pdid;
752 mhp->attr.type = TPT_NON_SHARED_MR;
753 mhp->attr.stag = stag;
754 mhp->attr.state = 1;
755 mmid = (stag) >> 8;
756 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
757 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
758 goto err3;
759
760 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
761 return &(mhp->ibmr);
762 err3:
763 cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
764 mhp->attr.pbl_addr);
765 err2:
766 iwch_free_pbl(mhp);
767 err1:
768 kfree(mhp->pages);
769 pl_err:
770 kfree(mhp);
771 err:
772 return ERR_PTR(ret);
773 }
774
775 static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
776 {
777 struct iwch_mr *mhp = to_iwch_mr(ibmr);
778
779 if (unlikely(mhp->npages == mhp->attr.pbl_size))
780 return -ENOMEM;
781
782 mhp->pages[mhp->npages++] = addr;
783
784 return 0;
785 }
786
787 static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
788 int sg_nents, unsigned int *sg_offset)
789 {
790 struct iwch_mr *mhp = to_iwch_mr(ibmr);
791
792 mhp->npages = 0;
793
794 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
795 }
796
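/*
 * Destroy a QP: force it to ERROR, wait for the connection endpoint and
 * all references to drop, then remove the handle and free the HW queue.
 */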
797 static int iwch_destroy_qp(struct ib_qp *ib_qp)
798 {
799 struct iwch_dev *rhp;
800 struct iwch_qp *qhp;
801 struct iwch_qp_attributes attrs;
802 struct iwch_ucontext *ucontext;
803
804 qhp = to_iwch_qp(ib_qp);
805 rhp = qhp->rhp;
806
807 attrs.next_state = IWCH_QP_STATE_ERROR;
808 iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
809 wait_event(qhp->wait, !qhp->ep);
810
811 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
812
813 atomic_dec(&qhp->refcnt);
814 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
815
816 ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
817 : NULL;
818 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
819 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
820
821 PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
822 ib_qp, qhp->wq.qpid, qhp);
823 kfree(qhp);
824 return 0;
825 }
826
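/*
 * Create an RC QP.  The RQ is sized at one more than the requested
 * depth, rounded up to a power of two (minimum 16); the SQ is likewise
 * rounded up, and kernel QPs get extra SQ space because fastreg WRs can
 * take two WR slots.  Userspace callers receive mmap keys for the work
 * queue memory and the doorbell page.
 */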
827 static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
828 struct ib_qp_init_attr *attrs,
829 struct ib_udata *udata)
830 {
831 struct iwch_dev *rhp;
832 struct iwch_qp *qhp;
833 struct iwch_pd *php;
834 struct iwch_cq *schp;
835 struct iwch_cq *rchp;
836 struct iwch_create_qp_resp uresp;
837 int wqsize, sqsize, rqsize;
838 struct iwch_ucontext *ucontext;
839
840 PDBG("%s ib_pd %p\n", __func__, pd);
841 if (attrs->qp_type != IB_QPT_RC)
842 return ERR_PTR(-EINVAL);
843 php = to_iwch_pd(pd);
844 rhp = php->rhp;
845 schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
846 rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
847 if (!schp || !rchp)
848 return ERR_PTR(-EINVAL);
849
850 /* The RQT size must be # of entries + 1 rounded up to a power of two */
851 rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
852 if (rqsize == attrs->cap.max_recv_wr)
853 rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
854
855 /* T3 doesn't support RQT depth < 16 */
856 if (rqsize < 16)
857 rqsize = 16;
858
859 if (rqsize > T3_MAX_RQ_SIZE)
860 return ERR_PTR(-EINVAL);
861
862 if (attrs->cap.max_inline_data > T3_MAX_INLINE)
863 return ERR_PTR(-EINVAL);
864
865 /*
866 * NOTE: The SQ and total WQ sizes don't need to be
867 * a power of two. However, all the code assumes
868 * they are, e.g. Q_FREECNT() and friends.
869 */
870 sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
871 wqsize = roundup_pow_of_two(rqsize + sqsize);
872
873 /*
874 * Kernel users need more wq space for fastreg WRs which can take
875 * 2 WR fragments.
876 */
877 ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
878 if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
879 wqsize = roundup_pow_of_two(rqsize +
880 roundup_pow_of_two(attrs->cap.max_send_wr * 2));
881 PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
882 wqsize, sqsize, rqsize);
883 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
884 if (!qhp)
885 return ERR_PTR(-ENOMEM);
886 qhp->wq.size_log2 = ilog2(wqsize);
887 qhp->wq.rq_size_log2 = ilog2(rqsize);
888 qhp->wq.sq_size_log2 = ilog2(sqsize);
889 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
890 ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
891 kfree(qhp);
892 return ERR_PTR(-ENOMEM);
893 }
894
895 attrs->cap.max_recv_wr = rqsize - 1;
896 attrs->cap.max_send_wr = sqsize;
897 attrs->cap.max_inline_data = T3_MAX_INLINE;
898
899 qhp->rhp = rhp;
900 qhp->attr.pd = php->pdid;
901 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
902 qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
903 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
904 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
905 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
906 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
907 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
908 qhp->attr.state = IWCH_QP_STATE_IDLE;
909 qhp->attr.next_state = IWCH_QP_STATE_IDLE;
910
911 /*
912 * XXX - These don't get passed in from the openib user
913 * at create time. The CM sets them via a QP modify.
914 * Need to fix... I think the CM should
915 */
916 qhp->attr.enable_rdma_read = 1;
917 qhp->attr.enable_rdma_write = 1;
918 qhp->attr.enable_bind = 1;
919 qhp->attr.max_ord = 1;
920 qhp->attr.max_ird = 1;
921
922 spin_lock_init(&qhp->lock);
923 init_waitqueue_head(&qhp->wait);
924 atomic_set(&qhp->refcnt, 1);
925
926 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
927 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
928 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
929 kfree(qhp);
930 return ERR_PTR(-ENOMEM);
931 }
932
933 if (udata) {
934
935 struct iwch_mm_entry *mm1, *mm2;
936
937 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
938 if (!mm1) {
939 iwch_destroy_qp(&qhp->ibqp);
940 return ERR_PTR(-ENOMEM);
941 }
942
943 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
944 if (!mm2) {
945 kfree(mm1);
946 iwch_destroy_qp(&qhp->ibqp);
947 return ERR_PTR(-ENOMEM);
948 }
949
950 uresp.qpid = qhp->wq.qpid;
951 uresp.size_log2 = qhp->wq.size_log2;
952 uresp.sq_size_log2 = qhp->wq.sq_size_log2;
953 uresp.rq_size_log2 = qhp->wq.rq_size_log2;
954 spin_lock(&ucontext->mmap_lock);
955 uresp.key = ucontext->key;
956 ucontext->key += PAGE_SIZE;
957 uresp.db_key = ucontext->key;
958 ucontext->key += PAGE_SIZE;
959 spin_unlock(&ucontext->mmap_lock);
960 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
961 kfree(mm1);
962 kfree(mm2);
963 iwch_destroy_qp(&qhp->ibqp);
964 return ERR_PTR(-EFAULT);
965 }
966 mm1->key = uresp.key;
967 mm1->addr = virt_to_phys(qhp->wq.queue);
968 mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
969 insert_mmap(ucontext, mm1);
970 mm2->key = uresp.db_key;
971 mm2->addr = qhp->wq.udb & PAGE_MASK;
972 mm2->len = PAGE_SIZE;
973 insert_mmap(ucontext, mm2);
974 }
975 qhp->ibqp.qp_num = qhp->wq.qpid;
976 init_timer(&(qhp->timer));
977 PDBG("%s sq_num_entries %d, rq_num_entries %d "
978 "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
979 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
980 qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
981 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
982 return &qhp->ibqp;
983 }
984
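/*
 * Modify QP attributes.  iWARP has no RTR state, so an IB_QPS_RTR
 * transition is dropped; the remaining state and RDMA access-flag
 * changes are translated into iwch_qp_attributes and applied with
 * iwch_modify_qp().
 */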
985 static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
986 int attr_mask, struct ib_udata *udata)
987 {
988 struct iwch_dev *rhp;
989 struct iwch_qp *qhp;
990 enum iwch_qp_attr_mask mask = 0;
991 struct iwch_qp_attributes attrs;
992
993 PDBG("%s ib_qp %p\n", __func__, ibqp);
994
995 /* iwarp does not support the RTR state */
996 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
997 attr_mask &= ~IB_QP_STATE;
998
999 /* Make sure we still have something left to do */
1000 if (!attr_mask)
1001 return 0;
1002
1003 memset(&attrs, 0, sizeof attrs);
1004 qhp = to_iwch_qp(ibqp);
1005 rhp = qhp->rhp;
1006
1007 attrs.next_state = iwch_convert_state(attr->qp_state);
1008 attrs.enable_rdma_read = (attr->qp_access_flags &
1009 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1010 attrs.enable_rdma_write = (attr->qp_access_flags &
1011 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1012 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1013
1014
1015 mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
1016 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1017 (IWCH_QP_ATTR_ENABLE_RDMA_READ |
1018 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
1019 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1020
1021 return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
1022 }
1023
1024 void iwch_qp_add_ref(struct ib_qp *qp)
1025 {
1026 PDBG("%s ib_qp %p\n", __func__, qp);
1027 atomic_inc(&(to_iwch_qp(qp)->refcnt));
1028 }
1029
1030 void iwch_qp_rem_ref(struct ib_qp *qp)
1031 {
1032 PDBG("%s ib_qp %p\n", __func__, qp);
1033 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
1034 wake_up(&(to_iwch_qp(qp)->wait));
1035 }
1036
1037 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
1038 {
1039 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1040 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
1041 }
1042
1043
1044 static int iwch_query_pkey(struct ib_device *ibdev,
1045 u8 port, u16 index, u16 * pkey)
1046 {
1047 PDBG("%s ibdev %p\n", __func__, ibdev);
1048 *pkey = 0;
1049 return 0;
1050 }
1051
1052 static int iwch_query_gid(struct ib_device *ibdev, u8 port,
1053 int index, union ib_gid *gid)
1054 {
1055 struct iwch_dev *dev;
1056
1057 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
1058 __func__, ibdev, port, index, gid);
1059 dev = to_iwch_dev(ibdev);
1060 BUG_ON(port == 0 || port > 2);
1061 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
1062 memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
1063 return 0;
1064 }
1065
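/*
 * Pack the dotted firmware version string reported by the LLD's
 * ethtool get_drvinfo (skipping its leading character) into a u64:
 * major in bits 47:32, minor in 31:16, micro in 15:0.
 */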
1066 static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
1067 {
1068 struct ethtool_drvinfo info;
1069 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1070 char *cp, *next;
1071 unsigned fw_maj, fw_min, fw_mic;
1072
1073 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1074
1075 next = info.fw_version + 1;
1076 cp = strsep(&next, ".");
1077 sscanf(cp, "%i", &fw_maj);
1078 cp = strsep(&next, ".");
1079 sscanf(cp, "%i", &fw_min);
1080 cp = strsep(&next, ".");
1081 sscanf(cp, "%i", &fw_mic);
1082
1083 return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
1084 (fw_mic & 0xffff);
1085 }
1086
1087 static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1088 struct ib_udata *uhw)
1089 {
1090
1091 struct iwch_dev *dev;
1092
1093 PDBG("%s ibdev %p\n", __func__, ibdev);
1094
1095 if (uhw->inlen || uhw->outlen)
1096 return -EINVAL;
1097
1098 dev = to_iwch_dev(ibdev);
1099 memset(props, 0, sizeof *props);
1100 memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1101 props->hw_ver = dev->rdev.t3cdev_p->type;
1102 props->fw_ver = fw_vers_string_to_u64(dev);
1103 props->device_cap_flags = dev->device_cap_flags;
1104 props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
1105 props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
1106 props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
1107 props->max_mr_size = dev->attr.max_mr_size;
1108 props->max_qp = dev->attr.max_qps;
1109 props->max_qp_wr = dev->attr.max_wrs;
1110 props->max_sge = dev->attr.max_sge_per_wr;
1111 props->max_sge_rd = 1;
1112 props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
1113 props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
1114 props->max_cq = dev->attr.max_cqs;
1115 props->max_cqe = dev->attr.max_cqes_per_cq;
1116 props->max_mr = dev->attr.max_mem_regs;
1117 props->max_pd = dev->attr.max_pds;
1118 props->local_ca_ack_delay = 0;
1119 props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
1120
1121 return 0;
1122 }
1123
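/*
 * Report port attributes.  The port state is derived from the
 * underlying net_device: DOWN if the carrier is off, ACTIVE if an IPv4
 * address is configured, INIT otherwise.
 */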
1124 static int iwch_query_port(struct ib_device *ibdev,
1125 u8 port, struct ib_port_attr *props)
1126 {
1127 struct iwch_dev *dev;
1128 struct net_device *netdev;
1129 struct in_device *inetdev;
1130
1131 PDBG("%s ibdev %p\n", __func__, ibdev);
1132
1133 dev = to_iwch_dev(ibdev);
1134 netdev = dev->rdev.port_info.lldevs[port-1];
1135
1136 memset(props, 0, sizeof(struct ib_port_attr));
1137 props->max_mtu = IB_MTU_4096;
1138 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
1139
1140 if (!netif_carrier_ok(netdev))
1141 props->state = IB_PORT_DOWN;
1142 else {
1143 inetdev = in_dev_get(netdev);
1144 if (inetdev) {
1145 if (inetdev->ifa_list)
1146 props->state = IB_PORT_ACTIVE;
1147 else
1148 props->state = IB_PORT_INIT;
1149 in_dev_put(inetdev);
1150 } else
1151 props->state = IB_PORT_INIT;
1152 }
1153
1154 props->port_cap_flags =
1155 IB_PORT_CM_SUP |
1156 IB_PORT_SNMP_TUNNEL_SUP |
1157 IB_PORT_REINIT_SUP |
1158 IB_PORT_DEVICE_MGMT_SUP |
1159 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1160 props->gid_tbl_len = 1;
1161 props->pkey_tbl_len = 1;
1162 props->active_width = 2;
1163 props->active_speed = IB_SPEED_DDR;
1164 props->max_msg_sz = -1;
1165
1166 return 0;
1167 }
1168
1169 static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
1170 char *buf)
1171 {
1172 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1173 ibdev.dev);
1174 PDBG("%s dev 0x%p\n", __func__, dev);
1175 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
1176 }
1177
1178 static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
1179 char *buf)
1180 {
1181 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1182 ibdev.dev);
1183 struct ethtool_drvinfo info;
1184 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1185
1186 PDBG("%s dev 0x%p\n", __func__, dev);
1187 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1188 return sprintf(buf, "%s\n", info.driver);
1189 }
1190
1191 static ssize_t show_board(struct device *dev, struct device_attribute *attr,
1192 char *buf)
1193 {
1194 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1195 ibdev.dev);
1196 PDBG("%s dev 0x%p\n", __func__, dev);
1197 return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
1198 iwch_dev->rdev.rnic_info.pdev->device);
1199 }
1200
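/*
 * Hardware statistics exported through the rdma_hw_stats interface.
 * The enum indexes both the names[] table below and the value[] array
 * filled in by iwch_get_mib().
 */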
1201 enum counters {
1202 IPINRECEIVES,
1203 IPINHDRERRORS,
1204 IPINADDRERRORS,
1205 IPINUNKNOWNPROTOS,
1206 IPINDISCARDS,
1207 IPINDELIVERS,
1208 IPOUTREQUESTS,
1209 IPOUTDISCARDS,
1210 IPOUTNOROUTES,
1211 IPREASMTIMEOUT,
1212 IPREASMREQDS,
1213 IPREASMOKS,
1214 IPREASMFAILS,
1215 TCPACTIVEOPENS,
1216 TCPPASSIVEOPENS,
1217 TCPATTEMPTFAILS,
1218 TCPESTABRESETS,
1219 TCPCURRESTAB,
1220 TCPINSEGS,
1221 TCPOUTSEGS,
1222 TCPRETRANSSEGS,
1223 TCPINERRS,
1224 TCPOUTRSTS,
1225 TCPRTOMIN,
1226 TCPRTOMAX,
1227 NR_COUNTERS
1228 };
1229
1230 static const char * const names[] = {
1231 [IPINRECEIVES] = "ipInReceives",
1232 [IPINHDRERRORS] = "ipInHdrErrors",
1233 [IPINADDRERRORS] = "ipInAddrErrors",
1234 [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
1235 [IPINDISCARDS] = "ipInDiscards",
1236 [IPINDELIVERS] = "ipInDelivers",
1237 [IPOUTREQUESTS] = "ipOutRequests",
1238 [IPOUTDISCARDS] = "ipOutDiscards",
1239 [IPOUTNOROUTES] = "ipOutNoRoutes",
1240 [IPREASMTIMEOUT] = "ipReasmTimeout",
1241 [IPREASMREQDS] = "ipReasmReqds",
1242 [IPREASMOKS] = "ipReasmOKs",
1243 [IPREASMFAILS] = "ipReasmFails",
1244 [TCPACTIVEOPENS] = "tcpActiveOpens",
1245 [TCPPASSIVEOPENS] = "tcpPassiveOpens",
1246 [TCPATTEMPTFAILS] = "tcpAttemptFails",
1247 [TCPESTABRESETS] = "tcpEstabResets",
1248 [TCPCURRESTAB] = "tcpCurrEstab",
1249 [TCPINSEGS] = "tcpInSegs",
1250 [TCPOUTSEGS] = "tcpOutSegs",
1251 [TCPRETRANSSEGS] = "tcpRetransSegs",
1252 [TCPINERRS] = "tcpInErrs",
1253 [TCPOUTRSTS] = "tcpOutRsts",
1254 [TCPRTOMIN] = "tcpRtoMin",
1255 [TCPRTOMAX] = "tcpRtoMax",
1256 };
1257
1258 static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
1259 u8 port_num)
1260 {
1261 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
1262
1263 /* Our driver only supports device level stats */
1264 if (port_num != 0)
1265 return NULL;
1266
1267 return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
1268 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1269 }
1270
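/*
 * Device-level hardware statistics: fetch the TP MIB block from the LLD
 * via the RDMA_GET_MIB control call and translate it into the counters
 * allocated by iwch_alloc_stats().
 */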
1271 static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1272 u8 port, int index)
1273 {
1274 struct iwch_dev *dev;
1275 struct tp_mib_stats m;
1276 int ret;
1277
1278 if (port != 0 || !stats)
1279 return -ENOSYS;
1280
1281 PDBG("%s ibdev %p\n", __func__, ibdev);
1282 dev = to_iwch_dev(ibdev);
1283 ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
1284 if (ret)
1285 return -ENOSYS;
1286
1287 stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
1288 stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
1289 stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
1290 stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
1291 stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
1292 stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
1293 stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
1294 stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
1295 stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
1296 stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
1297 stats->value[IPREASMREQDS] = m.ipReasmReqds;
1298 stats->value[IPREASMOKS] = m.ipReasmOKs;
1299 stats->value[IPREASMFAILS] = m.ipReasmFails;
1300 stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
1301 stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
1302 stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
1303 stats->value[TCPESTABRESETS] = m.tcpEstabResets;
1304 stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
1305 stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
1306 stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
1307 stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
1308 stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
1309 stats->value[TCPOUTRSTS] = m.tcpOutRsts;
1310 stats->value[TCPRTOMIN] = m.tcpRtoMin;
1311 stats->value[TCPRTOMAX] = m.tcpRtoMax;
1312
1313 return stats->num_counters;
1314 }
1315
1316 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1317 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1318 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
1319
1320 static struct device_attribute *iwch_class_attributes[] = {
1321 &dev_attr_hw_rev,
1322 &dev_attr_hca_type,
1323 &dev_attr_board_id,
1324 };
1325
1326 static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
1327 struct ib_port_immutable *immutable)
1328 {
1329 struct ib_port_attr attr;
1330 int err;
1331
1332 err = iwch_query_port(ibdev, port_num, &attr);
1333 if (err)
1334 return err;
1335
1336 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1337 immutable->gid_tbl_len = attr.gid_tbl_len;
1338 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
1339
1340 return 0;
1341 }
1342
1343 static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
1344 size_t str_len)
1345 {
1346 struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
1347 struct ethtool_drvinfo info;
1348 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1349
1350 PDBG("%s dev 0x%p\n", __func__, iwch_dev);
1351 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1352 snprintf(str, str_len, "%s", info.fw_version);
1353 }
1354
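/*
 * Register the device with the RDMA core: fill in the ib_device verbs
 * table and capabilities, allocate and populate the iw_cm_verbs used
 * for connection management, call ib_register_device(), and create the
 * sysfs attributes (hw_rev, hca_type, board_id).
 */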
1355 int iwch_register_device(struct iwch_dev *dev)
1356 {
1357 int ret;
1358 int i;
1359
1360 PDBG("%s iwch_dev %p\n", __func__, dev);
1361 strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
1362 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1363 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1364 dev->ibdev.owner = THIS_MODULE;
1365 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1366 IB_DEVICE_MEM_WINDOW |
1367 IB_DEVICE_MEM_MGT_EXTENSIONS;
1368
1369 /* cxgb3 supports STag 0. */
1370 dev->ibdev.local_dma_lkey = 0;
1371
1372 dev->ibdev.uverbs_cmd_mask =
1373 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1374 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1375 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1376 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1377 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1378 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1379 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1380 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1381 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1382 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1383 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1384 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1385 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1386 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1387 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1388 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1389 (1ull << IB_USER_VERBS_CMD_POST_RECV);
1390 dev->ibdev.node_type = RDMA_NODE_RNIC;
1391 BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
1392 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
1393 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
1394 dev->ibdev.num_comp_vectors = 1;
1395 dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
1396 dev->ibdev.query_device = iwch_query_device;
1397 dev->ibdev.query_port = iwch_query_port;
1398 dev->ibdev.query_pkey = iwch_query_pkey;
1399 dev->ibdev.query_gid = iwch_query_gid;
1400 dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
1401 dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
1402 dev->ibdev.mmap = iwch_mmap;
1403 dev->ibdev.alloc_pd = iwch_allocate_pd;
1404 dev->ibdev.dealloc_pd = iwch_deallocate_pd;
1405 dev->ibdev.create_ah = iwch_ah_create;
1406 dev->ibdev.destroy_ah = iwch_ah_destroy;
1407 dev->ibdev.create_qp = iwch_create_qp;
1408 dev->ibdev.modify_qp = iwch_ib_modify_qp;
1409 dev->ibdev.destroy_qp = iwch_destroy_qp;
1410 dev->ibdev.create_cq = iwch_create_cq;
1411 dev->ibdev.destroy_cq = iwch_destroy_cq;
1412 dev->ibdev.resize_cq = iwch_resize_cq;
1413 dev->ibdev.poll_cq = iwch_poll_cq;
1414 dev->ibdev.get_dma_mr = iwch_get_dma_mr;
1415 dev->ibdev.reg_user_mr = iwch_reg_user_mr;
1416 dev->ibdev.dereg_mr = iwch_dereg_mr;
1417 dev->ibdev.alloc_mw = iwch_alloc_mw;
1418 dev->ibdev.dealloc_mw = iwch_dealloc_mw;
1419 dev->ibdev.alloc_mr = iwch_alloc_mr;
1420 dev->ibdev.map_mr_sg = iwch_map_mr_sg;
1421 dev->ibdev.attach_mcast = iwch_multicast_attach;
1422 dev->ibdev.detach_mcast = iwch_multicast_detach;
1423 dev->ibdev.process_mad = iwch_process_mad;
1424 dev->ibdev.req_notify_cq = iwch_arm_cq;
1425 dev->ibdev.post_send = iwch_post_send;
1426 dev->ibdev.post_recv = iwch_post_receive;
1427 dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
1428 dev->ibdev.get_hw_stats = iwch_get_mib;
1429 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
1430 dev->ibdev.get_port_immutable = iwch_port_immutable;
1431 dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
1432
1433 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
1434 if (!dev->ibdev.iwcm)
1435 return -ENOMEM;
1436
1437 dev->ibdev.iwcm->connect = iwch_connect;
1438 dev->ibdev.iwcm->accept = iwch_accept_cr;
1439 dev->ibdev.iwcm->reject = iwch_reject_cr;
1440 dev->ibdev.iwcm->create_listen = iwch_create_listen;
1441 dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
1442 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1443 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1444 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1445 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1446 sizeof(dev->ibdev.iwcm->ifname));
1447
1448 ret = ib_register_device(&dev->ibdev, NULL);
1449 if (ret)
1450 goto bail1;
1451
1452 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
1453 ret = device_create_file(&dev->ibdev.dev,
1454 iwch_class_attributes[i]);
1455 if (ret) {
1456 goto bail2;
1457 }
1458 }
1459 return 0;
1460 bail2:
1461 ib_unregister_device(&dev->ibdev);
1462 bail1:
1463 kfree(dev->ibdev.iwcm);
1464 return ret;
1465 }
1466
1467 void iwch_unregister_device(struct iwch_dev *dev)
1468 {
1469 int i;
1470
1471 PDBG("%s iwch_dev %p\n", __func__, dev);
1472 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
1473 device_remove_file(&dev->ibdev.dev,
1474 iwch_class_attributes[i]);
1475 ib_unregister_device(&dev->ibdev);
1476 kfree(dev->ibdev.iwcm);
1477 return;
1478 }