/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include <rdma/cxgb3-abi.h>
#include "common.h"

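/*
 * This file implements the RDMA verbs provider (struct ib_device methods)
 * for the Chelsio T3 iWARP RNIC.  Verbs that iWARP hardware does not
 * implement (address handles, multicast, MADs) are stubbed out below and
 * simply return -ENOSYS.
 */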
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct rdma_ah_attr *ah_attr,
				    struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

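/*
 * Each user context keeps a list of iwch_mm_entry records.  Queue memory
 * that userspace needs to map (CQ/WQ rings and the doorbell page) is
 * published to the library as a key in the create response; iwch_mmap()
 * later looks the key up in this list to find the physical address.
 */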
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	pr_debug("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

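/*
 * CQ creation, in outline: the requested depth is rounded up to a power
 * of two (T3A parts get 16 extra entries of slack for error CQEs), the
 * hardware CQ is allocated via cxio_create_cq(), and for user contexts
 * the CQ id, size and an mmap key are copied back through udata so that
 * libcxgb3 can map the ring.
 */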
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}

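/*
 * CQ resize is not wired up: the body below is compiled out behind
 * "#ifdef notyet", so the verb currently always returns -ENOSYS.
 */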
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	pr_debug("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		pr_err("%s - cxio_destroy_cq failed %d\n", __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

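/*
 * mmap() against the device file: the offset encodes the key handed out
 * at CQ/QP create time.  Doorbell pages within the user doorbell BAR are
 * mapped uncached and write-only via io_remap_pfn_range(); everything
 * else is contiguous queue memory and is mapped with remap_pfn_range().
 */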
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	pr_debug("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

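/*
 * Memory region handles: the low 8 bits of an STag are a key and the
 * upper bits form the mmid used to index rhp->mmidr, hence the
 * "stag >> 8" used throughout this file.  The DMA MR built below covers
 * the full 32-bit address range T3 can express, described by a page
 * list with a 64MB (1 << 26) page size.
 */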
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once("Cannot support dma_mrs on this platform\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
		 __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

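/*
 * User MR registration: ib_umem_get() pins the user buffer, then the DMA
 * addresses of its pages are written into the adapter's PBL one page of
 * entries at a time (PAGE_SIZE / 8 entries per iwch_write_pbl() call)
 * before the region is registered with iwch_register_mem().
 */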
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = mhp->umem->page_shift;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 (k << shift));
			if (i == PAGE_SIZE / sizeof(*pages)) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
			 uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
				   struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = -ENOMEM;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages)
		goto pl_err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	if (ret)
		goto err3;

	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}

static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}

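/*
 * QP teardown: the QP is first moved to the ERROR state so the hardware
 * stops using it, then we wait for the connection endpoint and all
 * references to drop before freeing the hardware WQ and the host
 * structure.
 */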
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
		 ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

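/*
 * QP creation, in outline: only RC QPs are supported.  The RQ depth is
 * the requested entries + 1 rounded up to a power of two (minimum 16),
 * the total WQ is sized to hold SQ + RQ (kernel QPs reserve roughly
 * twice the SQ space because fastreg WRs can take two WR slots), and
 * user QPs get two mmap keys back: one for the WQ ring and one for the
 * doorbell page.
 */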
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	pr_debug("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr + 1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
		 wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
		 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
		 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
		 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


1040 u8 port, u16 index, u16 * pkey)
1041{
b7b37ee0 1042 pr_debug("%s ibdev %p\n", __func__, ibdev);
b038ced7
SW
1043 *pkey = 0;
1044 return 0;
1045}
1046
1047static int iwch_query_gid(struct ib_device *ibdev, u8 port,
1048 int index, union ib_gid *gid)
1049{
1050 struct iwch_dev *dev;
1051
b7b37ee0
JP
1052 pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
1053 __func__, ibdev, port, index, gid);
b038ced7
SW
1054 dev = to_iwch_dev(ibdev);
1055 BUG_ON(port == 0 || port > 2);
1056 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
1057 memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
1058 return 0;
1059}
1060
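/*
 * Pack the "maj.min.mic" firmware revision reported by ethtool into one
 * 64-bit value: major in bits 47:32, minor in bits 31:16, micro in bits
 * 15:0.  For a hypothetical version string parsing to 7.11.0 this yields
 * (7ULL << 32) | (11 << 16) | 0 == 0x00000007000b0000.
 */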
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{

	struct iwch_dev *dev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

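/*
 * sysfs attribute callbacks.  These back the read-only hw_rev, hca_type
 * and board_id files created by iwch_register_device() below.
 */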
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

enum counters {
	IPINRECEIVES,
	IPINHDRERRORS,
	IPINADDRERRORS,
	IPINUNKNOWNPROTOS,
	IPINDISCARDS,
	IPINDELIVERS,
	IPOUTREQUESTS,
	IPOUTDISCARDS,
	IPOUTNOROUTES,
	IPREASMTIMEOUT,
	IPREASMREQDS,
	IPREASMOKS,
	IPREASMFAILS,
	TCPACTIVEOPENS,
	TCPPASSIVEOPENS,
	TCPATTEMPTFAILS,
	TCPESTABRESETS,
	TCPCURRESTAB,
	TCPINSEGS,
	TCPOUTSEGS,
	TCPRETRANSSEGS,
	TCPINERRS,
	TCPOUTRSTS,
	TCPRTOMIN,
	TCPRTOMAX,
	NR_COUNTERS
};

static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};

static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Our driver only supports device level stats */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	if (port != 0 || !stats)
		return -ENOSYS;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
	stats->value[IPREASMREQDS] = m.ipReasmReqds;
	stats->value[IPREASMOKS] = m.ipReasmOKs;
	stats->value[IPREASMFAILS] = m.ipReasmFails;
	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
	stats->value[TCPRTOMIN] = m.tcpRtoMin;
	stats->value[TCPRTOMAX] = m.tcpRtoMax;

	return stats->num_counters;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
			       size_t str_len)
{
	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	snprintf(str, str_len, "%s", info.fw_version);
}

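/*
 * Device registration, in outline: fill in the ib_device name, GUID,
 * capabilities and verb function pointers, hook up the iWARP CM ops
 * (iwcm), register with the IB core, and finally create the sysfs
 * attribute files.  On failure, everything done so far is unwound via
 * the bail labels.
 */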
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
	dev->ibdev.get_hw_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;
	dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;
	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
	       sizeof(dev->ibdev.iwcm->ifname));

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}