/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include <rdma/cxgb3-abi.h>
#include "common.h"

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr,
				    struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

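	/*
	 * Remove the CQ from the id table, then drop our reference and
	 * wait for any remaining holders before destroying the HW CQ.
	 */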
	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
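		/*
		 * A downlevel libcxgb3 only understands the v0 response
		 * layout, so size both the reply and the mmap length to
		 * what the library expects.
		 */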
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current WCE count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
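	/*
	 * For user CQs the library keeps the ring read pointer in user
	 * memory; snapshot it before arming so the hardware arms
	 * relative to what userspace has actually polled.
	 */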
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

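	/*
	 * The page offset encodes the key handed out in uresp.key at
	 * CQ/QP create time; look up (and consume) the matching mmap
	 * entry to find the physical address to map.
	 */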
	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

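	/*
	 * Cover the whole 32-bit space with 64MB (1 << 26) pages so the
	 * page list stays small (64 entries).
	 */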
	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	PDBG("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
				   struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = -ENOMEM;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages) {
		ret = -ENOMEM;
		goto pl_err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	if (ret)
		goto err3;

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}

static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time. The CM sets them via a QP modify.
	 * Need to fix...
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

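		/*
		 * Hand the WQ queue memory and the user doorbell page
		 * back to the library as two separate mmap keys.
		 */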
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

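	/*
	 * fw_version carries a leading type character followed by
	 * "maj.min.mic"; skip the first character and parse the three
	 * dotted fields.
	 */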
	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

enum counters {
	IPINRECEIVES,
	IPINHDRERRORS,
	IPINADDRERRORS,
	IPINUNKNOWNPROTOS,
	IPINDISCARDS,
	IPINDELIVERS,
	IPOUTREQUESTS,
	IPOUTDISCARDS,
	IPOUTNOROUTES,
	IPREASMTIMEOUT,
	IPREASMREQDS,
	IPREASMOKS,
	IPREASMFAILS,
	TCPACTIVEOPENS,
	TCPPASSIVEOPENS,
	TCPATTEMPTFAILS,
	TCPESTABRESETS,
	TCPCURRESTAB,
	TCPINSEGS,
	TCPOUTSEGS,
	TCPRETRANSSEGS,
	TCPINERRS,
	TCPOUTRSTS,
	TCPRTOMIN,
	TCPRTOMAX,
	NR_COUNTERS
};

static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};

static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Our driver only supports device level stats */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	if (port != 0 || !stats)
		return -ENOSYS;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

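	/*
	 * The wide MIB counters are split across _hi/_lo words;
	 * reassemble each one and copy it into its matching stats slot.
	 */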
	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
	stats->value[IPREASMREQDS] = m.ipReasmReqds;
	stats->value[IPREASMOKS] = m.ipReasmOKs;
	stats->value[IPREASMFAILS] = m.ipReasmFails;
	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
	stats->value[TCPRTOMIN] = m.tcpRtoMin;
	stats->value[TCPRTOMAX] = m.tcpRtoMax;

	return stats->num_counters;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = iwch_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
			       size_t str_len)
{
	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, iwch_dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	snprintf(str, str_len, "%s", info.fw_version);
}

int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
	dev->ibdev.get_hw_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;
	dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;
	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
	       sizeof(dev->ibdev.iwcm->ifname));

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}