/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		/* The queue is full: report an overflow error. */
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		/* User-mapped CQ: copy into the ib_uverbs_wc layout. */
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
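
/*
 * Illustration (added for clarity; a sketch, not from the original file):
 * the ring has ibcq.cqe + 1 slots so that one slot always stays empty,
 * which lets head == tail mean "empty" and next == tail mean "full".
 * For example, with ibcq.cqe == 3 (slots 0..3):
 *
 *	head == 0, tail == 0	-> empty
 *	head == 2, tail == 0	-> entries in slots 0 and 1
 *	head == 3, tail == 0	-> next wraps to 0 == tail, so the queue
 *				   is full with 3 (== cqe) entries
 */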

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	/* Clamp the tail index in case it is out of range. */
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
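
/*
 * Usage sketch (added for illustration; not part of this file): a kernel
 * consumer typically re-arms the CQ from its completion handler and then
 * drains it with ib_poll_cq(), which lands here for ipath devices:
 *
 *	struct ib_wc wc;
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);	 // handle_completion() is hypothetical
 */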

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}
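
/*
 * Race illustration (added; a sketch, not from the original file):
 *
 *	send_complete()			ipath_cq_enter()
 *	triggered = cq->triggered;
 *	comp_handler() runs
 *					cq->triggered++;
 *					tasklet_hi_schedule();
 *	cq->triggered != triggered,
 *	so loop and call the handler
 *	again immediately
 *
 * The re-check lets the new completion be reported right away instead of
 * waiting for the tasklet to be run again.
 */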

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: unused by the InfiniPath driver
 * @udata: user buffer used to return the mmap offset for userspace CQs
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
			      const struct ib_cq_init_attr *attr,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
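
/*
 * mmap flow sketch (added for illustration; the userspace details are an
 * assumption, not part of this file): for a userspace CQ, the offset
 * written to @udata above is passed back by the user library as the mmap
 * offset, and ipath_mmap() resolves it to this ipath_cq_wc buffer:
 *
 *	offset = <value returned in udata>;
 *	queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     context->cmd_fd, offset);
 */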

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	/* Make sure send_complete() isn't running or scheduled. */
	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
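
/*
 * Usage sketch (added for illustration; not part of this file): callers
 * that pass IB_CQ_REPORT_MISSED_EVENTS treat a return of 1 as "entries
 * are already queued" and poll again instead of sleeping, which closes
 * the race between the last poll and re-arming the CQ:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto poll_again;	// poll_again is a hypothetical label
 */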

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of entries to support
 * @udata: user buffer used to return the new mmap offset
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		/* The new queue is too small for the pending entries. */
		ret = -EINVAL;
		goto bail_unlock;
	}
	/* Copy the pending entries to the front of the new queue. */
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		ipath_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
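
/*
 * Occupancy check illustration (added; a sketch, not from the original
 * file): "cq->ibcq.cqe + 1 + head - tail" counts the pending entries when
 * the ring has wrapped.  With the old cqe == 7 (8 slots), head == 2 and
 * tail == 6, the entries sit in slots 6, 7, 0, 1, so
 * n = 7 + 1 + 2 - 6 = 4, and a resize to fewer than 4 entries fails with
 * -EINVAL.
 */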