drivers/infiniband/hw/ipath/ipath_cq.c
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
        struct ipath_cq_wc *wc;
        unsigned long flags;
        u32 head;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Note that the head pointer might be writable by user processes.
         * Take care to verify it is a sane value.
         */
        wc = cq->queue;
        head = wc->head;
        if (head >= (unsigned) cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else
                next = head + 1;
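        /*
         * The ring holds ibcq.cqe + 1 slots so that head == tail can
         * unambiguously mean "empty"; e.g. with cqe == 3, indices run
         * 0..3 and the queue is full once advancing head would land
         * on tail.
         */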
        if (unlikely(next == wc->tail)) {
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
        if (cq->ip) {
                wc->uqueue[head].wr_id = entry->wr_id;
                wc->uqueue[head].status = entry->status;
                wc->uqueue[head].opcode = entry->opcode;
                wc->uqueue[head].vendor_err = entry->vendor_err;
                wc->uqueue[head].byte_len = entry->byte_len;
                wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
                wc->uqueue[head].qp_num = entry->qp->qp_num;
                wc->uqueue[head].src_qp = entry->src_qp;
                wc->uqueue[head].wc_flags = entry->wc_flags;
                wc->uqueue[head].pkey_index = entry->pkey_index;
                wc->uqueue[head].slid = entry->slid;
                wc->uqueue[head].sl = entry->sl;
                wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
                wc->uqueue[head].port_num = entry->port_num;
                /* Make sure entry is written before the head index. */
                smp_wmb();
        } else
                wc->kqueue[head] = *entry;
        wc->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                tasklet_hi_schedule(&cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        if (entry->status != IB_WC_SUCCESS)
                to_idev(cq->ibcq.device)->n_wqe_errs++;
}

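/*
 * Illustrative only (not part of this file's build): a queue-pair
 * completion path in the driver would fill in an ib_wc and hand it to
 * ipath_cq_enter(), roughly:
 *
 *	struct ib_wc wc;
 *
 *	memset(&wc, 0, sizeof(wc));
 *	wc.wr_id = wqe->wr.wr_id;
 *	wc.status = IB_WC_SUCCESS;
 *	wc.opcode = IB_WC_SEND;
 *	wc.qp = &qp->ibqp;
 *	ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
 *
 * where "wqe" and "qp" stand in for the caller's work-request state.
 */
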
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *wc;
        unsigned long flags;
        int npolled;
        u32 tail;

        /* The kernel can only poll a kernel completion queue */
        if (cq->ip) {
                npolled = -EINVAL;
                goto bail;
        }

        spin_lock_irqsave(&cq->lock, flags);

        wc = cq->queue;
        tail = wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (tail == wc->head)
                        break;
                /* The kernel doesn't need a RMB since it has the lock. */
                *entry = wc->kqueue[tail];
                if (tail >= cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        wc->tail = tail;

        spin_unlock_irqrestore(&cq->lock, flags);

bail:
        return npolled;
}

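/*
 * Illustrative only: an in-kernel consumer drains this CQ through the
 * generic verbs entry point rather than calling it directly, e.g.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *
 * handle_completion() is a placeholder for the caller's logic.
 */
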
static void send_complete(unsigned long data)
{
        struct ipath_cq *cq = (struct ipath_cq *)data;

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, tasklet_hi_schedule()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

                if (cq->triggered == triggered)
                        return;
        }
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: the user context that will mmap the queue, if any
 * @udata: if present (and large enough), marks the CQ as user-mappable
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
                              const struct ib_cq_init_attr *attr,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_cq *cq;
        struct ipath_cq_wc *wc;
        struct ib_cq *ret;
        u32 sz;

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (entries < 1 || entries > ib_ipath_max_cqes) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        /* Allocate the completion queue structure. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
        sz = sizeof(*wc);
        if (udata && udata->outlen >= sizeof(__u64))
                sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
        else
                sz += sizeof(struct ib_wc) * (entries + 1);
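        /*
         * vmalloc_user() returns zeroed, page-aligned memory that is
         * safe to remap into user space; the extra slot keeps the
         * head == tail "empty" convention unambiguous.
         */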
        wc = vmalloc_user(sz);
        if (!wc) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_cq;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;

                cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
                if (!cq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wc;
                }

                err = ib_copy_to_udata(udata, &cq->ip->offset,
                                       sizeof(cq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                cq->ip = NULL;

        spin_lock(&dev->n_cqs_lock);
        if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                spin_unlock(&dev->n_cqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_cqs_allocated++;
        spin_unlock(&dev->n_cqs_lock);

        if (cq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
        wc->head = 0;
        wc->tail = 0;
        cq->queue = wc;

        ret = &cq->ibcq;

        goto done;

bail_ip:
        kfree(cq->ip);
bail_wc:
        vfree(wc);
bail_cq:
        kfree(cq);
done:
        return ret;
}

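/*
 * Illustrative only: an in-kernel consumer creates a CQ through the
 * generic verbs layer, which dispatches here:
 *
 *	struct ib_cq_init_attr attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, &attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * my_comp_handler and my_ctx are placeholders for the caller's
 * completion callback and context pointer.
 */
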
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
        struct ipath_ibdev *dev = to_idev(ibcq->device);
        struct ipath_cq *cq = to_icq(ibcq);

        tasklet_kill(&cq->comptask);
        spin_lock(&dev->n_cqs_lock);
        dev->n_cqs_allocated--;
        spin_unlock(&dev->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, ipath_release_mmap_info);
        else
                vfree(cq->queue);
        kfree(cq);

        return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, flags);
        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

        if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
            cq->queue->head != cq->queue->tail)
                ret = 1;

        spin_unlock_irqrestore(&cq->lock, flags);

        return ret;
}

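/*
 * Illustrative only: the standard race-free rearm pattern a consumer
 * uses with IB_CQ_REPORT_MISSED_EVENTS looks roughly like
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * A return of 1 means completions arrived after the last poll, so the
 * caller polls again instead of missing an event.
 */
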
/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of entries to support
 * @udata: if present, used to return the new mmap offset
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *old_wc;
        struct ipath_cq_wc *wc;
        u32 head, tail, n;
        int ret;
        u32 sz;

        if (cqe < 1 || cqe > ib_ipath_max_cqes) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        sz = sizeof(*wc);
        if (udata && udata->outlen >= sizeof(__u64))
                sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
        else
                sz += sizeof(struct ib_wc) * (cqe + 1);
        wc = vmalloc_user(sz);
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }

        /* Check that we can write the offset to mmap. */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = 0;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail_free;
        }

        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        old_wc = cq->queue;
        head = old_wc->head;
        if (head > (u32) cq->ibcq.cqe)
                head = (u32) cq->ibcq.cqe;
        tail = old_wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                ret = -EINVAL;
                goto bail_unlock;
        }
        for (n = 0; tail != head; n++) {
                if (cq->ip)
                        wc->uqueue[n] = old_wc->uqueue[tail];
                else
                        wc->kqueue[n] = old_wc->kqueue[tail];
                if (tail == (u32) cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        wc->head = n;
        wc->tail = 0;
        cq->queue = wc;
        spin_unlock_irq(&cq->lock);

        vfree(old_wc);

        if (cq->ip) {
                struct ipath_ibdev *dev = to_idev(ibcq->device);
                struct ipath_mmap_info *ip = cq->ip;

                ipath_update_mmap_info(dev, ip, sz, wc);

                /*
                 * Return the offset to mmap.
                 * See ipath_mmap() for details.
                 */
                if (udata && udata->outlen >= sizeof(__u64)) {
                        ret = ib_copy_to_udata(udata, &ip->offset,
                                               sizeof(ip->offset));
                        if (ret)
                                goto bail;
                }

                spin_lock_irq(&dev->pending_lock);
                if (list_empty(&ip->pending_mmaps))
                        list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = 0;
        goto bail;

bail_unlock:
        spin_unlock_irq(&cq->lock);
bail_free:
        vfree(wc);
bail:
        return ret;
}
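
/*
 * Illustrative only: a kernel caller grows a CQ through the generic
 * verbs layer, which dispatches here:
 *
 *	if (ib_resize_cq(cq, new_cqe))
 *		pr_warn("CQ resize to %d entries failed\n", new_cqe);
 *
 * ib_resize_cq() passes no udata for in-kernel callers, so for kernel
 * CQs this function sees udata == NULL and keeps the queue kernel-only.
 */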