/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

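/*
 * Map a QPN bitmap page plus a bit offset within that page back to the
 * QP number it represents; each map page covers RVT_BITS_PER_PAGE QPNs.
 */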
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

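/*
 * Find the next candidate bit offset in the current map page.  When the
 * chip steers receive traffic by the low QPN bits (qpt_mask is non-zero),
 * offsets whose context-select bits would fall outside the n available
 * kernel receive queues are skipped; otherwise a plain scan for the next
 * zero bit is sufficient.
 */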
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

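/*
 * Per-opcode post-send parameters handed to the rdmavt core: the size of
 * each work-request variant, the QP types on which the opcode is legal,
 * and RVT_OPERATION_* flags for opcodes that get atomic-style handling.
 */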
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_RDMA_READ] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC,
	},

	[IB_WR_ATOMIC_CMP_AND_SWP] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_ATOMIC_FETCH_AND_ADD] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_RDMA_WRITE_WITH_IMM] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND_WITH_IMM] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

};

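/*
 * Allocate a zeroed bitmap page on demand.  qpt->lock resolves the race
 * where two allocators try to install a page for the same slot: the
 * loser frees its page and uses the winner's.
 */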
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN, or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI.
 */
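/*
 * QPN 0 (SMI) and QPN 1 (GSI) are not kept in the bitmap; two bits per
 * port in qpt->flags record whether they are in use.  For ordinary QPs,
 * the scan starts just past the last allocated QPN and skips values
 * whose qpn_mask bits do not map to an existing kernel receive context.
 */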
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: the rvt device info structure
 *
 * Return: the number of QPs (QP0 and QP1) still in use across all ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

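/* rdmavt callback: a QP moved to the reset state; clear the DMA-busy count. */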
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

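/*
 * rdmavt callback: a QP moved to the error state.  Pull the QP off the
 * iowait list and, if the send engine is not running, release any
 * in-flight MR reference and pending tx request.
 */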
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

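/*
 * Map an exact byte MTU to its IB enum value; anything unrecognized
 * falls back to IB_MTU_2048.
 */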
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

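/*
 * Clamp the path MTU requested in attr to what the physical port can
 * actually carry, returning the (possibly reduced) enum value.
 */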
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

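/* Trivial rdmavt conversion hooks between byte MTUs and IB MTU enums. */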
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

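/*
 * rdmavt callback: allocate the qib-private part of a QP: the owner
 * back-pointer, a header scratch buffer, the send work item, and the
 * iowait list head used when waiting for DMA resources.
 */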
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

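/* Make sure the send work item is neither queued nor running. */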
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

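/*
 * Wait for any in-progress send DMA to finish, then drop the tx request
 * still held by the QP, if any.
 */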
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

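/* Take the QP off the list of QPs waiting for I/O resources. */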
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate a send work request
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wr/wqe.  This is called after the wqe has been set up
 * but before it is inserted into the send ring.
 *
 * Return: 1 to force direct progress, 0 otherwise, -EINVAL on failure.
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS

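/*
 * Support for the debugfs qp_stats dump: an iterator over the rdmavt QP
 * hash table plus a one-line-per-QP printer.
 */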
struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;

	return iter;
}

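/*
 * Advance to the next QP in the hash table, following per-bucket chains
 * first.  Returns 0 when a QP was found and 1 at the end of the table.
 * The qp_table and next pointers are RCU-protected, so the caller is
 * expected to hold rcu_read_lock() across calls.
 */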
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

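/*
 * Print a single line summarizing one QP: state, flags, PSNs, and the
 * send queue indices (last/acked/cur/tail/head).
 */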
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}

#endif