drivers/infiniband/hw/ipath/ipath_ud.c
1 /*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <rdma/ib_smi.h>
35
36 #include "ipath_verbs.h"
37 #include "ipath_kernel.h"
38
39 /**
40 * ipath_ud_loopback - handle send on loopback QPs
41 * @sqp: the sending QP
42 * @swqe: the send work request
43 *
44 * This is called from ipath_make_ud_req() to forward a WQE addressed
45 * to the same HCA.
46 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
47 * while this is being called.
48 */
49 static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
50 {
51 struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
52 struct ipath_qp *qp;
53 struct ib_ah_attr *ah_attr;
54 unsigned long flags;
55 struct ipath_rq *rq;
56 struct ipath_srq *srq;
57 struct ipath_sge_state rsge;
58 struct ipath_sge *sge;
59 struct ipath_rwq *wq;
60 struct ipath_rwqe *wqe;
61 void (*handler)(struct ib_event *, void *);
62 struct ib_wc wc;
63 u32 tail;
64 u32 rlen;
65 u32 length;
66
67 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
68 if (!qp) {
69 dev->n_pkt_drops++;
70 goto send_comp;
71 }
72
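	/*
	 * Initialize now so the kfree() at the drop label is safe even
	 * on error paths taken before an SG list is allocated.
	 */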
73 rsge.sg_list = NULL;
74
75 /*
76 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
77 * Qkeys with the high order bit set mean use the
78 * qkey from the QP context instead of the WR (see 10.2.5).
79 */
80 if (unlikely(qp->ibqp.qp_num &&
81 ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
82 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
83 /* XXX OK to lose a count once in a while. */
84 dev->qkey_violations++;
85 dev->n_pkt_drops++;
86 goto drop;
87 }
88
89 /*
90	 * A GRH is expected to precede the data even if not
91 * present on the wire.
92 */
93 length = swqe->length;
94 wc.byte_len = length + sizeof(struct ib_grh);
95
96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
97 wc.wc_flags = IB_WC_WITH_IMM;
98 wc.imm_data = swqe->wr.ex.imm_data;
99 } else {
100 wc.wc_flags = 0;
101 wc.imm_data = 0;
102 }
103
104 /*
105	 * This would be a lot simpler if we could call ipath_get_rwqe(),
106	 * but that uses state the receive interrupt handler also uses,
107	 * so we would need to lock out receive interrupts while doing
108 * local loopback.
109 */
110 if (qp->ibqp.srq) {
111 srq = to_isrq(qp->ibqp.srq);
112 handler = srq->ibsrq.event_handler;
113 rq = &srq->rq;
114 } else {
115 srq = NULL;
116 handler = NULL;
117 rq = &qp->r_rq;
118 }
119
120 if (rq->max_sge > 1) {
121 /*
122 * XXX We could use GFP_KERNEL if ipath_do_send()
123 * was always called from the tasklet instead of
124 * from ipath_post_send().
125 */
126 rsge.sg_list = kmalloc((rq->max_sge - 1) *
127 sizeof(struct ipath_sge),
128 GFP_ATOMIC);
129 if (!rsge.sg_list) {
130 dev->n_pkt_drops++;
131 goto drop;
132 }
133 }
134
135 /*
136 * Get the next work request entry to find where to put the data.
137 * Note that it is safe to drop the lock after changing rq->tail
138 * since ipath_post_receive() won't fill the empty slot.
139 */
140 spin_lock_irqsave(&rq->lock, flags);
141 wq = rq->wq;
142 tail = wq->tail;
143 /* Validate tail before using it since it is user writable. */
144 if (tail >= rq->size)
145 tail = 0;
146 if (unlikely(tail == wq->head)) {
147 spin_unlock_irqrestore(&rq->lock, flags);
148 dev->n_pkt_drops++;
149 goto drop;
150 }
151 wqe = get_rwqe_ptr(rq, tail);
152 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
153 spin_unlock_irqrestore(&rq->lock, flags);
154 dev->n_pkt_drops++;
155 goto drop;
156 }
157 /* Silently drop packets which are too big. */
158 if (wc.byte_len > rlen) {
159 spin_unlock_irqrestore(&rq->lock, flags);
160 dev->n_pkt_drops++;
161 goto drop;
162 }
163 if (++tail >= rq->size)
164 tail = 0;
165 wq->tail = tail;
166 wc.wr_id = wqe->wr_id;
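	/*
	 * For SRQs with an event handler, check whether consuming this
	 * WQE dropped the queue below the SRQ limit; if so, clear the
	 * limit (so the event is one-shot) and report
	 * IB_EVENT_SRQ_LIMIT_REACHED.
	 */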
167 if (handler) {
168 u32 n;
169
170 /*
171		 * Validate the head pointer value and compute
172 * the number of remaining WQEs.
173 */
174 n = wq->head;
175 if (n >= rq->size)
176 n = 0;
177 if (n < tail)
178 n += rq->size - tail;
179 else
180 n -= tail;
181 if (n < srq->limit) {
182 struct ib_event ev;
183
184 srq->limit = 0;
185 spin_unlock_irqrestore(&rq->lock, flags);
186 ev.device = qp->ibqp.device;
187 ev.element.srq = qp->ibqp.srq;
188 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
189 handler(&ev, srq->ibsrq.srq_context);
190 } else
191 spin_unlock_irqrestore(&rq->lock, flags);
192 } else
193 spin_unlock_irqrestore(&rq->lock, flags);
194
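	/*
	 * Copy the GRH into the receive buffer if the AH carries one,
	 * otherwise skip over the space reserved for it, then copy the
	 * payload from the sender's SG list into the receiver's SGEs.
	 */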
195 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
196 if (ah_attr->ah_flags & IB_AH_GRH) {
197 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
198 wc.wc_flags |= IB_WC_GRH;
199 } else
200 ipath_skip_sge(&rsge, sizeof(struct ib_grh));
201 sge = swqe->sg_list;
202 while (length) {
203 u32 len = sge->length;
204
205 if (len > length)
206 len = length;
207 if (len > sge->sge_length)
208 len = sge->sge_length;
209 BUG_ON(len == 0);
210 ipath_copy_sge(&rsge, sge->vaddr, len);
211 sge->vaddr += len;
212 sge->length -= len;
213 sge->sge_length -= len;
214 if (sge->sge_length == 0) {
215 if (--swqe->wr.num_sge)
216 sge++;
217 } else if (sge->length == 0 && sge->mr != NULL) {
218 if (++sge->n >= IPATH_SEGSZ) {
219 if (++sge->m >= sge->mr->mapsz)
220 break;
221 sge->n = 0;
222 }
223 sge->vaddr =
224 sge->mr->map[sge->m]->segs[sge->n].vaddr;
225 sge->length =
226 sge->mr->map[sge->m]->segs[sge->n].length;
227 }
228 length -= len;
229 }
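	/* Fill in the receive work completion for the destination QP. */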
230 wc.status = IB_WC_SUCCESS;
231 wc.opcode = IB_WC_RECV;
232 wc.vendor_err = 0;
233 wc.qp = &qp->ibqp;
234 wc.src_qp = sqp->ibqp.qp_num;
235 /* XXX do we know which pkey matched? Only needed for GSI. */
236 wc.pkey_index = 0;
237 wc.slid = dev->dd->ipath_lid |
238 (ah_attr->src_path_bits &
239 ((1 << dev->dd->ipath_lmc) - 1));
240 wc.sl = ah_attr->sl;
241 wc.dlid_path_bits =
242 ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
243 wc.port_num = 1;
244 /* Signal completion event if the solicited bit is set. */
245 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
246 swqe->wr.send_flags & IB_SEND_SOLICITED);
247 drop:
248 kfree(rsge.sg_list);
249 if (atomic_dec_and_test(&qp->refcount))
250 wake_up(&qp->wait);
251 send_comp:
252 ipath_send_complete(sqp, swqe, IB_WC_SUCCESS);
253 }
254
255 /**
256 * ipath_make_ud_req - construct a UD request packet
257 * @qp: the QP
258 *
259 * Return 1 if constructed; otherwise, return 0.
260 */
261 int ipath_make_ud_req(struct ipath_qp *qp)
262 {
263 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
264 struct ipath_other_headers *ohdr;
265 struct ib_ah_attr *ah_attr;
266 struct ipath_swqe *wqe;
267 u32 nwords;
268 u32 extra_bytes;
269 u32 bth0;
270 u16 lrh0;
271 u16 lid;
272 int ret = 0;
273
274 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)))
275 goto bail;
276
277 if (qp->s_cur == qp->s_head)
278 goto bail;
279
280 wqe = get_swqe_ptr(qp, qp->s_cur);
281
282 /* Construct the header. */
283 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
284 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
285 if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
286 dev->n_multicast_xmit++;
287 else
288 dev->n_unicast_xmit++;
289 } else {
290 dev->n_unicast_xmit++;
291 lid = ah_attr->dlid &
292 ~((1 << dev->dd->ipath_lmc) - 1);
293 if (unlikely(lid == dev->dd->ipath_lid)) {
294 ipath_ud_loopback(qp, wqe);
295 goto done;
296 }
297 }
298
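	/*
	 * Pad the payload to a 4-byte boundary; nwords is the padded
	 * payload length in 32-bit words.
	 */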
299 extra_bytes = -wqe->length & 3;
300 nwords = (wqe->length + extra_bytes) >> 2;
301
302	/* Header size in 32-bit words: LRH+BTH+DETH = (8+12+8)/4. */
303 qp->s_hdrwords = 7;
304 qp->s_cur_size = wqe->length;
305 qp->s_cur_sge = &qp->s_sge;
306 qp->s_dmult = ah_attr->static_rate;
307 qp->s_wqe = wqe;
308 qp->s_sge.sge = wqe->sg_list[0];
309 qp->s_sge.sg_list = wqe->sg_list + 1;
310 qp->s_sge.num_sge = wqe->wr.num_sge;
311
312 if (ah_attr->ah_flags & IB_AH_GRH) {
313 /* Header size in 32-bit words. */
314 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
315 &ah_attr->grh,
316 qp->s_hdrwords, nwords);
317 lrh0 = IPATH_LRH_GRH;
318 ohdr = &qp->s_hdr.u.l.oth;
319 /*
320 * Don't worry about sending to locally attached multicast
321		 * QPs; the spec leaves that behavior unspecified.
322 */
323 } else {
324 /* Header size in 32-bit words. */
325 lrh0 = IPATH_LRH_BTH;
326 ohdr = &qp->s_hdr.u.oth;
327 }
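	/*
	 * Pick the BTH opcode; a SEND with immediate data adds one
	 * header word for the immediate.
	 */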
328 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
329 qp->s_hdrwords++;
330 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
332 } else
333 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
334 lrh0 |= ah_attr->sl << 4;
335 if (qp->ibqp.qp_type == IB_QPT_SMI)
336 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
337 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
338 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
339 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
340 SIZE_OF_CRC);
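	/*
	 * The source LID is the port's base LID plus the AH's path bits
	 * (within the LMC mask); fall back to the permissive LID if the
	 * port has no LID assigned.
	 */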
341 lid = dev->dd->ipath_lid;
342 if (lid) {
343 lid |= ah_attr->src_path_bits &
344 ((1 << dev->dd->ipath_lmc) - 1);
345 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
346 } else
347 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
348 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
349 bth0 |= 1 << 23;
350 bth0 |= extra_bytes << 20;
351 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
352 ipath_get_pkey(dev->dd, qp->s_pkey_index);
353 ohdr->bth[0] = cpu_to_be32(bth0);
354 /*
355 * Use the multicast QP if the destination LID is a multicast LID.
356 */
357 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
358 ah_attr->dlid != IPATH_PERMISSIVE_LID ?
359 __constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
360 cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
361 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
362 /*
363 * Qkeys with the high order bit set mean use the
364 * qkey from the QP context instead of the WR (see 10.2.5).
365 */
366 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
367 qp->qkey : wqe->wr.wr.ud.remote_qkey);
368 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
369
370 done:
371 if (++qp->s_cur >= qp->s_size)
372 qp->s_cur = 0;
373 ret = 1;
374
375 bail:
376 return ret;
377 }
378
379 /**
380 * ipath_ud_rcv - receive an incoming UD packet
381 * @dev: the device the packet came in on
382 * @hdr: the packet header
383 * @has_grh: true if the packet has a GRH
384 * @data: the packet data
385 * @tlen: the packet length
386 * @qp: the QP the packet came on
387 *
388 * This is called from ipath_qp_rcv() to process an incoming UD packet
389 * for the given QP.
390 * Called at interrupt level.
391 */
392 void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
393 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
394 {
395 struct ipath_other_headers *ohdr;
396 int opcode;
397 u32 hdrsize;
398 u32 pad;
399 struct ib_wc wc;
400 u32 qkey;
401 u32 src_qp;
402 u16 dlid;
403 int header_in_data;
404
405 /* Check for GRH */
406 if (!has_grh) {
407 ohdr = &hdr->u.oth;
408 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
409 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
410 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
411 header_in_data = 0;
412 } else {
413 ohdr = &hdr->u.l.oth;
414 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
415 /*
416 * The header with GRH is 68 bytes and the core driver sets
417		 * the eager header buffer size to 56 bytes, so the last 12
418		 * bytes of the IB header are in the data buffer.
419 */
420 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
421 if (header_in_data) {
422 qkey = be32_to_cpu(((__be32 *) data)[1]);
423 src_qp = be32_to_cpu(((__be32 *) data)[2]);
424 data += 12;
425 } else {
426 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
427 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
428 }
429 }
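	/* The source QPN occupies the low 24 bits of DETH[1]; mask off the rest. */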
430 src_qp &= IPATH_QPN_MASK;
431
432 /*
433 * Check that the permissive LID is only used on QP0
434 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
435 */
436 if (qp->ibqp.qp_num) {
437 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
438 hdr->lrh[3] == IB_LID_PERMISSIVE)) {
439 dev->n_pkt_drops++;
440 goto bail;
441 }
442 if (unlikely(qkey != qp->qkey)) {
443 /* XXX OK to lose a count once in a while. */
444 dev->qkey_violations++;
445 dev->n_pkt_drops++;
446 goto bail;
447 }
448 } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
449 hdr->lrh[3] == IB_LID_PERMISSIVE) {
450 struct ib_smp *smp = (struct ib_smp *) data;
451
452 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
453 dev->n_pkt_drops++;
454 goto bail;
455 }
456 }
457
458 /*
459	 * The opcode is in the low byte when it's in network order
460 * (top byte when in host order).
461 */
462 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
463 if (qp->ibqp.qp_num > 1 &&
464 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
465 if (header_in_data) {
466 wc.imm_data = *(__be32 *) data;
467 data += sizeof(__be32);
468 } else
469 wc.imm_data = ohdr->u.ud.imm_data;
470 wc.wc_flags = IB_WC_WITH_IMM;
471 hdrsize += sizeof(u32);
472 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
473 wc.imm_data = 0;
474 wc.wc_flags = 0;
475 } else {
476 dev->n_pkt_drops++;
477 goto bail;
478 }
479
480 /* Get the number of bytes the message was padded by. */
481 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
482 if (unlikely(tlen < (hdrsize + pad + 4))) {
483 /* Drop incomplete packets. */
484 dev->n_pkt_drops++;
485 goto bail;
486 }
487 tlen -= hdrsize + pad + 4;
488
489 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely((qp->ibqp.qp_num == 0 &&
491 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
493 (qp->ibqp.qp_num == 1 &&
494 (tlen != 256 ||
495 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
496 dev->n_pkt_drops++;
497 goto bail;
498 }
499
500 /*
501	 * A GRH is expected to precede the data even if not
502 * present on the wire.
503 */
504 wc.byte_len = tlen + sizeof(struct ib_grh);
505
506 /*
507 * Get the next work request entry to find where to put the data.
508 */
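	/*
	 * r_reuse_sge means a previously fetched RWQE was left unused
	 * (e.g. the last packet was too big), so reuse it here instead
	 * of fetching a new one.
	 */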
509 if (qp->r_reuse_sge)
510 qp->r_reuse_sge = 0;
511 else if (!ipath_get_rwqe(qp, 0)) {
512 /*
513 * Count VL15 packets dropped due to no receive buffer.
514		 * Otherwise, count them as buffer overruns, since the HW can
515		 * usually receive packets even if there are no QPs with
516		 * posted receive buffers.
517 */
518 if (qp->ibqp.qp_num == 0)
519 dev->n_vl15_dropped++;
520 else
521 dev->rcv_errors++;
522 goto bail;
523 }
524 /* Silently drop packets which are too big. */
525 if (wc.byte_len > qp->r_len) {
526 qp->r_reuse_sge = 1;
527 dev->n_pkt_drops++;
528 goto bail;
529 }
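	/*
	 * Copy the GRH (if present) and the payload into the receive
	 * SGEs; when there is no GRH, the space reserved for it is
	 * skipped so the data lands in the same place either way.
	 */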
530 if (has_grh) {
531 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
532 sizeof(struct ib_grh));
533 wc.wc_flags |= IB_WC_GRH;
534 } else
535 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
536 ipath_copy_sge(&qp->r_sge, data,
537 wc.byte_len - sizeof(struct ib_grh));
538 qp->r_wrid_valid = 0;
539 wc.wr_id = qp->r_wr_id;
540 wc.status = IB_WC_SUCCESS;
541 wc.opcode = IB_WC_RECV;
542 wc.vendor_err = 0;
543 wc.qp = &qp->ibqp;
544 wc.src_qp = src_qp;
545 /* XXX do we know which pkey matched? Only needed for GSI. */
546 wc.pkey_index = 0;
547 wc.slid = be16_to_cpu(hdr->lrh[3]);
548 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
549 dlid = be16_to_cpu(hdr->lrh[1]);
550 /*
551 * Save the LMC lower bits if the destination LID is a unicast LID.
552 */
553 wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
554 dlid & ((1 << dev->dd->ipath_lmc) - 1);
555 wc.port_num = 1;
556 /* Signal completion event if the solicited bit is set. */
557 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
558 (ohdr->bth[0] &
559 __constant_cpu_to_be32(1 << 23)) != 0);
560
561 bail:;
562 }