drivers/staging/rdma/hfi1/ud.c (mirror_ubuntu-artful-kernel.git)
staging/rdma/hfi1: Use rdmavt version of post_send
1 /*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51 #include <linux/net.h>
52 #include <rdma/ib_smi.h>
53
54 #include "hfi.h"
55 #include "mad.h"
56 #include "qp.h"
57
58 /**
59 * ud_loopback - handle send on loopback QPs
60 * @sqp: the sending QP
61 * @swqe: the send work request
62 *
63 * This is called from hfi1_make_ud_req() to forward a WQE addressed
64 * to the same HFI.
65 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
66 * while this is being called.
67 */
68 static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
69 {
70 struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
71 struct hfi1_pportdata *ppd;
72 struct rvt_qp *qp;
73 struct ib_ah_attr *ah_attr;
74 unsigned long flags;
75 struct rvt_sge_state ssge;
76 struct rvt_sge *sge;
77 struct ib_wc wc;
78 u32 length;
79 enum ib_qp_type sqptype, dqptype;
80
81 rcu_read_lock();
82
83 qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
84 if (!qp) {
85 ibp->rvp.n_pkt_drops++;
86 rcu_read_unlock();
87 return;
88 }
89
90 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
91 IB_QPT_UD : sqp->ibqp.qp_type;
92 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
93 IB_QPT_UD : qp->ibqp.qp_type;
94
95 if (dqptype != sqptype ||
96 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
97 ibp->rvp.n_pkt_drops++;
98 goto drop;
99 }
100
101 ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
102 ppd = ppd_from_ibp(ibp);
103
104 if (qp->ibqp.qp_num > 1) {
105 u16 pkey;
106 u16 slid;
107 u8 sc5 = ibp->sl_to_sc[ah_attr->sl];
108
109 pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
110 slid = ppd->lid | (ah_attr->src_path_bits &
111 ((1 << ppd->lmc) - 1));
112 if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
113 qp->s_pkey_index, slid))) {
114 hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
115 ah_attr->sl,
116 sqp->ibqp.qp_num, qp->ibqp.qp_num,
117 slid, ah_attr->dlid);
118 goto drop;
119 }
120 }
121
122 /*
123 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
124 * Qkeys with the high order bit set mean use the
125 * qkey from the QP context instead of the WR (see 10.2.5).
126 */
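/*
 * The cast below relies on two's-complement arithmetic: treating the
 * 32-bit remote_qkey as a signed int makes it negative exactly when
 * bit 31 (the high-order bit) is set, so e.g. 0x80000000 selects the
 * sending QP's own qkey while 0x00001234 is used as given.
 */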
127 if (qp->ibqp.qp_num) {
128 u32 qkey;
129
130 qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
131 sqp->qkey : swqe->ud_wr.remote_qkey;
132 if (unlikely(qkey != qp->qkey)) {
133 u16 lid;
134
135 lid = ppd->lid | (ah_attr->src_path_bits &
136 ((1 << ppd->lmc) - 1));
137 hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
138 ah_attr->sl,
139 sqp->ibqp.qp_num, qp->ibqp.qp_num,
140 lid,
141 ah_attr->dlid);
142 goto drop;
143 }
144 }
145
146 /*
147 * A GRH is expected to precede the data even if not
148 * present on the wire.
149 */
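/*
 * struct ib_grh is 40 bytes, so the byte_len reported in the
 * completion always accounts for those 40 bytes, whether or not a
 * GRH was actually present.
 */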
150 length = swqe->length;
151 memset(&wc, 0, sizeof(wc));
152 wc.byte_len = length + sizeof(struct ib_grh);
153
154 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
155 wc.wc_flags = IB_WC_WITH_IMM;
156 wc.ex.imm_data = swqe->wr.ex.imm_data;
157 }
158
159 spin_lock_irqsave(&qp->r_lock, flags);
160
161 /*
162 * Get the next work request entry to find where to put the data.
163 */
164 if (qp->r_flags & RVT_R_REUSE_SGE)
165 qp->r_flags &= ~RVT_R_REUSE_SGE;
166 else {
167 int ret;
168
169 ret = hfi1_get_rwqe(qp, 0);
170 if (ret < 0) {
171 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
172 goto bail_unlock;
173 }
174 if (!ret) {
175 if (qp->ibqp.qp_num == 0)
176 ibp->rvp.n_vl15_dropped++;
177 goto bail_unlock;
178 }
179 }
180 /* Silently drop packets which are too big. */
181 if (unlikely(wc.byte_len > qp->r_len)) {
182 qp->r_flags |= RVT_R_REUSE_SGE;
183 ibp->rvp.n_pkt_drops++;
184 goto bail_unlock;
185 }
186
187 if (ah_attr->ah_flags & IB_AH_GRH) {
188 hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
189 sizeof(struct ib_grh), 1);
190 wc.wc_flags |= IB_WC_GRH;
191 } else
192 hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
193 ssge.sg_list = swqe->sg_list + 1;
194 ssge.sge = *swqe->sg_list;
195 ssge.num_sge = swqe->wr.num_sge;
196 sge = &ssge.sge;
197 while (length) {
198 u32 len = sge->length;
199
200 if (len > length)
201 len = length;
202 if (len > sge->sge_length)
203 len = sge->sge_length;
204 WARN_ON_ONCE(len == 0);
205 hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
206 sge->vaddr += len;
207 sge->length -= len;
208 sge->sge_length -= len;
209 if (sge->sge_length == 0) {
210 if (--ssge.num_sge)
211 *sge = *ssge.sg_list++;
212 } else if (sge->length == 0 && sge->mr->lkey) {
213 if (++sge->n >= RVT_SEGSZ) {
214 if (++sge->m >= sge->mr->mapsz)
215 break;
216 sge->n = 0;
217 }
218 sge->vaddr =
219 sge->mr->map[sge->m]->segs[sge->n].vaddr;
220 sge->length =
221 sge->mr->map[sge->m]->segs[sge->n].length;
222 }
223 length -= len;
224 }
225 hfi1_put_ss(&qp->r_sge);
226 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
227 goto bail_unlock;
228 wc.wr_id = qp->r_wr_id;
229 wc.status = IB_WC_SUCCESS;
230 wc.opcode = IB_WC_RECV;
231 wc.qp = &qp->ibqp;
232 wc.src_qp = sqp->ibqp.qp_num;
233 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
234 if (sqp->ibqp.qp_type == IB_QPT_GSI ||
235 sqp->ibqp.qp_type == IB_QPT_SMI)
236 wc.pkey_index = swqe->ud_wr.pkey_index;
237 else
238 wc.pkey_index = sqp->s_pkey_index;
239 } else {
240 wc.pkey_index = 0;
241 }
242 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
243 /* Check for loopback when the port lid is not set */
244 if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
245 wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
246 wc.sl = ah_attr->sl;
247 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
248 wc.port_num = qp->port_num;
249 /* Signal completion event if the solicited bit is set. */
250 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
251 swqe->wr.send_flags & IB_SEND_SOLICITED);
252 ibp->rvp.n_loop_pkts++;
253 bail_unlock:
254 spin_unlock_irqrestore(&qp->r_lock, flags);
255 drop:
256 rcu_read_unlock();
257 }
258
259 /**
260 * hfi1_make_ud_req - construct a UD request packet
261 * @qp: the QP
262 *
263 * Return 1 if constructed; otherwise, return 0.
264 */
265 int hfi1_make_ud_req(struct rvt_qp *qp)
266 {
267 struct hfi1_qp_priv *priv = qp->priv;
268 struct hfi1_other_headers *ohdr;
269 struct ib_ah_attr *ah_attr;
270 struct hfi1_pportdata *ppd;
271 struct hfi1_ibport *ibp;
272 struct rvt_swqe *wqe;
273 unsigned long flags;
274 u32 nwords;
275 u32 extra_bytes;
276 u32 bth0;
277 u16 lrh0;
278 u16 lid;
279 int ret = 0;
280 int next_cur;
281 u8 sc5;
282
283 spin_lock_irqsave(&qp->s_lock, flags);
284
285 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
286 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
287 goto bail;
288 /* We are in the error state, flush the work request. */
289 if (qp->s_last == qp->s_head)
290 goto bail;
291 /* If DMAs are in progress, we can't flush immediately. */
292 if (atomic_read(&priv->s_iowait.sdma_busy)) {
293 qp->s_flags |= RVT_S_WAIT_DMA;
294 goto bail;
295 }
296 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
297 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
298 goto done;
299 }
300
301 if (qp->s_cur == qp->s_head)
302 goto bail;
303
304 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
305 next_cur = qp->s_cur + 1;
306 if (next_cur >= qp->s_size)
307 next_cur = 0;
308
309 /* Construct the header. */
310 ibp = to_iport(qp->ibqp.device, qp->port_num);
311 ppd = ppd_from_ibp(ibp);
312 ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
313 if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
314 ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
315 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
316 if (unlikely(!loopback && (lid == ppd->lid ||
317 (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
318 qp->ibqp.qp_type == IB_QPT_GSI)))) {
319 /*
320 * If DMAs are in progress, we can't generate
321 * a completion for the loopback packet since
322 * it would be out of order.
323 * Instead of waiting, we could queue a
324 * zero length descriptor so we get a callback.
325 */
326 if (atomic_read(&priv->s_iowait.sdma_busy)) {
327 qp->s_flags |= RVT_S_WAIT_DMA;
328 goto bail;
329 }
330 qp->s_cur = next_cur;
331 spin_unlock_irqrestore(&qp->s_lock, flags);
332 ud_loopback(qp, wqe);
333 spin_lock_irqsave(&qp->s_lock, flags);
334 hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
335 goto done;
336 }
337 }
338
339 qp->s_cur = next_cur;
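/*
 * Pad the payload to a multiple of 4 bytes: "-length & 3" is the
 * number of pad bytes needed, i.e. (4 - length % 4) & 3.  For
 * example, a 9-byte payload gives extra_bytes = 3 and nwords = 3.
 */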
340 extra_bytes = -wqe->length & 3;
341 nwords = (wqe->length + extra_bytes) >> 2;
342
343 /* header size in 32-bit words: LRH+BTH+DETH = (8+12+8)/4. */
344 qp->s_hdrwords = 7;
345 qp->s_cur_size = wqe->length;
346 qp->s_cur_sge = &qp->s_sge;
347 qp->s_srate = ah_attr->static_rate;
348 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
349 qp->s_wqe = wqe;
350 qp->s_sge.sge = wqe->sg_list[0];
351 qp->s_sge.sg_list = wqe->sg_list + 1;
352 qp->s_sge.num_sge = wqe->wr.num_sge;
353 qp->s_sge.total_len = wqe->length;
354
355 if (ah_attr->ah_flags & IB_AH_GRH) {
356 /* Header size in 32-bit words. */
357 qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh,
358 &ah_attr->grh,
359 qp->s_hdrwords, nwords);
360 lrh0 = HFI1_LRH_GRH;
361 ohdr = &priv->s_hdr->ibh.u.l.oth;
362 /*
363 * Don't worry about sending to locally attached multicast
364 * QPs; the IBTA spec leaves that behavior unspecified.
365 */
366 } else {
367 /* Header size in 32-bit words. */
368 lrh0 = HFI1_LRH_BTH;
369 ohdr = &priv->s_hdr->ibh.u.oth;
370 }
371 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
372 qp->s_hdrwords++;
373 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
374 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
375 } else
376 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
377 sc5 = ibp->sl_to_sc[ah_attr->sl];
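/*
 * lrh[0] layout: bits 15-12 hold the VL field (filled with SC[3:0]
 * for these 9B packets), bits 11-8 the link version, bits 7-4 the
 * SL, and bits 1-0 the next-header type already set above via
 * HFI1_LRH_BTH or HFI1_LRH_GRH.
 */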
378 lrh0 |= (ah_attr->sl & 0xf) << 4;
379 if (qp->ibqp.qp_type == IB_QPT_SMI) {
380 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
381 priv->s_sc = 0xf;
382 } else {
383 lrh0 |= (sc5 & 0xf) << 12;
384 priv->s_sc = sc5;
385 }
386 priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
387 priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
388 priv->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
389 priv->s_hdr->ibh.lrh[2] =
390 cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
391 if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE))
392 priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
393 else {
394 lid = ppd->lid;
395 if (lid) {
396 lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
397 priv->s_hdr->ibh.lrh[3] = cpu_to_be16(lid);
398 } else
399 priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
400 }
401 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
402 bth0 |= IB_BTH_SOLICITED;
403 bth0 |= extra_bytes << 20;
404 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
405 bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
406 else
407 bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
408 ohdr->bth[0] = cpu_to_be32(bth0);
409 ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
410 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
411 /*
412 * Qkeys with the high order bit set mean use the
413 * qkey from the QP context instead of the WR (see 10.2.5).
414 */
415 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
416 qp->qkey : wqe->ud_wr.remote_qkey);
417 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
418 /* disarm any ahg */
419 priv->s_hdr->ahgcount = 0;
420 priv->s_hdr->ahgidx = 0;
421 priv->s_hdr->tx_flags = 0;
422 priv->s_hdr->sde = NULL;
423
424 done:
425 ret = 1;
426 goto unlock;
427
428 bail:
429 qp->s_flags &= ~RVT_S_BUSY;
430 unlock:
431 spin_unlock_irqrestore(&qp->s_lock, flags);
432 return ret;
433 }
434
435 /*
436 * Hardware can't check this, so we do it here.
437 *
438 * This is a slightly different algorithm from the standard pkey check: it
439 * special-cases the management pkeys and allows 0x7fff and 0xffff to be in
440 * the table at the same time.
441 *
442 * @returns the index found or -1 if not found
443 */
444 int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
445 {
446 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
447 unsigned i;
448
449 if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
450 unsigned lim_idx = -1;
451
452 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
453 /* here we look for an exact match */
454 if (ppd->pkeys[i] == pkey)
455 return i;
456 if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
457 lim_idx = i;
458 }
459
460 /* did not find 0xffff; return the 0x7fff index if one was found */
461 if (pkey == FULL_MGMT_P_KEY)
462 return lim_idx;
463
464 /* no match... */
465 return -1;
466 }
467
468 pkey &= 0x7fff; /* remove limited/full membership bit */
469
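/*
 * Bit 15 of a pkey is the membership bit: set means full member,
 * clear means limited member.  With it masked off, e.g. 0x8001 and
 * 0x0001 match the same table entry.
 */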
470 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
471 if ((ppd->pkeys[i] & 0x7fff) == pkey)
472 return i;
473
474 /*
475 * Should not get here; this means the hardware failed to validate pkeys.
476 */
477 return -1;
478 }
479
480 void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
481 u32 pkey, u32 slid, u32 dlid, u8 sc5,
482 const struct ib_grh *old_grh)
483 {
484 u64 pbc, pbc_flags = 0;
485 u32 bth0, plen, vl, hwords = 5;
486 u16 lrh0;
487 u8 sl = ibp->sc_to_sl[sc5];
488 struct hfi1_ib_header hdr;
489 struct hfi1_other_headers *ohdr;
490 struct pio_buf *pbuf;
491 struct send_context *ctxt = qp_to_send_context(qp, sc5);
492 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
493
494 if (old_grh) {
495 struct ib_grh *grh = &hdr.u.l.grh;
496
497 grh->version_tclass_flow = old_grh->version_tclass_flow;
498 grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
499 grh->hop_limit = 0xff;
500 grh->sgid = old_grh->dgid;
501 grh->dgid = old_grh->sgid;
502 ohdr = &hdr.u.l.oth;
503 lrh0 = HFI1_LRH_GRH;
504 hwords += sizeof(struct ib_grh) / sizeof(u32);
505 } else {
506 ohdr = &hdr.u.oth;
507 lrh0 = HFI1_LRH_BTH;
508 }
509
510 lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
511
512 bth0 = pkey | (IB_OPCODE_CNP << 24);
513 ohdr->bth[0] = cpu_to_be32(bth0);
514
515 ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
516 ohdr->bth[2] = 0; /* PSN 0 */
517
518 hdr.lrh[0] = cpu_to_be16(lrh0);
519 hdr.lrh[1] = cpu_to_be16(dlid);
520 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
521 hdr.lrh[3] = cpu_to_be16(slid);
522
523 plen = 2 /* PBC */ + hwords;
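/*
 * OPA service classes are 5 bits wide but the 9B LRH only carries
 * SC[3:0], so the fifth bit (sc5 & 0x10) is handed to the hardware
 * in the PBC DC_INFO bit instead.
 */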
524 pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
525 vl = sc_to_vlt(ppd->dd, sc5);
526 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
527 if (ctxt) {
528 pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
529 if (pbuf)
530 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
531 &hdr, hwords);
532 }
533 }
534
535 /*
536 * opa_smp_check() - Do the regular pkey checking, and the additional
537 * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
538 * ("SMA Packet Checks").
539 *
540 * Note that:
541 * - Checks are done using the pkey directly from the packet's BTH,
542 * and specifically _not_ the pkey that we attach to the completion,
543 * which may be different.
544 * - These checks are specifically for "non-local" SMPs (i.e., SMPs
545 * which originated on another node). SMPs which are sent from, and
546 * destined to this node are checked in opa_local_smp_check().
547 *
548 * At the point where opa_smp_check() is called, we know:
549 * - destination QP is QP0
550 *
551 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
552 */
553 static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
554 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
555 {
556 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
557
558 /*
559 * I don't think it's possible for us to get here with sc != 0xf,
560 * but check it to be certain.
561 */
562 if (sc5 != 0xf)
563 return 1;
564
565 if (rcv_pkey_check(ppd, pkey, sc5, slid))
566 return 1;
567
568 /*
569 * At this point we know (and so don't need to check again) that
570 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
571 * (see ingress_pkey_check).
572 */
573 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
574 smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
575 ingress_pkey_table_fail(ppd, pkey, slid);
576 return 1;
577 }
578
579 /*
580 * SMPs fall into one of four (disjoint) categories:
581 * SMA request, SMA response, trap, or trap repress.
582 * Our response depends, in part, on which type of
583 * SMP we're processing.
584 *
585 * If this is not an SMA request, or trap repress:
586 * - accept MAD if the port is running an SM
587 * - pkey == FULL_MGMT_P_KEY =>
588 * reply with unsupported method (i.e., just mark
589 * the smp's status field here, and let it be
590 * processed normally)
591 * - pkey != LIM_MGMT_P_KEY =>
592 * increment port recv constraint errors, drop MAD
593 * If this is an SMA request or trap repress:
594 * - pkey != FULL_MGMT_P_KEY =>
595 * increment port recv constraint errors, drop MAD
596 */
597 switch (smp->method) {
598 case IB_MGMT_METHOD_GET:
599 case IB_MGMT_METHOD_SET:
600 case IB_MGMT_METHOD_REPORT:
601 case IB_MGMT_METHOD_TRAP_REPRESS:
602 if (pkey != FULL_MGMT_P_KEY) {
603 ingress_pkey_table_fail(ppd, pkey, slid);
604 return 1;
605 }
606 break;
607 case IB_MGMT_METHOD_SEND:
608 case IB_MGMT_METHOD_TRAP:
609 case IB_MGMT_METHOD_GET_RESP:
610 case IB_MGMT_METHOD_REPORT_RESP:
611 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
612 return 0;
613 if (pkey == FULL_MGMT_P_KEY) {
614 smp->status |= IB_SMP_UNSUP_METHOD;
615 return 0;
616 }
617 if (pkey != LIM_MGMT_P_KEY) {
618 ingress_pkey_table_fail(ppd, pkey, slid);
619 return 1;
620 }
621 break;
622 default:
623 break;
624 }
625 return 0;
626 }
627
628
629 /**
630 * hfi1_ud_rcv - receive an incoming UD packet
631 * @packet: the incoming packet, which carries everything this
632 *          handler needs: the receive context and the port the
633 *          packet arrived on, the packet header, the receive
634 *          flags, a pointer to the payload data, the total packet
635 *          length in bytes, and the destination QP the packet is
636 *          addressed to
637 *
638 * This is called from qp_rcv() to process an incoming UD packet
639 * for the given QP.
640 * Called at interrupt level.
641 */
642 void hfi1_ud_rcv(struct hfi1_packet *packet)
643 {
644 struct hfi1_other_headers *ohdr = packet->ohdr;
645 int opcode;
646 u32 hdrsize = packet->hlen;
647 u32 pad;
648 struct ib_wc wc;
649 u32 qkey;
650 u32 src_qp;
651 u16 dlid, pkey;
652 int mgmt_pkey_idx = -1;
653 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
654 struct hfi1_ib_header *hdr = packet->hdr;
655 u32 rcv_flags = packet->rcv_flags;
656 void *data = packet->ebuf;
657 u32 tlen = packet->tlen;
658 struct rvt_qp *qp = packet->qp;
659 bool has_grh = rcv_flags & HFI1_HAS_GRH;
660 bool sc4_bit = has_sc4_bit(packet);
661 u8 sc;
662 u32 bth1;
663 int is_mcast;
664 struct ib_grh *grh = NULL;
665
666 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
667 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
668 dlid = be16_to_cpu(hdr->lrh[1]);
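/*
 * Unicast LIDs run from 1 to 0xbfff, multicast LIDs from 0xc000 to
 * 0xfffe, and 0xffff is the permissive LID; only the multicast
 * range is treated as multicast here.
 */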
669 is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
670 (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
671 bth1 = be32_to_cpu(ohdr->bth[1]);
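/*
 * BTH word 1 carries the congestion bits: FECN is set by a switch
 * that saw congestion on the way here, BECN tells this node, as a
 * sender, to back off.  A BECN feeds the local congestion-control
 * state via process_becn() below; a FECN on a non-CNP unicast
 * packet is answered with a CNP further down.
 */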
672 if (unlikely(bth1 & HFI1_BECN_SMASK)) {
673 /*
674 * In pre-B0 h/w the CNP_OPCODE is handled via an
675 * error path.
676 */
677 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
678 u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
679 u8 sl, sc5;
680
681 sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
682 sc5 |= sc4_bit;
683 sl = ibp->sc_to_sl[sc5];
684
685 process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
686 }
687
688 /*
689 * The opcode is in the low byte when it's in network order
690 * (top byte when in host order).
691 */
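/*
 * In host order, BTH word 0 holds the opcode in bits 31-24, the pad
 * count in bits 21-20 and the P_Key in bits 15-0, hence the shifts
 * and masks used for the extractions below.
 */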
692 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
693 opcode &= 0xff;
694
695 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
696
697 if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
698 u16 slid = be16_to_cpu(hdr->lrh[3]);
699 u8 sc5;
700
701 sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
702 sc5 |= sc4_bit;
703
704 return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
705 }
706 /*
707 * Get the number of bytes the message was padded by
708 * and drop incomplete packets.
709 */
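/*
 * The extra 4 bytes accounted for below are the ICRC, which trails
 * the padded payload on the wire.
 */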
710 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
711 if (unlikely(tlen < (hdrsize + pad + 4)))
712 goto drop;
713
714 tlen -= hdrsize + pad + 4;
715
716 /*
717 * Check that the permissive LID is only used on QP0
718 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
719 */
720 if (qp->ibqp.qp_num) {
721 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
722 hdr->lrh[3] == IB_LID_PERMISSIVE))
723 goto drop;
724 if (qp->ibqp.qp_num > 1) {
725 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
726 u16 slid;
727 u8 sc5;
728
729 sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
730 sc5 |= sc4_bit;
731
732 slid = be16_to_cpu(hdr->lrh[3]);
733 if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
734 /*
735 * Traps will not be sent for packets dropped
736 * by the HW. This is fine, as sending a trap
737 * for invalid pkeys is optional according to
738 * the IB spec (release 1.3, section 10.9.4).
739 */
740 hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
741 pkey,
742 (be16_to_cpu(hdr->lrh[0]) >> 4) &
743 0xF,
744 src_qp, qp->ibqp.qp_num,
745 be16_to_cpu(hdr->lrh[3]),
746 be16_to_cpu(hdr->lrh[1]));
747 return;
748 }
749 } else {
750 /* GSI packet */
751 mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
752 if (mgmt_pkey_idx < 0)
753 goto drop;
754
755 }
756 if (unlikely(qkey != qp->qkey)) {
757 hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
758 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
759 src_qp, qp->ibqp.qp_num,
760 be16_to_cpu(hdr->lrh[3]),
761 be16_to_cpu(hdr->lrh[1]));
762 return;
763 }
764 /* Drop invalid MAD packets (see 13.5.3.1). */
765 if (unlikely(qp->ibqp.qp_num == 1 &&
766 (tlen > 2048 ||
767 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
768 goto drop;
769 } else {
770 /* Received on QP0, and so by definition, this is an SMP */
771 struct opa_smp *smp = (struct opa_smp *)data;
772 u16 slid = be16_to_cpu(hdr->lrh[3]);
773 u8 sc5;
774
775 sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
776 sc5 |= sc4_bit;
777
778 if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
779 goto drop;
780
781 if (tlen > 2048)
782 goto drop;
783 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
784 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
785 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
786 goto drop;
787
788 /* look up SMI pkey */
789 mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
790 if (mgmt_pkey_idx < 0)
791 goto drop;
792
793 }
794
795 if (qp->ibqp.qp_num > 1 &&
796 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
797 wc.ex.imm_data = ohdr->u.ud.imm_data;
798 wc.wc_flags = IB_WC_WITH_IMM;
799 tlen -= sizeof(u32);
800 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
801 wc.ex.imm_data = 0;
802 wc.wc_flags = 0;
803 } else
804 goto drop;
805
806 /*
807 * A GRH is expected to precede the data even if not
808 * present on the wire.
809 */
810 wc.byte_len = tlen + sizeof(struct ib_grh);
811
812 /*
813 * Get the next work request entry to find where to put the data.
814 */
815 if (qp->r_flags & RVT_R_REUSE_SGE)
816 qp->r_flags &= ~RVT_R_REUSE_SGE;
817 else {
818 int ret;
819
820 ret = hfi1_get_rwqe(qp, 0);
821 if (ret < 0) {
822 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
823 return;
824 }
825 if (!ret) {
826 if (qp->ibqp.qp_num == 0)
827 ibp->rvp.n_vl15_dropped++;
828 return;
829 }
830 }
831 /* Silently drop packets which are too big. */
832 if (unlikely(wc.byte_len > qp->r_len)) {
833 qp->r_flags |= RVT_R_REUSE_SGE;
834 goto drop;
835 }
836 if (has_grh) {
837 hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
838 sizeof(struct ib_grh), 1);
839 wc.wc_flags |= IB_WC_GRH;
840 } else
841 hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
842 hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
843 hfi1_put_ss(&qp->r_sge);
844 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
845 return;
846 wc.wr_id = qp->r_wr_id;
847 wc.status = IB_WC_SUCCESS;
848 wc.opcode = IB_WC_RECV;
849 wc.vendor_err = 0;
850 wc.qp = &qp->ibqp;
851 wc.src_qp = src_qp;
852
853 if (qp->ibqp.qp_type == IB_QPT_GSI ||
854 qp->ibqp.qp_type == IB_QPT_SMI) {
855 if (mgmt_pkey_idx < 0) {
856 if (net_ratelimit()) {
857 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
858 struct hfi1_devdata *dd = ppd->dd;
859
860 dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
861 qp->ibqp.qp_type);
862 mgmt_pkey_idx = 0;
863 }
864 }
865 wc.pkey_index = (unsigned)mgmt_pkey_idx;
866 } else
867 wc.pkey_index = 0;
868
869 wc.slid = be16_to_cpu(hdr->lrh[3]);
870 sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
871 sc |= sc4_bit;
872 wc.sl = ibp->sc_to_sl[sc];
873
874 /*
875 * Save the LMC lower bits if the destination LID is a unicast LID.
876 */
877 wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
878 dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
879 wc.port_num = qp->port_num;
880 /* Signal completion event if the solicited bit is set. */
881 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
882 (ohdr->bth[0] &
883 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
884 return;
885
886 drop:
887 ibp->rvp.n_pkt_drops++;
888 }