/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"

/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
{
        struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct hfi1_pportdata *ppd;
        struct hfi1_qp *qp;
        struct ib_ah_attr *ah_attr;
        unsigned long flags;
        struct hfi1_sge_state ssge;
        struct hfi1_sge *sge;
        struct ib_wc wc;
        u32 length;
        enum ib_qp_type sqptype, dqptype;

        rcu_read_lock();

        qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
        if (!qp) {
                ibp->n_pkt_drops++;
                rcu_read_unlock();
                return;
        }

        sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : sqp->ibqp.qp_type;
        dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : qp->ibqp.qp_type;

        if (dqptype != sqptype ||
            !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
                ibp->n_pkt_drops++;
                goto drop;
        }

        ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
        ppd = ppd_from_ibp(ibp);

        if (qp->ibqp.qp_num > 1) {
                u16 pkey;
                u16 slid;
                u8 sc5 = ibp->sl_to_sc[ah_attr->sl];

                pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
                slid = ppd->lid | (ah_attr->src_path_bits &
                                   ((1 << ppd->lmc) - 1));
                if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
                                                qp->s_pkey_index, slid))) {
                        hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
                                       ah_attr->sl,
                                       sqp->ibqp.qp_num, qp->ibqp.qp_num,
                                       slid, ah_attr->dlid);
                        goto drop;
                }
        }

        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
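        /*
         * Illustrative values: a WR qkey of 0x80001234 has the high bit
         * set, so sqp->qkey is used; a WR qkey of 0x00001234 is used
         * as-is.
         */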
        if (qp->ibqp.qp_num) {
                u32 qkey;

                qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
                        sqp->qkey : swqe->ud_wr.remote_qkey;
                if (unlikely(qkey != qp->qkey)) {
                        u16 lid;

                        lid = ppd->lid | (ah_attr->src_path_bits &
                                          ((1 << ppd->lmc) - 1));
                        hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
                                       ah_attr->sl,
                                       sqp->ibqp.qp_num, qp->ibqp.qp_num,
                                       lid,
                                       ah_attr->dlid);
                        goto drop;
                }
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        length = swqe->length;
        memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);
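        /* e.g. a 100-byte payload completes with byte_len = 140, since
         * sizeof(struct ib_grh) is 40 bytes
         */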

        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = swqe->wr.ex.imm_data;
        }

        spin_lock_irqsave(&qp->r_lock, flags);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & HFI1_R_REUSE_SGE)
                qp->r_flags &= ~HFI1_R_REUSE_SGE;
        else {
                int ret;

                ret = hfi1_get_rwqe(qp, 0);
                if (ret < 0) {
                        hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        goto bail_unlock;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->n_vl15_dropped++;
                        goto bail_unlock;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= HFI1_R_REUSE_SGE;
                ibp->n_pkt_drops++;
                goto bail_unlock;
        }

        if (ah_attr->ah_flags & IB_AH_GRH) {
                hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
                              sizeof(struct ib_grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
        ssge.sg_list = swqe->sg_list + 1;
        ssge.sge = *swqe->sg_list;
        ssge.num_sge = swqe->wr.num_sge;
        sge = &ssge.sge;
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ssge.num_sge)
                                *sge = *ssge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= HFI1_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        hfi1_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
                goto bail_unlock;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        wc.src_qp = sqp->ibqp.qp_num;
        if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
                if (sqp->ibqp.qp_type == IB_QPT_GSI ||
                    sqp->ibqp.qp_type == IB_QPT_SMI)
                        wc.pkey_index = swqe->ud_wr.pkey_index;
                else
                        wc.pkey_index = sqp->s_pkey_index;
        } else {
                wc.pkey_index = 0;
        }
        wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
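        /* e.g. with lmc = 2 and base LID 0x10, src_path_bits 0-3 yield
         * SLIDs 0x10 through 0x13
         */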
        /* Check for loopback when the port lid is not set */
        if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
                wc.slid = HFI1_PERMISSIVE_LID;
        wc.sl = ah_attr->sl;
        wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                      swqe->wr.send_flags & IB_SEND_SOLICITED);
        ibp->n_loop_pkts++;
bail_unlock:
        spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
        rcu_read_unlock();
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct hfi1_qp *qp)
{
        struct hfi1_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        struct hfi1_swqe *wqe;
        unsigned long flags;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int ret = 0;
        int next_cur;
        u8 sc5;

        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                if (qp->s_last == qp->s_head)
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&qp->s_iowait.sdma_busy)) {
                        qp->s_flags |= HFI1_S_WAIT_DMA;
                        goto bail;
                }
                wqe = get_swqe_ptr(qp, qp->s_last);
                hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }

        if (qp->s_cur == qp->s_head)
                goto bail;

        wqe = get_swqe_ptr(qp, qp->s_cur);
        next_cur = qp->s_cur + 1;
        if (next_cur >= qp->s_size)
                next_cur = 0;

        /* Construct the header. */
        ibp = to_iport(qp->ibqp.device, qp->port_num);
        ppd = ppd_from_ibp(ibp);
        ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
        if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE ||
            ah_attr->dlid == HFI1_PERMISSIVE_LID) {
                lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
                if (unlikely(!loopback && (lid == ppd->lid ||
                    (lid == HFI1_PERMISSIVE_LID &&
                     qp->ibqp.qp_type == IB_QPT_GSI)))) {
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
                         * it would be out of order.
                         * Instead of waiting, we could queue a
                         * zero length descriptor so we get a callback.
                         */
                        if (atomic_read(&qp->s_iowait.sdma_busy)) {
                                qp->s_flags |= HFI1_S_WAIT_DMA;
                                goto bail;
                        }
                        qp->s_cur = next_cur;
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                        ud_loopback(qp, wqe);
                        spin_lock_irqsave(&qp->s_lock, flags);
                        hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
        }

        qp->s_cur = next_cur;
        extra_bytes = -wqe->length & 3;
        nwords = (wqe->length + extra_bytes) >> 2;
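        /* e.g. a 13-byte payload pads to the next 32-bit boundary:
         * extra_bytes = -13 & 3 = 3, nwords = 16 >> 2 = 4
         */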

        /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
        qp->s_hdrwords = 7;
        qp->s_cur_size = wqe->length;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_srate = ah_attr->static_rate;
        qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        qp->s_wqe = wqe;
        qp->s_sge.sge = wqe->sg_list[0];
        qp->s_sge.sg_list = wqe->sg_list + 1;
        qp->s_sge.num_sge = wqe->wr.num_sge;
        qp->s_sge.total_len = wqe->length;

        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
                qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
                                                &ah_attr->grh,
                                                qp->s_hdrwords, nwords);
                lrh0 = HFI1_LRH_GRH;
                ohdr = &qp->s_hdr->ibh.u.l.oth;
                /*
                 * Don't worry about sending to locally attached multicast
                 * QPs; the IB spec leaves that behavior unspecified.
                 */
        } else {
                /* Header size in 32-bit words. */
                lrh0 = HFI1_LRH_BTH;
                ohdr = &qp->s_hdr->ibh.u.oth;
        }
        if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                qp->s_hdrwords++;
                ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        sc5 = ibp->sl_to_sc[ah_attr->sl];
        lrh0 |= (ah_attr->sl & 0xf) << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI) {
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
                qp->s_sc = 0xf;
        } else {
                lrh0 |= (sc5 & 0xf) << 12;
                qp->s_sc = sc5;
        }
        qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
        qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        qp->s_hdr->ibh.lrh[2] =
                cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
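        /* LRH PktLen counts 32-bit words through the ICRC
         * (SIZE_OF_CRC is one word)
         */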
        if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE))
                qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
        else {
                lid = ppd->lid;
                if (lid) {
                        lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
                        qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid);
                } else
                        qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
        }
        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= IB_BTH_SOLICITED;
        bth0 |= extra_bytes << 20;
        if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
                bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
        else
                bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
        ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
                                         qp->qkey : wqe->ud_wr.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
        /* disarm any ahg */
        qp->s_hdr->ahgcount = 0;
        qp->s_hdr->ahgidx = 0;
        qp->s_hdr->tx_flags = 0;
        qp->s_hdr->sde = NULL;

done:
        ret = 1;
        goto unlock;

bail:
        qp->s_flags &= ~HFI1_S_BUSY;
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special-cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned i;

        if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
                unsigned lim_idx = -1;

                for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
                        /* here we look for an exact match */
                        if (ppd->pkeys[i] == pkey)
                                return i;
                        if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
                                lim_idx = i;
                }

                /* did not find 0xffff; return the 0x7fff index if found */
                if (pkey == FULL_MGMT_P_KEY)
                        return lim_idx;

                /* no match... */
                return -1;
        }

        pkey &= 0x7fff; /* remove limited/full membership bit */

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
                if ((ppd->pkeys[i] & 0x7fff) == pkey)
                        return i;

        /*
         * Should not get here, this means hardware failed to validate pkeys.
         */
        return -1;
}
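
/*
 * Illustrative hfi1_lookup_pkey_idx() results, assuming a pkey table of
 * { 0x8001, 0x7fff }:
 *   0xffff -> 1  (no exact match; falls back to the 0x7fff slot)
 *   0x0001 -> 0  (membership bit is ignored for non-management keys)
 *   0x0002 -> -1 (no match)
 */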

void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
                u32 pkey, u32 slid, u32 dlid, u8 sc5,
                const struct ib_grh *old_grh)
{
        u64 pbc, pbc_flags = 0;
        u32 bth0, plen, vl, hwords = 5;
        u16 lrh0;
        u8 sl = ibp->sc_to_sl[sc5];
        struct hfi1_ib_header hdr;
        struct hfi1_other_headers *ohdr;
        struct pio_buf *pbuf;
        struct send_context *ctxt = qp_to_send_context(qp, sc5);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

        if (old_grh) {
                struct ib_grh *grh = &hdr.u.l.grh;

                grh->version_tclass_flow = old_grh->version_tclass_flow;
                grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
                grh->hop_limit = 0xff;
                grh->sgid = old_grh->dgid;
                grh->dgid = old_grh->sgid;
                ohdr = &hdr.u.l.oth;
                lrh0 = HFI1_LRH_GRH;
                hwords += sizeof(struct ib_grh) / sizeof(u32);
        } else {
                ohdr = &hdr.u.oth;
                lrh0 = HFI1_LRH_BTH;
        }

        lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

        bth0 = pkey | (IB_OPCODE_CNP << 24);
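        /* BTH dword 0 carries the opcode in bits 31:24 and the pkey in
         * bits 15:0
         */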
        ohdr->bth[0] = cpu_to_be32(bth0);

        ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
        ohdr->bth[2] = 0; /* PSN 0 */

        hdr.lrh[0] = cpu_to_be16(lrh0);
        hdr.lrh[1] = cpu_to_be16(dlid);
        hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
        hdr.lrh[3] = cpu_to_be16(slid);

        plen = 2 /* PBC */ + hwords;
        pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
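        /* the LRH has room for only SC[3:0]; SC[4] is conveyed by the
         * PBC DC_INFO bit set above
         */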
        vl = sc_to_vlt(ppd->dd, sc5);
        pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        if (ctxt) {
                pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
                if (pbuf)
                        ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                                                 &hdr, hwords);
        }
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
 * ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
                         struct hfi1_qp *qp, u16 slid, struct opa_smp *smp)
{
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

        /*
         * I don't think it's possible for us to get here with sc != 0xf,
         * but check it to be certain.
         */
        if (sc5 != 0xf)
                return 1;

        if (rcv_pkey_check(ppd, pkey, sc5, slid))
                return 1;

        /*
         * At this point we know (and so don't need to check again) that
         * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
         * (see ingress_pkey_check).
         */
        if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
            smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
                ingress_pkey_table_fail(ppd, pkey, slid);
                return 1;
        }

        /*
         * SMPs fall into one of four (disjoint) categories:
         * SMA request, SMA response, trap, or trap repress.
         * Our response depends, in part, on which type of
         * SMP we're processing.
         *
         * If this is not an SMA request, or trap repress:
         *   - accept MAD if the port is running an SM
         *   - pkey == FULL_MGMT_P_KEY =>
         *       reply with unsupported method (i.e., just mark
         *       the smp's status field here, and let it be
         *       processed normally)
         *   - pkey != LIM_MGMT_P_KEY =>
         *       increment port recv constraint errors, drop MAD
         * If this is an SMA request or trap repress:
         *   - pkey != FULL_MGMT_P_KEY =>
         *       increment port recv constraint errors, drop MAD
         */
        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
        case IB_MGMT_METHOD_SET:
        case IB_MGMT_METHOD_REPORT:
        case IB_MGMT_METHOD_TRAP_REPRESS:
                if (pkey != FULL_MGMT_P_KEY) {
                        ingress_pkey_table_fail(ppd, pkey, slid);
                        return 1;
                }
                break;
        case IB_MGMT_METHOD_SEND:
        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
        case IB_MGMT_METHOD_REPORT_RESP:
                if (ibp->port_cap_flags & IB_PORT_SM)
                        return 0;
                if (pkey == FULL_MGMT_P_KEY) {
                        smp->status |= IB_SMP_UNSUP_METHOD;
                        return 0;
                }
                if (pkey != LIM_MGMT_P_KEY) {
                        ingress_pkey_table_fail(ppd, pkey, slid);
                        return 1;
                }
                break;
        default:
                break;
        }
        return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the incoming packet, carrying the header, payload, receive
 *          flags, and the QP the packet came in on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
        struct hfi1_other_headers *ohdr = packet->ohdr;
        int opcode;
        u32 hdrsize = packet->hlen;
        u32 pad;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        u16 dlid, pkey;
        int mgmt_pkey_idx = -1;
        struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
        struct hfi1_ib_header *hdr = packet->hdr;
        u32 rcv_flags = packet->rcv_flags;
        void *data = packet->ebuf;
        u32 tlen = packet->tlen;
        struct hfi1_qp *qp = packet->qp;
        bool has_grh = rcv_flags & HFI1_HAS_GRH;
        bool sc4_bit = has_sc4_bit(packet);
        u8 sc;
        u32 bth1;
        int is_mcast;
        struct ib_grh *grh = NULL;

        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
        dlid = be16_to_cpu(hdr->lrh[1]);
        is_mcast = (dlid > HFI1_MULTICAST_LID_BASE) &&
                        (dlid != HFI1_PERMISSIVE_LID);
        bth1 = be32_to_cpu(ohdr->bth[1]);
        if (unlikely(bth1 & HFI1_BECN_SMASK)) {
                /*
                 * In pre-B0 h/w the CNP_OPCODE is handled via an
                 * error path (errata 291394).
                 */
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
                u8 sl, sc5;

                sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
                sc5 |= sc4_bit;
                sl = ibp->sc_to_sl[sc5];

                process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
        }

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        opcode &= 0xff;
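        /* e.g. a host-order bth[0] of 0x64xxxxxx yields opcode 0x64,
         * IB_OPCODE_UD_SEND_ONLY
         */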

        pkey = (u16)be32_to_cpu(ohdr->bth[0]);

        if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
                u16 slid = be16_to_cpu(hdr->lrh[3]);
                u8 sc5;

                sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
                sc5 |= sc4_bit;

                return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
        }
        /*
         * Get the number of bytes the message was padded by
         * and drop incomplete packets.
         */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
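        /* PadCnt occupies BTH bits 21:20; the trailing 4 bytes are the ICRC */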
        if (unlikely(tlen < (hdrsize + pad + 4)))
                goto drop;

        tlen -= hdrsize + pad + 4;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE))
                        goto drop;
                if (qp->ibqp.qp_num > 1) {
                        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                        u16 slid;
                        u8 sc5;

                        sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
                        sc5 |= sc4_bit;

                        slid = be16_to_cpu(hdr->lrh[3]);
                        if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
                                /*
                                 * Traps will not be sent for packets dropped
                                 * by the HW. This is fine, as sending trap
                                 * for invalid pkeys is optional according to
                                 * IB spec (release 1.3, section 10.9.4)
                                 */
                                hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
                                               pkey,
                                               (be16_to_cpu(hdr->lrh[0]) >> 4) &
                                                0xF,
                                               src_qp, qp->ibqp.qp_num,
                                               be16_to_cpu(hdr->lrh[3]),
                                               be16_to_cpu(hdr->lrh[1]));
                                return;
                        }
                } else {
                        /* GSI packet */
                        mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
                        if (mgmt_pkey_idx < 0)
                                goto drop;
                }
                if (unlikely(qkey != qp->qkey)) {
                        hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
                                       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                       src_qp, qp->ibqp.qp_num,
                                       be16_to_cpu(hdr->lrh[3]),
                                       be16_to_cpu(hdr->lrh[1]));
                        return;
                }
                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (unlikely(qp->ibqp.qp_num == 1 &&
                             (tlen > 2048 ||
                              (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
                        goto drop;
        } else {
                /* Received on QP0, and so by definition, this is an SMP */
                struct opa_smp *smp = (struct opa_smp *)data;
                u16 slid = be16_to_cpu(hdr->lrh[3]);
                u8 sc5;

                sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
                sc5 |= sc4_bit;

                if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
                        goto drop;

                if (tlen > 2048)
                        goto drop;
                if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
                     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
                    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                        goto drop;

                /* look up SMI pkey */
                mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
                if (mgmt_pkey_idx < 0)
                        goto drop;
        }

        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                tlen -= sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
        } else
                goto drop;

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & HFI1_R_REUSE_SGE)
                qp->r_flags &= ~HFI1_R_REUSE_SGE;
        else {
                int ret;

                ret = hfi1_get_rwqe(qp, 0);
                if (ret < 0) {
                        hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        return;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->n_vl15_dropped++;
                        return;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= HFI1_R_REUSE_SGE;
                goto drop;
        }
        if (has_grh) {
                hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                              sizeof(struct ib_grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
        hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
        hfi1_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
                return;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp = &qp->ibqp;
        wc.src_qp = src_qp;

        if (qp->ibqp.qp_type == IB_QPT_GSI ||
            qp->ibqp.qp_type == IB_QPT_SMI) {
                if (mgmt_pkey_idx < 0) {
                        if (net_ratelimit()) {
                                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                                struct hfi1_devdata *dd = ppd->dd;

                                dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
                                           qp->ibqp.qp_type);
                                mgmt_pkey_idx = 0;
                        }
                }
                wc.pkey_index = (unsigned)mgmt_pkey_idx;
        } else
                wc.pkey_index = 0;

        wc.slid = be16_to_cpu(hdr->lrh[3]);
        sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
        sc |= sc4_bit;
        wc.sl = ibp->sc_to_sl[sc];

        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= HFI1_MULTICAST_LID_BASE ? 0 :
                dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                      (ohdr->bth[0] &
                       cpu_to_be32(IB_BTH_SOLICITED)) != 0);
        return;

drop:
        ibp->n_pkt_drops++;
}