/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"

/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};

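/*
 * hfi1_make_ud_req() dispatches through this table, e.g.
 * hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe), so
 * priv->hdr_type must always be a valid HFI1_PKT_TYPE_* index.
 */
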
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}
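
	/*
	 * Note on the SLID computation above: with LMC > 0 a port answers
	 * to 2^LMC consecutive LIDs, so the low LMC bits of the AH's path
	 * bits select which of those LIDs is reported as the source. For
	 * example (illustrative), base lid 0x10 with lmc 2 and path bits
	 * 0x3 yields slid 0x13.
	 */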

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}
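
	/*
	 * Example (illustrative): a WR carrying remote_qkey 0x80000000
	 * has the high bit set, so the sender's own sqp->qkey is used
	 * for the comparison; a WR carrying 0x00001234 is compared
	 * against qp->qkey directly.
	 */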

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is the
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
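	/*
	 * Copy the payload below by walking the sender's SGE list one
	 * contiguous chunk at a time: each chunk is bounded by the bytes
	 * remaining, the current SGE, and (for registered MRs) the
	 * current memory-region segment.
	 */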
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			      ((1 << ppd->lmc) - 1));
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}
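
/*
 * Note: for bypass (16B) packets the pkey is deliberately left out of
 * BTH bits 0-15 above; the 16B builder carries it in the 16B header
 * instead (see the pkey argument passed to hfi1_make_16b_hdr() below).
 */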

void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
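	/*
	 * Illustrative example: a 13-byte payload needs
	 * extra_bytes = -13 & 3 = 3 pad bytes to reach a dword boundary,
	 * giving nwords = (16 >> 2) + SIZE_OF_CRC = 5 dwords after the
	 * headers (SIZE_OF_CRC being one dword for the ICRC).
	 */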
	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		qp->s_hdrwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		qp->s_hdrwords += hfi1_make_grh(ibp, grh,
						rdma_ah_read_grh(ah_attr),
						qp->s_hdrwords - 2, nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = qp->s_hdrwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}

void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u16 len, pkey;
	u8 l4, sc5;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
	qp->s_hdrwords = 9;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		qp->s_hdrwords++;

	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
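	/*
	 * Unlike 9B, a 16B bypass packet is padded out to a flit
	 * boundary and carries a one-byte tail (LT) in addition to the
	 * ICRC, which is why SIZE_OF_LT joins the payload and padding
	 * in the dword count above.
	 */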

	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd,
						qp->s_hdrwords - 4, nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));

	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	/* Convert dwords to flits */
	len = (qp->s_hdrwords + nwords) >> 1;
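	/*
	 * A 16B flit is 8 bytes, i.e. two dwords, so the shift above
	 * turns the dword total into the flit count carried in the 16B
	 * header's length field.
	 */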

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	smp_read_barrier_depends();
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}
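	/*
	 * Note on the test above: masking off the low LMC bits of the
	 * DLID before comparing against ppd->lid means a packet addressed
	 * to any of this port's 2^LMC LIDs is handed to ud_loopback()
	 * rather than the send engine; when the driver's loopback test
	 * mode is enabled, the short-circuit is skipped so packets
	 * exercise the hardware path instead.
	 */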

	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check. It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff; return the 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match... */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}
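
/*
 * Example (illustrative): with ppd->pkeys[] = { 0x8001, 0x7fff }, a
 * lookup of FULL_MGMT_P_KEY (0xffff) finds no exact match but returns
 * index 1 via the LIM_MGMT_P_KEY (0x7fff) fallback, while a lookup of
 * 0x0001 matches index 0 once the membership bit is masked off.
 */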

void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_16b_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16((hwords - 4 + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *   - accept if the port is running an SM
	 *   - drop MAD if it's an SMA trap
	 *   - pkey == FULL_MGMT_P_KEY =>
	 *       reply with unsupported method
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure, which carries the port the packet
 *	    came in on, the header, the payload, the packet length,
 *	    and the QP the packet came on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = packet->hdr;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	bool dlid_is_permissive;
	bool slid_is_permissive;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
	qkey = ib_get_qkey(ohdr);
	src_qp = ib_get_sqpn(ohdr);

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		hdr = packet->hdr;
		pkey = ib_bth_get_pkey(ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
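	/*
	 * Note: the permissive LID is compared in the representation the
	 * header actually uses: the 32-bit OPA_LID_PERMISSIVE (narrowed
	 * to its 16B on-wire form) for bypass packets, and the 16-bit
	 * IB_LID_PERMISSIVE for 9B packets.
	 */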
	sl_from_sc = ibp->sc_to_sl[sc5];

	process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey)) /* Silent drop */
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		hfi1_copy_sge(&qp->r_sge, packet->grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;
		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}