/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);       /* general driver use */
unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
                 HFI1_DEFAULT_MAX_MTU));
unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");
unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
        .set = hfi1_caps_set,
        .get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);
/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
        int ret = 0;
        unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
                cap_mask = *cap_mask_ptr, value, diff,
                write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
                              HFI1_CAP_WRITABLE_MASK);

        ret = kstrtoul(val, 0, &value);
        if (ret) {
                pr_warn("Invalid module parameter value for 'cap_mask'\n");
                goto done;
        }
        /* Get the changed bits (except the locked bit) */
        diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

        /* Remove any bits that are not allowed to change after driver load */
        if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
                pr_warn("Ignoring non-writable capability bits %#lx\n",
                        diff & ~write_mask);
                diff &= write_mask;
        }

        /* Mask off any reserved bits */
        diff &= ~HFI1_CAP_RESERVED_MASK;
        /* Clear any previously set and changing bits */
        cap_mask &= ~diff;
        /* Update the bits with the new capability */
        cap_mask |= (value & diff);
        /* Check for any kernel/user restrictions */
        diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
                ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
        cap_mask &= ~diff;
        /* Set the bitmask to the final set */
        *cap_mask_ptr = cap_mask;
done:
        return ret;
}
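/*
 * Worked example (masks are illustrative, not the real HFI1_CAP_*
 * values): with write_mask == 0x10001 and the capability mask locked,
 * a request that flips bits 0 and 3 yields diff == 0x9; the locked
 * check trims diff to 0x1, so only bit 0 is cleared and then rewritten
 * from the requested value.
 */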
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
        unsigned long cap_mask = *(unsigned long *)kp->arg;

        cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
        cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

        return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
const char *get_unit_name(int unit)
{
        static char iname[16];

        snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
        return iname;
}
const char *get_card_name(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return get_unit_name(dd->unit);
}
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return dd->pcidev;
}
/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx, nunits_active = 0;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        list_for_each_entry(dd, &hfi1_dev_list, list) {
                if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
                        continue;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup) {
                                nunits_active++;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return nunits_active;
}
/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
        int nunits = 0, npresent = 0, nup = 0;
        struct hfi1_devdata *dd;
        unsigned long flags;
        int pidx;
        struct hfi1_pportdata *ppd;

        spin_lock_irqsave(&hfi1_devs_lock, flags);

        list_for_each_entry(dd, &hfi1_dev_list, list) {
                nunits++;
                if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
                        npresent++;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup)
                                nup++;
                }
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;

        return nunits;
}
/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
                               u8 *update)
{
        u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

        *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
        return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
                        (offset * RCV_BUF_BLOCK_SIZE));
}
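/*
 * Example: rhf_egr_index(rhf) == 5 and rhf_egr_buf_offset(rhf) == 3
 * resolve to rcvtids[5].addr + 3 * RCV_BUF_BLOCK_SIZE.  *update is
 * flagged only when idx lands on an egrbufs.threshold boundary with a
 * zero offset (the threshold is assumed to be a power of two, so the
 * (idx & (threshold - 1)) test works as a modulo).
 */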
/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
        if (unlikely(!PAGE_ALIGNED(size)))
                return 0;
        if (unlikely(size < MIN_EAGER_BUFFER))
                return 0;
        if (size >
            (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
                return 0;
        if (encoded)
                *encoded = ilog2(size / PAGE_SIZE) + 1;
        return 1;
}
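/*
 * Encoding example: with a 4 KiB PAGE_SIZE, a 16 KiB buffer encodes as
 * ilog2(16384 / 4096) + 1 == 3 (assuming 16 KiB falls inside the
 * MIN_EAGER_BUFFER/MAX_EAGER_BUFFER window for the given type).
 */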
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
                       struct hfi1_packet *packet)
{
        struct ib_header *rhdr = packet->hdr;
        u32 rte = rhf_rcv_type_err(packet->rhf);
        int lnh = ib_get_lnh(rhdr);
        struct hfi1_ibport *ibp = rcd_to_iport(rcd);
        struct hfi1_devdata *dd = ppd->dd;
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

        if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
                return;

        if (packet->rhf & RHF_TID_ERR) {
                /* For TIDERR and RC QPs preemptively schedule a NAK */
                struct ib_other_headers *ohdr = NULL;
                u32 rcv_flags = 0;
                u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
                u16 lid = ib_get_dlid(rhdr);
                u32 qp_num;

                /* Sanity check packet */
                if (tlen < 24)
                        goto drop;

                /* Check for GRH */
                if (lnh == HFI1_LRH_BTH) {
                        ohdr = &rhdr->u.oth;
                } else if (lnh == HFI1_LRH_GRH) {
                        u32 vtf;

                        ohdr = &rhdr->u.l.oth;
                        if (rhdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                                goto drop;
                        vtf = be32_to_cpu(rhdr->u.l.grh.version_tclass_flow);
                        if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                                goto drop;
                        rcv_flags |= HFI1_HAS_GRH;
                } else {
                        goto drop;
                }

                /* Get the destination QP number. */
                qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
                if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                        struct rvt_qp *qp;
                        unsigned long flags;

                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        /*
                         * Handle only RC QPs - for other QP types drop error
                         * packet.
                         */
                        spin_lock_irqsave(&qp->r_lock, flags);

                        /* Check for valid receive state. */
                        if (!(ib_rvt_state_ops[qp->state] &
                              RVT_PROCESS_RECV_OK)) {
                                ibp->rvp.n_pkt_drops++;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_RC:
                                hfi1_rc_hdrerr(rcd, packet, qp);
                                break;
                        default:
                                /* For now don't handle any other QP types */
                                break;
                        }

                        spin_unlock_irqrestore(&qp->r_lock, flags);
                        rcu_read_unlock();
                } /* Unicast QP */
        } /* Valid packet with TIDErr */

        /* handle "RcvTypeErr" flags */
        switch (rte) {
        case RHF_RTE_ERROR_OP_CODE_ERR:
        {
                void *ebuf = NULL;
                __be32 *bth = NULL;
                u32 opcode;

                if (rhf_use_egr_bfr(packet->rhf))
                        ebuf = packet->ebuf;

                if (!ebuf)
                        goto drop; /* this should never happen */

                if (lnh == HFI1_LRH_BTH)
                        bth = (__be32 *)ebuf;
                else if (lnh == HFI1_LRH_GRH)
                        bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
                else
                        goto drop;

                opcode = be32_to_cpu(bth[0]) >> 24;

                if (opcode == IB_OPCODE_CNP) {
                        /*
                         * Only in pre-B0 h/w is the CNP_OPCODE handled
                         * via this code path.
                         */
                        struct rvt_qp *qp = NULL;
                        u32 lqpn, rqpn;
                        u16 rlid;
                        u8 svc_type, sl, sc5;

                        sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
                        sl = ibp->sc_to_sl[sc5];

                        lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_UD:
                                rlid = 0;
                                rqpn = 0;
                                svc_type = IB_CC_SVCTYPE_UD;
                                break;
                        case IB_QPT_UC:
                                rlid = ib_get_slid(rhdr);
                                rqpn = qp->remote_qpn;
                                svc_type = IB_CC_SVCTYPE_UC;
                                break;
                        default:
                                goto drop;
                        }

                        process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
                        rcu_read_unlock();
                }

                packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
                break;
        }
        default:
                break;
        }

drop:
        return;
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
                               struct hfi1_packet *packet)
{
        packet->rsize = rcd->rcvhdrqentsize; /* words */
        packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
        packet->rcd = rcd;
        packet->updegr = 0;
        packet->etail = -1;
        packet->rhf_addr = get_rhf_addr(rcd);
        packet->rhf = rhf_to_cpu(packet->rhf_addr);
        packet->rhqoff = rcd->head;
        packet->numpkt = 0;
        packet->rcv_flags = 0;
}
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
                               bool do_cnp)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct ib_header *hdr = pkt->hdr;
        struct ib_other_headers *ohdr = pkt->ohdr;
        struct ib_grh *grh = NULL;
        u32 rqpn = 0, bth1;
        u16 rlid, dlid = ib_get_dlid(hdr);
        u8 sc, svc_type;
        bool is_mcast = false;

        if (pkt->rcv_flags & HFI1_HAS_GRH)
                grh = &hdr->u.l.grh;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                rlid = ib_get_slid(hdr);
                rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
                svc_type = IB_CC_SVCTYPE_UD;
                is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
                        (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
                break;
        case IB_QPT_UC:
                rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_UC;
                break;
        case IB_QPT_RC:
                rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_RC;
                break;
        default:
                return;
        }

        sc = hfi1_9B_get_sc5(hdr, pkt->rhf);

        bth1 = be32_to_cpu(ohdr->bth[1]);
        if (do_cnp && (bth1 & IB_FECN_SMASK)) {
                u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);

                return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
        }

        if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn = bth1 & RVT_QPN_MASK;
                u8 sl = ibp->sc_to_sl[sc];

                process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
        }
}
struct ps_mdata {
        struct hfi1_ctxtdata *rcd;
        u32 rsize;
        u32 maxcnt;
        u32 ps_head;
        u32 ps_tail;
        u32 ps_seq;
};
static inline void init_ps_mdata(struct ps_mdata *mdata,
                                 struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;

        mdata->rcd = rcd;
        mdata->rsize = packet->rsize;
        mdata->maxcnt = packet->maxcnt;
        mdata->ps_head = packet->rhqoff;

        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                mdata->ps_tail = get_rcvhdrtail(rcd);
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        mdata->ps_seq = rcd->seq_cnt;
                else
                        mdata->ps_seq = 0; /* not used with DMA_RTAIL */
        } else {
                mdata->ps_tail = 0; /* used only with DMA_RTAIL */
                mdata->ps_seq = rcd->seq_cnt;
        }
}
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
                return mdata->ps_head == mdata->ps_tail;
        return mdata->ps_seq != rhf_rcv_seq(rhf);
}
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        /*
         * Control context can potentially receive an invalid rhf.
         * Drop such packets.
         */
        if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
                return mdata->ps_seq != rhf_rcv_seq(rhf);

        return 0;
}
static inline void update_ps_mdata(struct ps_mdata *mdata,
                                   struct hfi1_ctxtdata *rcd)
{
        mdata->ps_head += mdata->rsize;
        if (mdata->ps_head >= mdata->maxcnt)
                mdata->ps_head = 0;

        /* Control context must do seq counting */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
            (rcd->ctxt == HFI1_CTRL_CTXT)) {
                if (++mdata->ps_seq > 13)
                        mdata->ps_seq = 1;
        }
}
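/*
 * The sequence shadow cycles 1..13, mirroring the sequence counter the
 * hardware writes into each RHF entry; rcd->seq_cnt is wrapped the
 * same way by the interrupt handlers below.
 */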
/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
        do { \
                if (rcd->ppd->cc_prescan) \
                        __prescan_rxq(packet); \
        } while (0)
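/*
 * The do { } while (0) wrapper makes prescan_rxq() expand to a single
 * statement, so it stays safe inside an unbraced if/else.
 */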
static void __prescan_rxq(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct ps_mdata mdata;

        init_ps_mdata(&mdata, packet);

        while (1) {
                struct hfi1_devdata *dd = rcd->dd;
                struct hfi1_ibport *ibp = rcd_to_iport(rcd);
                __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
                                   dd->rhf_offset;
                struct rvt_qp *qp;
                struct ib_header *hdr;
                struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
                u64 rhf = rhf_to_cpu(rhf_addr);
                u32 etype = rhf_rcv_type(rhf), qpn, bth1;
                int is_ecn = 0;
                u8 lnh;

                if (ps_done(&mdata, rhf, rcd))
                        break;

                if (ps_skip(&mdata, rhf, rcd))
                        goto next;

                if (etype != RHF_RCV_TYPE_IB)
                        goto next;

                packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
                hdr = packet->hdr;
                lnh = ib_get_lnh(hdr);

                if (lnh == HFI1_LRH_BTH) {
                        packet->ohdr = &hdr->u.oth;
                } else if (lnh == HFI1_LRH_GRH) {
                        packet->ohdr = &hdr->u.l.oth;
                        packet->rcv_flags |= HFI1_HAS_GRH;
                } else {
                        goto next; /* just in case */
                }

                bth1 = be32_to_cpu(packet->ohdr->bth[1]);
                is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));

                if (!is_ecn)
                        goto next;

                qpn = bth1 & RVT_QPN_MASK;
                rcu_read_lock();
                qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

                if (!qp) {
                        rcu_read_unlock();
                        goto next;
                }

                process_ecn(qp, packet, true);
                rcu_read_unlock();

                /* turn off BECN, FECN */
                bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
                packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
                update_ps_mdata(&mdata, rcd);
        }
}
static void process_rcv_qp_work(struct hfi1_ctxtdata *rcd)
{
        struct rvt_qp *qp, *nqp;

        /*
         * Iterate over all QPs waiting to respond.
         * The list won't change since the IRQ is only run on one CPU.
         */
        list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
                list_del_init(&qp->rspwait);
                if (qp->r_flags & RVT_R_RSP_NAK) {
                        qp->r_flags &= ~RVT_R_RSP_NAK;
                        hfi1_send_rc_ack(rcd, qp, 0);
                }
                if (qp->r_flags & RVT_R_RSP_SEND) {
                        unsigned long flags;

                        qp->r_flags &= ~RVT_R_RSP_SEND;
                        spin_lock_irqsave(&qp->s_lock, flags);
                        if (ib_rvt_state_ops[qp->state] &
                            RVT_PROCESS_OR_FLUSH_SEND)
                                hfi1_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                rvt_put_qp(qp);
        }
}
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
        if (thread) {
                if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
                        /* allow deferred processing */
                        process_rcv_qp_work(packet->rcd);
                cond_resched();
                return RCV_PKT_OK;
        } else {
                this_cpu_inc(*packet->rcd->dd->rcv_limit);
                return RCV_PKT_LIMIT;
        }
}
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
        int ret = RCV_PKT_OK;

        if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
                ret = max_packet_exceeded(packet, thread);

        return ret;
}
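/*
 * Packet budgeting: check_max_packet() fires every MAX_PKT_RECV (64)
 * packets.  In interrupt context that bumps the rcv_limit counter and
 * returns RCV_PKT_LIMIT so processing bounces to the thread; in thread
 * context it reschedules and, every MAX_PKT_RECV_THREAD (256) packets,
 * also flushes qp_wait_list via process_rcv_qp_work().
 */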
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        packet->numpkt++;
        ret = check_max_packet(packet, thread);

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                           packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret;

        packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
                                         packet->rhf_addr);
        packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
        packet->etype = rhf_rcv_type(packet->rhf);

        /* total length */
        packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
        /* retrieve eager buffer details */
        packet->ebuf = NULL;
        if (rhf_use_egr_bfr(packet->rhf)) {
                packet->etail = rhf_egr_index(packet->rhf);
                packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
                                          &packet->updegr);
                /*
                 * Prefetch the contents of the eager buffer.  It is
                 * OK to send a negative length to prefetch_range().
                 * The +2 is the size of the RHF.
                 */
                prefetch_range(packet->ebuf,
                               packet->tlen - ((packet->rcd->rcvhdrqentsize -
                                               (rhf_hdrq_offset(packet->rhf)
                                                + 2)) * 4));
        }

        /*
         * Call a type specific handler for the packet. We
         * should be able to trust that etype won't be beyond
         * the range of valid indexes. If so something is really
         * wrong and we can probably just let things come
         * crashing down. There is no need to eat another
         * comparison in this performance critical code.
         */
        packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
        packet->numpkt++;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        ret = check_max_packet(packet, thread);

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                           packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
        /*
         * Update head regs etc., every 16 packets, if not last pkt,
         * to help prevent rcvhdrq overflows, when many packets
         * are processed and queue is nearly full.
         * Don't request an interrupt for intermediate updates.
         */
        if (!last && !(packet->numpkt & 0xf)) {
                update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
                               packet->etail, 0, 0);
                packet->updegr = 0;
        }
        packet->rcv_flags = 0;
}
static inline void finish_packet(struct hfi1_packet *packet)
{
        /*
         * Nothing we need to free for the packet.
         *
         * The only thing we need to do is a final update and call for an
         * interrupt.
         */
        update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
                       packet->etail, rcv_intr_dynamic, packet->numpkt);
}
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 seq;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        seq = rhf_rcv_seq(packet.rhf);
        if (seq != rcd->seq_cnt) {
                last = RCV_PKT_DONE;
                goto bail;
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                seq = rhf_rcv_seq(packet.rhf);
                if (++rcd->seq_cnt > 13)
                        rcd->seq_cnt = 1;
                if (seq != rcd->seq_cnt)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(rcd);
        rcd->head = packet.rhqoff;
bail:
        finish_packet(&packet);
        return last;
}
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 hdrqtail;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        hdrqtail = get_rcvhdrtail(rcd);
        if (packet.rhqoff == hdrqtail) {
                last = RCV_PKT_DONE;
                goto bail;
        }
        smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                if (packet.rhqoff == hdrqtail)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(rcd);
        rcd->head = packet.rhqoff;
bail:
        finish_packet(&packet);
        return last;
}
static inline void set_nodma_rtail(struct hfi1_devdata *dd, u8 ctxt)
{
        int i;

        /*
         * For dynamically allocated kernel contexts (like vnic) switch
         * interrupt handler only for that context. Otherwise, switch
         * interrupt handler for all statically allocated kernel contexts.
         */
        if (ctxt >= dd->first_dyn_alloc_ctxt) {
                dd->rcd[ctxt]->do_interrupt =
                        &handle_receive_interrupt_nodma_rtail;
                return;
        }

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_nodma_rtail;
}
static inline void set_dma_rtail(struct hfi1_devdata *dd, u8 ctxt)
{
        int i;

        /*
         * For dynamically allocated kernel contexts (like vnic) switch
         * interrupt handler only for that context. Otherwise, switch
         * interrupt handler for all statically allocated kernel contexts.
         */
        if (ctxt >= dd->first_dyn_alloc_ctxt) {
                dd->rcd[ctxt]->do_interrupt =
                        &handle_receive_interrupt_dma_rtail;
                return;
        }

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_dma_rtail;
}
void set_all_slowpath(struct hfi1_devdata *dd)
{
        int i;

        /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
        for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
                struct hfi1_ctxtdata *rcd = dd->rcd[i];

                if ((i < dd->first_dyn_alloc_ctxt) ||
                    (rcd && rcd->sc && (rcd->sc->type == SC_KERNEL)))
                        rcd->do_interrupt = &handle_receive_interrupt;
        }
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
                                      struct hfi1_packet *packet,
                                      struct hfi1_devdata *dd)
{
        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
        struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
                                                   packet->rhf_addr);
        u8 etype = rhf_rcv_type(packet->rhf);

        if (etype == RHF_RCV_TYPE_IB &&
            hfi1_9B_get_sc5(hdr, packet->rhf) != 0xf) {
                int hwstate = read_logical_state(dd);

                if (hwstate != LSTATE_ACTIVE) {
                        dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
                        return 0;
                }

                queue_work(rcd->ppd->hfi1_wq, lsaw);
                return 1;
        }
        return 0;
}
/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 hdrqtail;
        int needset, last = RCV_PKT_OK;
        struct hfi1_packet packet;
        int skip_pkt = 0;

        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

        init_packet(rcd, &packet);

        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                u32 seq = rhf_rcv_seq(packet.rhf);

                if (seq != rcd->seq_cnt) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                hdrqtail = 0;
        } else {
                hdrqtail = get_rcvhdrtail(rcd);
                if (packet.rhqoff == hdrqtail) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

                /*
                 * Control context can potentially receive an invalid
                 * rhf. Drop such packets.
                 */
                if (rcd->ctxt == HFI1_CTRL_CTXT) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (seq != rcd->seq_cnt)
                                skip_pkt = 1;
                }
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                if (unlikely(dd->do_drop &&
                             atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
                             DROP_PACKET_ON)) {
                        dd->do_drop = 0;

                        /* On to the next packet */
                        packet.rhqoff += packet.rsize;
                        packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
                                          packet.rhqoff +
                                          dd->rhf_offset;
                        packet.rhf = rhf_to_cpu(packet.rhf_addr);

                } else if (skip_pkt) {
                        last = skip_rcv_packet(&packet, thread);
                        skip_pkt = 0;
                } else {
                        /* Auto activate link on non-SC15 packet receive */
                        if (unlikely(rcd->ppd->host_link_state ==
                                     HLS_UP_ARMED) &&
                            set_armed_to_active(rcd, &packet, dd))
                                goto bail;
                        last = process_rcv_packet(&packet, thread);
                }

                if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (++rcd->seq_cnt > 13)
                                rcd->seq_cnt = 1;
                        if (seq != rcd->seq_cnt)
                                last = RCV_PKT_DONE;
                        if (needset) {
                                dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
                                set_nodma_rtail(dd, rcd->ctxt);
                                needset = 0;
                        }
                } else {
                        if (packet.rhqoff == hdrqtail)
                                last = RCV_PKT_DONE;
                        /*
                         * Control context can potentially receive an invalid
                         * rhf. Drop such packets.
                         */
                        if (rcd->ctxt == HFI1_CTRL_CTXT) {
                                u32 seq = rhf_rcv_seq(packet.rhf);

                                if (++rcd->seq_cnt > 13)
                                        rcd->seq_cnt = 1;
                                if (!last && (seq != rcd->seq_cnt))
                                        skip_pkt = 1;
                        }

                        if (needset) {
                                dd_dev_info(dd,
                                            "Switching to DMA_RTAIL\n");
                                set_dma_rtail(dd, rcd->ctxt);
                                needset = 0;
                        }
                }

                process_rcv_update(last, &packet);
        }

        process_rcv_qp_work(rcd);
        rcd->head = packet.rhqoff;

bail:
        /*
         * Always write head at end, and setup rcv interrupt, even
         * if no packets were processed.
         */
        finish_packet(&packet);
        return last;
}
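/*
 * Design note: the slow path above doubles as a probe.  Via needset it
 * switches a context to the matching fast-path handler
 * (set_dma_rtail()/set_nodma_rtail()) once the DMA_RTAIL capability is
 * known, while the control context always stays on the slow path.
 */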
/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  linkstate_active_work);
        struct hfi1_devdata *dd = ppd->dd;
        int i;

        /* Received non-SC15 packet implies neighbor_normal */
        ppd->neighbor_normal = 1;
        set_link_state(ppd, HLS_UP_ACTIVE);

        /*
         * Interrupt all statically allocated kernel contexts that could
         * have had an interrupt during auto activation.
         */
        for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++)
                force_recv_intr(dd->rcd[i]);
}
/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return @default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
        switch (mtu) {
        case     0: return OPA_MTU_0;
        case   256: return OPA_MTU_256;
        case   512: return OPA_MTU_512;
        case  1024: return OPA_MTU_1024;
        case  2048: return OPA_MTU_2048;
        case  4096: return OPA_MTU_4096;
        case  8192: return OPA_MTU_8192;
        case 10240: return OPA_MTU_10240;
        }
        return default_if_bad;
}
u16 enum_to_mtu(int mtu)
{
        switch (mtu) {
        case OPA_MTU_0:     return 0;
        case OPA_MTU_256:   return 256;
        case OPA_MTU_512:   return 512;
        case OPA_MTU_1024:  return 1024;
        case OPA_MTU_2048:  return 2048;
        case OPA_MTU_4096:  return 4096;
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default: return 0xffff;
        }
}
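/*
 * Example: mtu_to_enum(10240, OPA_MTU_2048) yields OPA_MTU_10240,
 * while mtu_to_enum(9000, OPA_MTU_2048) falls through and returns the
 * OPA_MTU_2048 default; enum_to_mtu() of an unknown enum yields 0xffff.
 */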
/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        int i, drain, ret = 0, is_up = 0;

        ppd->ibmtu = 0;
        for (i = 0; i < ppd->vls_supported; i++)
                if (ppd->ibmtu < dd->vld[i].mtu)
                        ppd->ibmtu = dd->vld[i].mtu;
        ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

        mutex_lock(&ppd->hls_lock);
        if (ppd->host_link_state == HLS_UP_INIT ||
            ppd->host_link_state == HLS_UP_ARMED ||
            ppd->host_link_state == HLS_UP_ACTIVE)
                is_up = 1;

        drain = !is_ax(dd) && is_up;

        if (drain)
                /*
                 * MTU is specified per-VL. To ensure that no packet gets
                 * stuck (due, e.g., to the MTU for the packet's VL being
                 * reduced), empty the per-VL FIFOs before adjusting MTU.
                 */
                ret = stop_drain_data_vls(dd);

        if (ret) {
                dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
                           __func__);
                goto err;
        }

        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

        if (drain)
                open_fill_data_vls(dd); /* reopen all VLs */

err:
        mutex_unlock(&ppd->hls_lock);

        return ret;
}
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
        struct hfi1_devdata *dd = ppd->dd;

        ppd->lid = lid;
        ppd->lmc = lmc;
        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

        dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

        return 0;
}
void shutdown_led_override(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;

        /*
         * This pairs with the memory barrier in hfi1_start_led_override to
         * ensure that we read the correct state of LED beaconing represented
         * by led_override_timer_active
         */
        smp_rmb();
        if (atomic_read(&ppd->led_override_timer_active)) {
                del_timer_sync(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 0);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }

        /* Hand control of the LED to the DC for normal operation */
        write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}
static void run_led_override(unsigned long opaque)
{
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
        struct hfi1_devdata *dd = ppd->dd;
        unsigned long timeout;
        int phase_idx;

        if (!(dd->flags & HFI1_INITTED))
                return;

        phase_idx = ppd->led_override_phase & 1;

        setextled(dd, phase_idx);

        timeout = ppd->led_override_vals[phase_idx];

        /* Set up for next phase */
        ppd->led_override_phase = !ppd->led_override_phase;

        mod_timer(&ppd->led_override_timer, jiffies + timeout);
}
/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
                             unsigned int timeoff)
{
        if (!(ppd->dd->flags & HFI1_INITTED))
                return;

        /* Convert to jiffies for direct use in timer */
        ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
        ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

        /* Arbitrarily start from LED on phase */
        ppd->led_override_phase = 1;

        /*
         * If the timer has not already been started, do so. Use a "quick"
         * timeout so the handler will be called soon to look at our request.
         */
        if (!timer_pending(&ppd->led_override_timer)) {
                setup_timer(&ppd->led_override_timer, run_led_override,
                            (unsigned long)ppd);
                ppd->led_override_timer.expires = jiffies + 1;
                add_timer(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 1);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }
}
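/*
 * Usage example: hfi1_start_led_override(ppd, 500, 500) beacons the
 * LED at roughly 1 Hz (500 ms on, 500 ms off) until
 * shutdown_led_override(ppd) hands the LED back to the DC.
 */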
/*
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip resources.
 */
int hfi1_reset_device(int unit)
{
        int ret, i;
        struct hfi1_devdata *dd = hfi1_lookup(unit);
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        dd_dev_info(dd, "Reset on unit %u requested\n", unit);

        if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
                dd_dev_info(dd,
                            "Invalid unit number %u or not initialized or not present\n",
                            unit);
                ret = -ENXIO;
                goto bail;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd)
                for (i = dd->first_dyn_alloc_ctxt;
                     i < dd->num_rcv_contexts; i++) {
                        if (!dd->rcd[i] || !dd->rcd[i]->cnt)
                                continue;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        ret = -EBUSY;
                        goto bail;
                }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                shutdown_led_override(ppd);
        }
        if (dd->flags & HFI1_HAS_SEND_DMA)
                sdma_exit(dd);

        hfi1_reset_cpu_counters(dd);

        ret = hfi1_init(dd, 1);

        if (ret)
                dd_dev_err(dd,
                           "Reinitialize unit %u after reset failed with %d\n",
                           unit, ret);
        else
                dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
                            unit);

bail:
        return ret;
}
void handle_eflags(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        u32 rte = rhf_rcv_type_err(packet->rhf);

        rcv_hdrerr(rcd, rcd->ppd, packet);
        if (rhf_err_flags(packet->rhf))
                dd_dev_err(rcd->dd,
                           "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
                           rcd->ctxt, packet->rhf,
                           packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
                           packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
                           packet->rhf & RHF_DC_ERR ? "dc " : "",
                           packet->rhf & RHF_TID_ERR ? "tid " : "",
                           packet->rhf & RHF_LEN_ERR ? "len " : "",
                           packet->rhf & RHF_ECC_ERR ? "ecc " : "",
                           packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
                           packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
                           rte);
}
/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
        if (unlikely(hfi1_dbg_fault_packet(packet)))
                return RHF_RCV_CONTINUE;

        trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
                          packet->rcd->ctxt,
                          rhf_err_flags(packet->rhf),
                          RHF_RCV_TYPE_IB,
                          packet->hlen,
                          packet->tlen,
                          packet->updegr,
                          rhf_egr_index(packet->rhf));

        if (unlikely(
                 (hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
                  (packet->rhf & RHF_DC_ERR))))
                return RHF_RCV_CONTINUE;

        if (unlikely(rhf_err_flags(packet->rhf))) {
                handle_eflags(packet);
                return RHF_RCV_CONTINUE;
        }

        hfi1_ib_rcv(packet);
        return RHF_RCV_CONTINUE;
}
static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
{
        /* Packet received in VNIC context via RSM */
        if (packet->rcd->is_vnic)
                return true;

        if ((HFI1_GET_L2_TYPE(packet->ebuf) == OPA_VNIC_L2_TYPE) &&
            (HFI1_GET_L4_TYPE(packet->ebuf) == OPA_VNIC_L4_ETHR))
                return true;

        return false;
}
int process_receive_bypass(struct hfi1_packet *packet)
{
        struct hfi1_devdata *dd = packet->rcd->dd;

        if (unlikely(rhf_err_flags(packet->rhf))) {
                handle_eflags(packet);
        } else if (hfi1_is_vnic_packet(packet)) {
                hfi1_vnic_bypass_rcv(packet);
                return RHF_RCV_CONTINUE;
        }

        dd_dev_err(dd, "Unsupported bypass packet. Dropping\n");
        incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
        if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
                u64 *flits = packet->ebuf;

                if (flits && !(packet->rhf & RHF_LEN_ERR)) {
                        dd->err_info_rcvport.packet_flit1 = flits[0];
                        dd->err_info_rcvport.packet_flit2 =
                                packet->tlen > sizeof(flits[0]) ?
                                flits[1] : 0;
                }
                dd->err_info_rcvport.status_and_code |=
                        (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
        }
        return RHF_RCV_CONTINUE;
}
int process_receive_error(struct hfi1_packet *packet)
{
        /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
        if (unlikely(
                 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
                 rhf_rcv_type_err(packet->rhf) == 3))
                return RHF_RCV_CONTINUE;

        handle_eflags(packet);

        if (unlikely(rhf_err_flags(packet->rhf)))
                dd_dev_err(packet->rcd->dd,
                           "Unhandled error packet received. Dropping.\n");

        return RHF_RCV_CONTINUE;
}
int kdeth_process_expected(struct hfi1_packet *packet)
{
        if (unlikely(hfi1_dbg_fault_packet(packet)))
                return RHF_RCV_CONTINUE;
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Unhandled expected packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}
int kdeth_process_eager(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);
        if (unlikely(hfi1_dbg_fault_packet(packet)))
                return RHF_RCV_CONTINUE;

        dd_dev_err(packet->rcd->dd,
                   "Unhandled eager packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}
int process_receive_invalid(struct hfi1_packet *packet)
{
        dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
                   rhf_rcv_type(packet->rhf));
        return RHF_RCV_CONTINUE;
}