/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);	/* general driver use */
unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);
/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16
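/*
 * For reference: MAX_PKT_RECV is a power of two, so the budget test in
 * process_rcv_packet() below, (numpkt & (MAX_PKT_RECV - 1)) == 0,
 * reduces to a single AND and fires on every 64th packet; at that point
 * the receive loop yields (threaded case) or returns RCV_PKT_LIMIT so
 * the interrupt handler can back off.
 */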
struct hfi1_ib_stats hfi1_stats;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}

	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;

	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}
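/*
 * Worked example of the mask arithmetic above (illustrative bit values,
 * not taken from the hardware headers): suppose cap_mask = 0b1010 and
 * the user writes value = 0b0110.  Then diff = 0b0110 ^ 0b1010 = 0b1100
 * selects exactly the bits that change; "cap_mask &= ~diff" clears them
 * (0b0010) and "cap_mask |= (value & diff)" re-sets the changing bits
 * to their requested state (0b0110), leaving untouched bits alone.  The
 * final XOR then clears any user-shifted HFI1_CAP_MUST_HAVE_KERN bit
 * whose kernel-side counterpart is not also set.
 */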
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
const char *get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
	return iname;
}
/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	return nunits_active;
}
/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * "active" units (with at least one port linked up).
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int pidx;
	struct hfi1_pportdata *ppd;

	spin_lock_irqsave(&hfi1_devs_lock, flags);

	list_for_each_entry(dd, &hfi1_dev_list, list) {
		nunits++;
		if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup)
				nup++;
		}
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}
/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}
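/*
 * Illustrative address math (example numbers, not hardware constants):
 * rcvtids[idx].addr is the base of the chunk backing eager entry idx,
 * and the RHF offset field selects a RCV_BUF_BLOCK_SIZE-sized block
 * within that chunk.  If RCV_BUF_BLOCK_SIZE were 4096, idx = 5 and
 * offset = 3, the payload would start at rcvtids[5].addr + 3 * 4096.
 */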
/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}
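/*
 * Worked example of the encoding (illustrative): with 4 KiB pages, a
 * 64 KiB buffer encodes as ilog2(65536 / 4096) + 1 = ilog2(16) + 1 = 5.
 * Inverting the formula, an encoding e corresponds to a buffer of
 * PAGE_SIZE << (e - 1); since ilog2() truncates, a size that is not a
 * power-of-two multiple of the page size maps to the next lower power
 * of two.
 */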
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct hfi1_message_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
	struct hfi1_ibport *ibp = &ppd->ibport_data;

	if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
		return;

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
		struct hfi1_other_headers *ohdr = NULL;
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		u32 qp_num;
		u32 rcv_flags = 0;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
			rcv_flags |= HFI1_HAS_GRH;
		} else
			goto drop;

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		if (lid < HFI1_MULTICAST_LID_BASE) {
			struct hfi1_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_hfi1_state_ops[qp->state] &
			      HFI1_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, hdr, rcv_flags, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		u32 opcode;
		void *ebuf = NULL;
		__be32 *bth = NULL;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (ebuf == NULL)
			goto drop; /* this should never happen */

		if (lnh == HFI1_LRH_BTH)
			bth = (__be32 *)ebuf;
		else if (lnh == HFI1_LRH_GRH)
			bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
		else
			goto drop;

		opcode = be32_to_cpu(bth[0]) >> 24;

		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path (errata 291394).
			 */
			struct hfi1_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
			if (rhf_dc_info(packet->rhf))
				sc5 |= 0x10;
			sl = ibp->sc_to_sl[sc5];

			lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, lqpn);
			if (qp == NULL) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = be16_to_cpu(rhdr->lrh[3]);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = rcd->rcvhdrqentsize; /* words */
	packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = rcd->head;
	packet->numpkt = 0;
	packet->rcv_flags = 0;
}
#ifndef CONFIG_PRESCAN_RXQ
static void prescan_rxq(struct hfi1_packet *packet) {}
#else /* CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;
static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
			struct hfi1_other_headers *ohdr,
			u64 rhf, struct ib_grh *grh)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 bth1;
	u8 sc5, svc_type;
	int is_fecn, is_becn;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:	/* LATER */
	case IB_QPT_RC:	/* LATER */
	default:
		return;
	}

	is_fecn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
		HFI1_FECN_MASK;
	is_becn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
		HFI1_BECN_MASK;

	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	if (rhf_dc_info(rhf))
		sc5 |= 0x10;

	if (is_fecn) {
		u32 src_qpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
		u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
		u16 dlid = be16_to_cpu(hdr->lrh[1]);
		u16 slid = be16_to_cpu(hdr->lrh[3]);

		return_cnp(ibp, qp, src_qpn, pkey, dlid, slid, sc5, grh);
	}

	if (is_becn) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, 0, lqpn, 0, svc_type);
	}

	/* turn off BECN, or FECN */
	bth1 = be32_to_cpu(ohdr->bth[1]);
	bth1 &= ~(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
	bth1 &= ~(HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	ohdr->bth[1] = cpu_to_be32(bth1);
}
struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};
static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;

	mdata->ps_head = packet->rhqoff;

	if (HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = rcd->seq_cnt;
	}
}
static inline int ps_done(struct ps_mdata *mdata, u64 rhf)
{
	if (HFI1_CAP_IS_KSET(DMA_RTAIL))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}
static inline void update_ps_mdata(struct ps_mdata *mdata)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;
	if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		if (++mdata->ps_seq > 13)
			mdata->ps_seq = 1;
	}
}
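/*
 * Note on the 1..13 window (matching the "> 13" wrap here and in the
 * interrupt handlers below): when DMA_RTAIL is not in use, the driver
 * finds the end of the valid entries by comparing the sequence number
 * it expects against the one the hardware wrote into each receive
 * header; the counter runs 1, 2, ..., 13 and then wraps back to 1, so
 * a mismatch marks the first entry that has not been DMA'ed yet.
 */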
/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 */
static void prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	if (!prescan_receive_queue)
		return;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_devdata *dd = rcd->dd;
		struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
		__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
					 dd->rhf_offset;
		struct hfi1_qp *qp;
		struct hfi1_ib_header *hdr;
		struct hfi1_other_headers *ohdr;
		struct ib_grh *grh = NULL;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		int is_ecn = 0;
		u8 lnh;

		if (ps_done(&mdata, rhf))
			break;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		hdr = (struct hfi1_ib_header *)
			hfi1_get_msgheader(dd, rhf_addr);
		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			ohdr = &hdr->u.l.oth;
			grh = &hdr->u.l.grh;
		} else
			goto next; /* just in case */

		is_ecn |= be32_to_cpu(ohdr->bth[1]) &
			(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
		is_ecn |= be32_to_cpu(ohdr->bth[1]) &
			(HFI1_BECN_MASK << HFI1_BECN_SHIFT);

		if (!is_ecn)
			goto next;

		qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		rcu_read_lock();
		qp = hfi1_lookup_qpn(ibp, qpn);

		if (qp == NULL) {
			rcu_read_unlock();
			goto next;
		}

		process_ecn(qp, hdr, ohdr, rhf, grh);
		rcu_read_unlock();
next:
		update_ps_mdata(&mdata);
	}
}
#endif /* CONFIG_PRESCAN_RXQ */
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
					 packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
	packet->etype = rhf_rcv_type(packet->rhf);
	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer. It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			packet->tlen - ((packet->rcd->rcvhdrqentsize -
				  (rhf_hdrq_offset(packet->rhf)+2)) * 4));
	}
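	/*
	 * Worked example of the prefetch length above (illustrative
	 * numbers): with rcvhdrqentsize = 32 words and
	 * rhf_hdrq_offset() = 2, the header queue entry holds
	 * (32 - (2 + 2)) * 4 = 112 bytes of the packet, so a 4096-byte
	 * packet asks prefetch_range() for 4096 - 112 = 3984 bytes of
	 * eager payload.  A packet shorter than that makes the length
	 * negative, which prefetch_range() tolerates.
	 */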
	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
		if (thread) {
			cond_resched();
		} else {
			ret = RCV_PKT_LIMIT;
			this_cpu_inc(*packet->rcd->dd->rcv_limit);
		}
	}

	packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->rcv_flags = 0;
}
static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}
static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd;
	struct hfi1_qp *qp, *nqp;

	rcd = packet->rcd;
	rcd->head = packet->rhqoff;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & HFI1_R_RSP_NAK) {
			qp->r_flags &= ~HFI1_R_RSP_NAK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & HFI1_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~HFI1_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_hfi1_state_ops[qp->state] &
					HFI1_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 seq;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	seq = rhf_rcv_seq(packet.rhf);
	if (seq != rcd->seq_cnt) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		seq = rhf_rcv_seq(packet.rhf);
		if (++rcd->seq_cnt > 13)
			rcd->seq_cnt = 1;
		if (seq != rcd->seq_cnt)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}
static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_dma_rtail;
}
/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int last = RCV_PKT_OK, needset = 1;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);

	if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		u32 seq = rhf_rcv_seq(packet.rhf);

		if (seq != rcd->seq_cnt) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {

		if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
			DROP_PACKET_OFF) == DROP_PACKET_ON)) {
			dd->do_drop = 0;

			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
					  packet.rhqoff +
					  dd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else {
			last = process_rcv_packet(&packet, thread);
		}

		if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = RCV_PKT_DONE;
			if (needset) {
				dd_dev_info(dd,
					"Switching to NO_DMA_RTAIL\n");
				set_all_nodma_rtail(dd);
				needset = 0;
			}
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			if (needset) {
				dd_dev_info(dd,
					    "Switching to DMA_RTAIL\n");
				set_all_dma_rtail(dd);
				needset = 0;
			}
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}
/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}
u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}
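/*
 * Round-trip example (illustrative): mtu_to_enum(8192, OPA_MTU_2048)
 * returns OPA_MTU_8192 and enum_to_mtu(OPA_MTU_8192) returns 8192,
 * while an off-list size such as 3000 falls through to the supplied
 * default (here OPA_MTU_2048).  enum_to_mtu() answers 0xffff for an
 * enumeration it does not recognize so callers can treat it as
 * invalid rather than silently picking a size.
 */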
/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT
			|| ppd->host_link_state == HLS_UP_ARMED
			|| ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);

	return 0;
}
/*
 * The following routines deal with the "obviously simple" task of
 * overriding the state of the LEDs, which normally indicate link
 * physical and logical status. The complications arise in dealing
 * with different hardware mappings and the board-dependent routine
 * being called from interrupts. And then there's the requirement to
 * _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
static void run_led_override(unsigned long opaque)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}
void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
{
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & HFI1_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		setup_timer(&ppd->led_override_timer, run_led_override,
			    (unsigned long)ppd);

		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}
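/*
 * Blink-rate arithmetic, for reference (derived from the code above,
 * example numbers only): each LED phase lasts timeoff = (HZ << 4)/freq
 * jiffies, so freq = 32 gives 16*HZ/32 = HZ/2, i.e. each nybble-driven
 * phase is held for half a second before run_led_override() flips to
 * the other phase.
 */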
/*
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user contexts are open that use chip resources
 */
int hfi1_reset_device(int unit)
{
	int ret, i;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			"Invalid unit number %u or not initialized or not present\n",
			unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			"Reinitialize unit %u after reset failed with %d\n",
			unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			unit);

bail:
	return ret;
}
void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		"receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
		rcd->ctxt, packet->rhf,
		packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		packet->rhf & RHF_DC_ERR ? "dc " : "",
		packet->rhf & RHF_TID_ERR ? "tid " : "",
		packet->rhf & RHF_LEN_ERR ? "len " : "",
		packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
		packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		rte);

	rcv_hdrerr(rcd, rcd->ppd, packet);
}
/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
	trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
			  packet->rcd->ctxt,
			  rhf_err_flags(packet->rhf),
			  RHF_RCV_TYPE_IB,
			  packet->hlen,
			  packet->tlen,
			  packet->updegr,
			  rhf_egr_index(packet->rhf));

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	hfi1_ib_rcv(packet);
	return RHF_RCV_CONTINUE;
}
int process_receive_bypass(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Bypass packets are not supported in normal operation. Dropping\n");
	return RHF_RCV_CONTINUE;
}
int process_receive_error(struct hfi1_packet *packet)
{
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");

	return RHF_RCV_CONTINUE;
}
int kdeth_process_expected(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled expected packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}
int kdeth_process_eager(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled eager packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}
int process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
	return RHF_RCV_CONTINUE;
}