/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40
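
/*
 * Illustrative only: a port GUID carries the manufacturer OUI in its upper
 * bytes, starting at bit HFI1_OUI_LSB.  For example, with hypothetical
 * low-order bits 0x123:
 *
 *	u64 guid = ((u64)HFI1_OUI << HFI1_OUI_LSB) | 0x0000000123ULL;
 *	// guid == 0x0011750000000123
 */
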
#define DROP_PACKET_OFF		0
#define DROP_PACKET_ON		1

#define NEIGHBOR_TYPE_HFI		0
#define NEIGHBOR_TYPE_SWITCH	1

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			 HFI1_CAP_MISC_MASK)
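
/*
 * A minimal usage sketch (illustrative, assuming a capability bit such as
 * HFI1_CAP_SDMA exists among the HFI1_CAP_* definitions): the same
 * capability is kept twice in hfi1_cap_mask, once for kernel contexts and
 * once shifted by HFI1_CAP_USER_SHIFT for user contexts, so each can be
 * tested independently:
 *
 *	if (HFI1_CAP_IS_KSET(SDMA))
 *		;	// kernel contexts may use SDMA
 *	if (HFI1_CAP_IS_USET(SDMA))
 *		;	// user contexts may use SDMA
 */
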
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)

/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT 0

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24

/*
 * per driver stats, either not device nor port-specific, or
 * summed over all of the devices and ports.
 * They are described by name via ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted hfi1_statnames[] in debugfs.c must
 * change to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* tx-related packet errors */
	__u64 sps_rcverrs;	/* non-crc rcv packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets where RHF != LRH len */
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

extern int num_driver_cntrs;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Below contains all data related to a single context (formerly called port).
 */
struct hfi1_opcode_stats_perctx;

struct ctxt_eager_bufs {
	ssize_t size;		/* total size of eager buffers */
	u32 count;		/* size of buffers array */
	u32 numbufs;		/* number of buffers allocated */
	u32 alloced;		/* number of rcvarray entries used */
	u32 rcvtid_size;	/* size of each eager rcv tid */
	u32 threshold;		/* head update threshold */
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
};

struct exp_tid_set {
	struct list_head list;
	u32 count;
};

struct hfi1_ctxtdata {
	/* shadow the ctxt's RcvCtrl register */
	/* rcvhdrq base, needs mmap before useful */
	/* kernel virtual address where hdrqtail is updated */
	volatile __le64 *rcvhdrtail_kvaddr;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/* rcvhdrq size (for freeing) */
	/* number of rcvhdrq entries */
	/* size of each of the rcvhdrq entries */
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_dma;
	dma_addr_t rcvhdrqtailaddr_dma;
	struct ctxt_eager_bufs egrbufs;
	/* this receive context's assigned PIO ACK send context */
	struct send_context *sc;
	/* dynamic receive available interrupt timeout */
	u32 rcvavail_timeout;
	/* Reference count the base context usage */
	/* Device context index */
	/*
	 * non-zero if ctxt can be shared, and defines the maximum number of
	 * sub-contexts for this device context.
	 */
	/* non-zero if ctxt is being shared. */
	/* number of RcvArray groups for this context. */
	u32 rcv_array_groups;
	/* index of first eager TID entry. */
	/* number of expected TID entries */
	/* index of first expected TID entry. */
	struct exp_tid_set tid_group_list;
	struct exp_tid_set tid_used_list;
	struct exp_tid_set tid_full_list;
	/* lock protecting all Expected TID data */
	struct mutex exp_lock;
	/* per-context configuration flags */
	/* per-context event flags for fileops/intr communication */
	unsigned long event_flags;
	/* total number of polled urgent packets */
	/* saved total number of polled urgent packets for poll edge trigger */
	/* same size as task_struct .comm[], command that opened context */
	char comm[TASK_COMM_LEN];
	/* so file ops can get at unit */
	struct hfi1_devdata *dd;
	/* so functions that need physical port can get it easily */
	struct hfi1_pportdata *ppd;
	/* associated msix interrupt */
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* Bitmask of in use context(s) */
	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
	/* The version of the library which opened this ctxt */
	/* Type of packets or conditions we want to poll for */
	/* receive packet sequence counter */
	/* ctxt rcvhdrq head offset */
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
	/* interrupt handling */
	u64 imask;		/* clear interrupt mask */
	int ireg;		/* clear interrupt register */
	unsigned numa_id;	/* numa node of this context */
	/* verbs rx_stats per rcd */
	struct hfi1_opcode_stats_perctx *opstats;

	/* Is ASPM interrupt supported for this context */
	bool aspm_intr_supported;
	/* ASPM state (enabled/disabled) for this context */
	/* Timer for re-enabling ASPM if interrupt activity quietens down */
	struct timer_list aspm_timer;
	/* Lock to serialize between intr, timer intr and user threads */
	spinlock_t aspm_lock;
	/* Is ASPM processing enabled for this context (in intr context) */
	bool aspm_intr_enable;
	/* Last interrupt timestamp */
	ktime_t aspm_ts_last_intr;
	/* Last timestamp at which we scheduled a timer for this context */
	ktime_t aspm_ts_timer_sched;

	/*
	 * The interrupt handler for a particular receive context can vary
	 * throughout its lifetime. This is not a lock protected data member so
	 * it must be updated atomically and the prev and new value must always
	 * be valid. Worst case is we process an extra interrupt and up to 64
	 * packets with the wrong interrupt handler.
	 */
	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);

	/* Indicates that this is vnic context */
	/* vnic queue index this context is mapped to */
};

/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb is
 * if something is used one time to derive some value, store that something in
 * here. If it is used multiple times, then store the result of that derivation
 * here.
 */
struct hfi1_packet {
	struct hfi1_ctxtdata *rcd;
	struct ib_other_headers *ohdr;
};

#define HFI1_PKT_TYPE_9B  0
#define HFI1_PKT_TYPE_16B 1

#define OPA_16B_L4_MASK		0xFFull
#define OPA_16B_SC_MASK		0x1F00000ull
#define OPA_16B_SC_SHIFT	20
#define OPA_16B_LID_MASK	0xFFFFFull
#define OPA_16B_DLID_MASK	0xF000ull
#define OPA_16B_DLID_SHIFT	20
#define OPA_16B_DLID_HIGH_SHIFT	12
#define OPA_16B_SLID_MASK	0xF00ull
#define OPA_16B_SLID_SHIFT	20
#define OPA_16B_SLID_HIGH_SHIFT	8
#define OPA_16B_BECN_MASK	0x80000000ull
#define OPA_16B_BECN_SHIFT	31
#define OPA_16B_FECN_MASK	0x10000000ull
#define OPA_16B_FECN_SHIFT	28
#define OPA_16B_L2_MASK		0x60000000ull
#define OPA_16B_L2_SHIFT	29
#define OPA_16B_PKEY_MASK	0xFFFF0000ull
#define OPA_16B_PKEY_SHIFT	16
#define OPA_16B_LEN_MASK	0x7FF00000ull
#define OPA_16B_LEN_SHIFT	20
#define OPA_16B_RC_MASK		0xE000000ull
#define OPA_16B_RC_SHIFT	25
#define OPA_16B_AGE_MASK	0xFF0000ull
#define OPA_16B_AGE_SHIFT	16
#define OPA_16B_ENTROPY_MASK	0xFFFFull

/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L4_9B		0x00
#define OPA_16B_L2_TYPE		0x02
#define OPA_16B_L4_IB_LOCAL	0x09
#define OPA_16B_L4_IB_GLOBAL	0x0A
#define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR

static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
}

static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{
	return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
}
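
/*
 * Worked example (illustrative only): a 20-bit 16B DLID is split across two
 * header dwords.  With hdr->lrh[1] = 0x00012345 and hdr->lrh[2] = 0x0000A000,
 * the low 20 bits come from lrh[1] (0x12345) and the high 4 bits from
 * lrh[2] bits 15:12 (0xA), so hfi1_16B_get_dlid() returns
 * 0x12345 | (0xA << 20) == 0xA12345.
 */
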
#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))

#define OPA_16B_BTH_PAD_MASK	7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		   OPA_16B_BTH_PAD_MASK);
}

struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in. Used as an argument to set_link_state(). Implemented
 * as bits for easy multi-state checking. The actual state can only be
 * one at a time.
 */
#define __HLS_UP_INIT_BP	0
#define __HLS_UP_ARMED_BP	1
#define __HLS_UP_ACTIVE_BP	2
#define __HLS_DN_DOWNDEF_BP	3	/* link down default */
#define __HLS_DN_POLL_BP	4
#define __HLS_DN_DISABLE_BP	5
#define __HLS_DN_OFFLINE_BP	6
#define __HLS_VERIFY_CAP_BP	7
#define __HLS_GOING_UP_BP	8
#define __HLS_GOING_OFFLINE_BP	9
#define __HLS_LINK_COOLDOWN_BP	10

#define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)

#define HLS_DEFAULT HLS_DN_POLL
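
/*
 * Because each state is a distinct bit, several states can be tested with a
 * single mask compare.  A minimal sketch (illustrative, assuming a port data
 * pointer ppd with its host_link_state member is in scope):
 *
 *	if (ppd->host_link_state & HLS_UP)
 *		;	// link is in Init, Armed, or Active
 *	if (ppd->host_link_state & (HLS_DN_POLL | HLS_DN_OFFLINE))
 *		;	// link is polling or offline
 */
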
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN	0x1
#define HFI1_PART_ENFORCE_OUT	0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

#define CNTR_NORMAL		0x0 /* Normal counters, just read register */
#define CNTR_SYNTH		0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED		0x2 /* Disable this counter */
#define CNTR_32BIT		0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL			0x8 /* Per VL counter */
#define CNTR_SDMA		0x10
#define CNTR_INVALID_VL		-1  /* Specifies invalid VL */
#define CNTR_MODE_W		0x0
#define CNTR_MODE_R		0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0

static inline void incr_cntr64(u64 *cntr)
{
	if (*cntr < (u64)-1LL)
		(*cntr)++;
}

static inline void incr_cntr32(u32 *cntr)
{
	if (*cntr < (u32)-1LL)
		(*cntr)++;
}
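
/*
 * Illustrative behaviour (not part of the driver): the helpers saturate
 * instead of wrapping around, e.g.
 *
 *	u32 c = 0xfffffffe;
 *	incr_cntr32(&c);	// c == 0xffffffff
 *	incr_cntr32(&c);	// c still == 0xffffffff (saturated)
 */
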
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
	struct irq_affinity_notify notify;
};

/* per-SL CCA information */
struct cca_timer {
	struct hrtimer hrtimer;
	struct hfi1_pportdata *ppd;	/* read-only */
	int sl;				/* read-only */
	u16 ccti;			/* read/write - current value of CCTI */
};

struct link_down_reason {
	/*
	 * SMA-facing value. Should be set from .latest when
	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
	 */
	u8 sma;
	u8 latest;
};

struct vl_arb_cache {
	/* protect vl arb cache */
	spinlock_t lock;
	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;
	struct kobject pport_cc_kobj;
	struct kobject sc2vl_kobj;
	struct kobject sl2sc_kobj;
	struct kobject vl2mtu_kobj;

	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	/* did we read platform config from scratch registers? */
	bool config_from_scratch;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];
	/* GUID for peer interface, in host order */
	/* up or down physical link state */
	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want. One qword of status per port
	 */
	/* SendDMA related entries */

	struct workqueue_struct *hfi1_wq;
	struct workqueue_struct *link_wq;

	/* move out of interrupt context */
	struct work_struct link_vc_work;
	struct work_struct link_up_work;
	struct work_struct link_down_work;
	struct work_struct sma_message_work;
	struct work_struct freeze_work;
	struct work_struct link_downgrade_work;
	struct work_struct link_bounce_work;
	struct delayed_work start_link_work;
	/* host link state variables */
	struct mutex hls_lock;

	/* these are the "32 bit" regs */

	u32 ibmtu;			/* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 current_egress_rate;	/* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 actual_vls_operational;
	/* LID mask control */
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 hw_pidx;	/* physical port index */
	u8 port;	/* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_fm_security;	/* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready;	/* driver ready for active link */
	u8 link_enabled;	/* link enabled? */
	u8 local_tx_rate;	/* rate given to 8051 firmware */

	/* placeholders for IB MAD packet settings */
	u8 overrun_threshold;
	u8 phy_error_threshold;
	unsigned int is_link_down_queued;

	/* Used to override LED behavior for things like maintenance beaconing */
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];
	u8 led_override_phase;	/* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;

	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx;	/* index for logging events */
	int cc_mad_idx;	/* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

	/* port relative counter buffer */
	/* port relative synthetic counter buffer */
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	/* number of times link retrained successfully */
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 part_enforce;	/* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown. */
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
};

typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
			      struct hfi1_pkt_state *ps,
			      struct rvt_swqe *wqe);

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0	/* keep going */
#define RHF_RCV_DONE	  1	/* stop, this packet processed */
#define RHF_RCV_REPROCESS 2	/* stop. retain this packet */

struct rcv_array_data {
	u16 ngroups;
	u16 nctxt_extra;
	u8 group_size;
};

struct per_vl_data {
	struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16

struct err_info_rcvport {
	u8 status_and_code;
	u64 packet_flit1;
	u64 packet_flit2;
};

struct err_info_constraint {
	u8 status;
	u16 pkey;
	u32 slid;
};

struct hfi1_temp {
	unsigned int curr;	/* current temperature */
	unsigned int lo_lim;	/* low temperature limit */
	unsigned int hi_lim;	/* high temperature limit */
	unsigned int crit_lim;	/* critical temperature limit */
	u8 triggers;		/* temperature triggers */
};

struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd;	/* current controlling device */
	struct i2c_adapter adapter;		/* bus details */
	struct i2c_algo_bit_data algo;		/* bus algorithm details */
	int num;				/* bus number, 0 or 1 */
};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2];	/* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES	 256
#define NUM_MAP_REGS	  32

/*
 * Number of VNIC contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_NUM_VNIC_CTXT	8

/* Number of VNIC RSM entries */
#define NUM_VNIC_MAP_ENTRIES	8

/* Virtual NIC information */
struct hfi1_vnic_data {
	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
	struct kmem_cache *txreq_cache;
	u16 rmt_start;
};

struct hfi1_vnic_vport_info;

/* device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
	struct hfi1_ibdev verbs_dev;	/* must be first */
	struct list_head list;
	/* pointers to related structs for this device */
	/* pci access data structure */
	struct pci_dev *pcidev;
	struct cdev user_cdev;
	struct cdev diag_cdev;
	struct device *user_device;
	struct device *diag_device;
	struct device *ui_device;

	/* first mapping up to RcvArray */
	u8 __iomem *kregbase1;
	resource_size_t physaddr;

	/* second uncached mapping from RcvArray to pio send buffers */
	u8 __iomem *kregbase2;
	/* for detecting offset above kregbase2 address */

	/* Per VL data. Enough for all VLs but not all elements are set/used. */
	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
	/* send context data */
	struct send_context_info *send_contexts;
	/* map hardware send contexts to software index */
	/* spinlock for allocating and releasing send context resources */
	/* lock for pio_map */
	spinlock_t pio_map_lock;
	/* Send Context initialization lock. */
	spinlock_t sc_init_lock;
	/* lock for sdma_map */
	spinlock_t sde_map_lock;
	/* array of kernel send contexts */
	struct send_context **kernel_send_context;
	/* array of vl maps */
	struct pio_vl_map __rcu *pio_map;
	/* default flags to last descriptor */

	/* fields common to all SDMA engines */

	volatile __le64 *sdma_heads_dma;	/* DMA'ed by chip */
	dma_addr_t sdma_heads_phys;
	void *sdma_pad_dma;			/* DMA'ed by chip */
	dma_addr_t sdma_pad_phys;
	/* for deallocation */
	size_t sdma_heads_size;
	/* number from the chip */
	u32 chip_sdma_engines;
	/* array of engines sized by num_sdma */
	struct sdma_engine *per_sdma;
	/* array of vl maps */
	struct sdma_vl_map __rcu *sdma_map;
	/* SPC freeze waitqueue and variable */
	wait_queue_head_t sdma_unfreeze_wq;
	atomic_t sdma_unfreeze_count;

	u32 lcb_access_count;		/* count of LCB users */

	/* common data between shared ASIC HFIs in this OS */
	struct hfi1_asic_data *asic_data;

	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/*
	 * write-combining mem-mapped pointer to base of RcvArray
	 * memory.
	 */
	void __iomem *rcvarray_wc;
	/*
	 * credit return base - a per-NUMA range of DMA address that
	 * the chip will use to update the per-context free counter
	 */
	struct credit_return_base *cr_base;

	/* send context numbers and sizes for each type */
	struct sc_config_sizes sc_sizes[SC_MAX];

	char *boardname;	/* human readable board info */

	u64 z_send_schedule;

	u64 __percpu *send_schedule;
	/* number of reserved contexts for VNIC usage */
	u16 num_vnic_contexts;
	/* number of receive contexts in use by the driver */
	u32 num_rcv_contexts;
	/* number of pio send contexts in use by the driver */
	u32 num_send_contexts;
	/*
	 * number of ctxts available for PSM open
	 */
	/* total number of available user/PSM contexts */
	u32 num_user_contexts;
	/* base receive interrupt timeout, in CSR units */
	u32 rcv_intr_timeout_csr;

	u32 freezelen;		/* max length of freezemsg */
	u64 __iomem *egrtidbase;
	spinlock_t sendctrl_lock;	/* protect changes to SendCtrl */
	spinlock_t rcvctrl_lock;	/* protect changes to RcvCtrl */
	spinlock_t uctxt_lock;		/* protect rcd changes */
	struct mutex dc8051_lock;	/* exclusive access to 8051 */
	struct workqueue_struct *update_cntr_wq;
	struct work_struct update_cntr_work;
	/* exclusive access to 8051 memory */
	spinlock_t dc8051_memlock;
	int dc8051_timed_out;		/* remember if the 8051 timed out */
	/*
	 * A page that will hold event notification bitmaps for all
	 * contexts. This page will be mapped into all processes.
	 */
	unsigned long *events;
	/*
	 * per unit status, see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	struct hfi1_status *status;

	/* revision register shadow */
	/* Base GUID for device (network order) */

	/* these are the "32 bit" regs */

	/* value we put in kr_rcvhdrsize */
	/* number of receive contexts the chip supports */
	u32 chip_rcv_contexts;
	/* number of receive array entries */
	u32 chip_rcv_array_count;
	/* number of PIO send contexts the chip supports */
	u32 chip_send_contexts;
	/* number of bytes in the PIO memory buffer */
	u32 chip_pio_mem_size;
	/* number of bytes in the SDMA memory buffer */
	u32 chip_sdma_mem_size;

	/* size of each rcvegrbuffer */
	u16 rcvegrbufsize_shift;
	/* both sides of the PCIe link are gen3 capable */
	u8 link_gen3_capable;
	/* localbus width (1, 2,4,8,16,32) from config space */
	/* localbus speed in MHz */
	int unit;	/* unit # of this chip */
	int node;	/* home node of this chip */

	/* save these PCI fields to restore after a reset */

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer serial number format
	 */
	u8 serial[SERIAL_MAX];
	/* human readable board version */
	u8 boardversion[BOARD_VERS_MAX];
	u8 lbus_info[32];	/* human readable localbus info */
	/* chip major rev, from CceRevision */
	/* chip minor rev, from CceRevision */
	/* implementation code */
	/* vAU of this device */
	/* vCU of this device */
	/* link credits of this device */
	/* initial vl15 credits to use */
	/*
	 * Cached value for vl15buf, read during verify cap interrupt. VL15
	 * credits are to be kept at 0 and set when handling the link-up
	 * interrupt. This removes the possibility of receiving VL15 MAD
	 * packets before this HFI is ready.
	 */
	/* Misc small ints */
	u16 irev;	/* implementation revision */
	u32 dc8051_ver;	/* 8051 firmware version */

	spinlock_t hfi1_diag_trans_lock;	/* protect diag observer ops */
	struct platform_config platform_config;
	struct platform_config_cache pcfg_cache;

	struct diag_client *diag_client;

	/* MSI-X information */
	struct hfi1_msix_entry *msix_entries;
	u32 num_msix_entries;
	u32 first_dyn_msix_idx;

	/* INTx information */
	u32 requested_intx_irq;		/* did we request one? */

	/* general interrupt: mask of handled interrupts */
	u64 gi_mask[CCE_NUM_INT_CSRS];

	struct rcv_array_data rcv_entries;

	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;

	/*
	 * 64 bit synthetic counters
	 */
	struct timer_list synth_stats_timer;

	size_t cntrnameslen;
	/*
	 * remembered values for synthetic counters
	 */
	char *portcntrnames;
	size_t portcntrnameslen;

	struct err_info_rcvport err_info_rcvport;
	struct err_info_constraint err_info_rcv_constraint;
	struct err_info_constraint err_info_xmit_constraint;

	atomic_t drop_packet;
	u8 err_info_uncorrectable;
	u8 err_info_fmconfig;

	/*
	 * Software counters for the status bits defined by the
	 * associated error status registers
	 */
	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

	/* Software counter that spans all contexts */
	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
	/* Software counter that spans all DMA engines */
	u64 sw_send_dma_eng_err_status_cnt[
		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
	/* Software counter that aggregates all cce_err_status errors */
	u64 sw_cce_err_status_aggregate;
	/* Software counter that aggregates all bypass packet rcv errors */
	u64 sw_rcv_bypass_packet_errors;
	/* receive interrupt function */
	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];

	/* Save the enabled LCB error bits */

	/*
	 * Capability to have different send engines simply by changing a
	 * pointer value.
	 */
	send_routine process_pio_send ____cacheline_aligned_in_smp;
	send_routine process_dma_send;
	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
				u64 pbc, const void *from, size_t count);
	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
				     struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u64 pbc, u8 plen);
	/* hfi1_pportdata, points to array of (physical) port-specific
	 * data structs, indexed by pidx (0..n-1)
	 */
	struct hfi1_pportdata *pport;
	/* receive context data */
	struct hfi1_ctxtdata **rcd;
	u64 __percpu *int_counter;
	/* verbs tx opcode stats */
	struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
	/* device (not port) flags, basically device capabilities */
	/* Number of physical ports available */
	/* Lowest context number which can be used by user processes or VNIC */
	u8 first_dyn_alloc_ctxt;
	/* adding a new field here would make it part of this cacheline */

	/* seqlock for sc2vl */
	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;

	/* receive interrupt functions */
	rhf_rcv_function_ptr *rhf_rcv_function_map;
	u64 __percpu *rcv_limit;
	u16 rhf_offset;	/* offset of RHF within receive header entry */
	/* adding a new field here would make it part of this cacheline */

	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */

	/* Timer and counter used to detect RcvBufOvflCnt changes */
	struct timer_list rcverr_timer;

	wait_queue_head_t event_queue;

	/* receive context tail dummy address */
	__le64 *rcvhdrtail_dummy_kvaddr;
	dma_addr_t rcvhdrtail_dummy_dma;

	/* Serialize ASPM enable/disable between multiple verbs contexts */
	spinlock_t aspm_lock;
	/* Number of verbs contexts which have disabled ASPM */
	atomic_t aspm_disabled_cnt;
	/* Keeps track of user space clients */
	atomic_t user_refcount;
	/* Used to wait for outstanding user space clients before dev removal */
	struct completion user_comp;

	bool eprom_available;	/* true if EPROM is available for this device */
	bool aspm_supported;	/* Does HW support ASPM */
	bool aspm_enabled;	/* ASPM state: enabled/disabled */
	struct rhashtable *sdma_rht;

	struct kobject kobj;

	struct hfi1_vnic_data vnic;
};

static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
{
	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
}

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
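
/*
 * Worked example (illustrative only): firmware version 1.27.0 packs and
 * unpacks as
 *
 *	u32 ver = dc8051_ver(1, 27, 0);	// 0x011b00
 *	dc8051_ver_maj(ver);		// 1
 *	dc8051_ver_min(ver);		// 27 (0x1b)
 *	dc8051_ver_patch(ver);		// 0
 */
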
/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_EAGER          1
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct mmu_rb_handler;

/* Private data for file operations */
struct hfi1_filedata {
	struct srcu_struct pq_srcu;
	struct hfi1_devdata *dd;
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_user_sdma_comp_q *cq;
	/* update side lock for SRCU */
	spinlock_t pq_rcu_lock;
	struct hfi1_user_sdma_pkt_q __rcu *pq;
	/* for cpu affinity; -1 if none */
	struct mmu_rb_handler *handler;
	struct tid_rb_node **entry_to_rb;
	spinlock_t tid_lock;	/* protect tid_[limit,used] counters */
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
	struct mm_struct *mm;
};

extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);

static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
{
	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		HFI1_MAX_SHARED_CTXTS;
}

int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);

extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
			 struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe);

void hfi1_make_ud_req_16B(struct rvt_qp *qp,
			  struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe);

/* receive packet handler dispositions */
#define RCV_PKT_OK      0x0 /* keep going */
#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */

/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
}

int hfi1_reset_device(int);

void receive_interrupt_work(struct work_struct *work);

/* extract service channel from header and rhf */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{
	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
}
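
/*
 * Worked example (illustrative only): for a 9B packet whose LRH carries
 * SC 0x3 and whose RHF DC info bit is set, hfi1_9B_get_sc5() returns
 * 0x3 | (1 << 4) == 0x13, i.e. the fifth SC bit is recovered from the RHF.
 */
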
#define HFI1_JKEY_WIDTH       16
#define HFI1_JKEY_MASK        (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
	else if (jkey < 64)
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);

	return jkey;
}
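
/*
 * Worked example (illustrative only): an unprivileged uid whose low 16 bits
 * are 40 would collide with the kernel range (32 - 63), so bit 15 is set and
 * the resulting J_KEY is 0x8028 (32808).  A CAP_SYS_ADMIN user with the same
 * uid is instead masked into the admin range: 40 & 31 == 8.
 */
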
/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{
	u16 link_speed = ppd->link_speed_active;
	u16 link_width = ppd->link_width_active;
	u32 egress_rate;

	if (link_speed == OPA_LINK_SPEED_25G)
		egress_rate = 25000;
	else /* assume OPA_LINK_SPEED_12_5G */
		egress_rate = 12500;

	switch (link_width) {
	case OPA_LINK_WIDTH_4X:
		egress_rate *= 4;
		break;
	case OPA_LINK_WIDTH_3X:
		egress_rate *= 3;
		break;
	case OPA_LINK_WIDTH_2X:
		egress_rate *= 2;
		break;
	default:
		/* assume IB_WIDTH_1X */
		break;
	}

	return egress_rate;
}

/*
 * egress_cycles
 *
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are cycles of that 805 MHz clock.
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{
	u32 cycles;

	/*
	 * cycles is:
	 *
	 *          (length) [bits] / (rate) [bits/sec]
	 *  ---------------------------------------------------
	 *   fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
	 */

	cycles = len * 8;	/* bits */
	cycles *= 805;
	cycles /= rate;

	return cycles;
}
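
/*
 * Worked example (illustrative only): a 4096-byte packet on a 100 Gb/s link
 * (active_egress_rate() == 100000, i.e. 25G x 4X) takes
 * 4096 * 8 * 805 / 100000 == 263 fabric clock cycles (integer division),
 * which at 805 MHz is roughly 327 ns on the wire.
 */
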
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh);
typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
				u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
				u8 sc5, const struct ib_grh *old_grh);

#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index);

#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{
	/* Pause at least 1us, to ensure chip returns all credits */
	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;

	udelay(usec ? usec : 1);
}

/*
 * sc_to_vlt() reverse lookup sc to vl
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)(0xff);

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}

#define PKEY_MEMBER_MASK 0x8000
#define PKEY_LOW_15_MASK 0x7fff

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
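
/*
 * Worked example (illustrative only): pkey 0x7fff (limited member) checked
 * against a table entry of 0xffff (full member of the same partition)
 * matches, because the low 15 bits agree and the entry is a full member.
 * Against an entry of 0x7fff it does not match: two limited members may not
 * communicate with each other.
 */
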
/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}

/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
				    u32 slid)
{
	struct hfi1_devdata *dd = ppd->dd;

	incr_cntr64(&ppd->port_rcv_constraint_errors);
	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_rcv_constraint.slid = slid;
		dd->err_info_rcv_constraint.pkey = pkey;
	}
}

/*
 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
 * is a hint as to the best place in the partition key table to begin
 * searching. This function should not be called on the data path because
 * of performance reasons. On datapath pkey check is expected to be done
 * by HW and rcv_pkey_check function should be called instead.
 */
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				     u8 sc5, u8 idx, u32 slid, bool force)
{
	if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index 'idx' */
	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
		return 0;

	/* no match - try the whole table */
	if (!ingress_pkey_table_search(ppd, pkey))
		return 0;

bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}

/*
 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. It only ensures pkey is valid for QP0. This function
 * should be called on the data path instead of ingress_pkey_check
 * as on data path, pkey check is done by HW (except for QP0).
 */
static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				 u8 sc5, u16 slid)
{
	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	return 0;
}

/* MTU enumeration, 256-4k match IB */
#define OPA_MTU_256   1
#define OPA_MTU_512   2
#define OPA_MTU_1024  3
#define OPA_MTU_2048  4
#define OPA_MTU_4096  5

u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
u16 enum_to_mtu(int mtu);
static inline int valid_ib_mtu(unsigned int mtu)
{
	return mtu == 256 || mtu == 512 ||
		mtu == 1024 || mtu == 2048 ||
		mtu == 4096;
}

static inline int valid_opa_max_mtu(unsigned int mtu)
{
	return mtu >= 2048 &&
		(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
}
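
/*
 * Illustrative examples: valid_ib_mtu(4096) and valid_opa_max_mtu(10240) are
 * both true, while valid_opa_max_mtu(1024) is false - an OPA maximum MTU must
 * be at least 2048 even though 1024 is a legal IB MTU.
 */
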
int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);

static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
	return ppd->dd;
}

static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{
	return container_of(dev, struct hfi1_devdata, verbs_dev);
}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{
	return container_of(ibp, struct hfi1_pportdata, ibport_data);
}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}

static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
{
	return &rcd->ppd->ibport_data;
}

/**
 * hfi1_may_ecn - Check whether FECN or BECN processing should be done
 * @pkt: the packet to be evaluated
 *
 * Check whether the FECN or BECN bits in the packet's header are
 * enabled, depending on packet type.
 *
 * This function only checks for FECN and BECN bits. Additional checks
 * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
 * ensure correct handling.
 */
static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
{
	struct ib_other_headers *ohdr = pkt->ohdr;
	u32 bth1;
	bool fecn, becn;

	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		bth1 = be32_to_cpu(ohdr->bth[1]);
		fecn = bth1 & IB_FECN_SMASK;
		becn = bth1 & IB_BECN_SMASK;
	}

	return fecn || becn;
}

bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan);
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
{
	bool do_work;

	do_work = hfi1_may_ecn(pkt);
	if (unlikely(do_work))
		return hfi1_process_ecn_slowpath(qp, pkt, false);
	return false;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 ret;

	if (index >= ARRAY_SIZE(ppd->pkeys))
		ret = 0;
	else
		ret = ppd->pkeys[index];

	return ret;
}

/*
 * Return the indexed GUID from the port GUIDs table.
 */
static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
	return cpu_to_be64(ppd->guids[index]);
}

/*
 * Called by readers of cc_state only, must call under rcu_read_lock().
 */
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
	return rcu_dereference(ppd->cc_state);
}

/*
 * Called by writers of cc_state only, must call under cc_state_lock.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
	return rcu_dereference_protected(ppd->cc_state,
					 lockdep_is_held(&ppd->cc_state_lock));
}

/*
 * values for dd->flags (_device_ related flags)
 */
#define HFI1_INITTED           0x1    /* chip and driver up and initted */
#define HFI1_PRESENT           0x2    /* chip accesses can be done */
#define HFI1_FROZEN            0x4    /* chip in SPC freeze */
#define HFI1_HAS_SDMA_TIMEOUT  0x8
#define HFI1_HAS_SEND_DMA      0x10   /* Supports Send DMA */
#define HFI1_FORCED_FREEZE     0x80   /* driver forced freeze mode */
#define HFI1_SHUTDOWN          0x100  /* device is shutting down */

/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK   ((1 << 11) - 1)

/* ctxt_flag bit offsets */
/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT 1
/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED 2
/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV 3
/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG 4

/* free up any allocated data at closes */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent);
void hfi1_free_devdata(struct hfi1_devdata *dd);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);

/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff);
void shutdown_led_override(struct hfi1_pportdata *ppd);

#define HFI1_CREDIT_RETURN_RATE (100)

/*
 * The number of words for the KDETH protocol field. If this is
 * larger than the actual field used, then part of the payload
 * will be in the header.
 *
 * Optimally, we want this sized so that a typical case will
 * use full cache lines. The typical local KDETH header would,
 * for a 64-byte cache line, need to be 36 bytes or 9 DWORDS.
 */
#define DEFAULT_RCVHDRSIZE 9

/*
 * Maximal header byte count:
 *
 * We also want to maintain a cache line alignment to assist DMA'ing
 * of the header bytes. Round up to a good size.
 */
#define DEFAULT_RCVHDR_ENTSIZE 32

1943 bool hfi1_can_pin_pages(struct hfi1_devdata
*dd
, struct mm_struct
*mm
,
1944 u32 nlocked
, u32 npages
);
1945 int hfi1_acquire_user_pages(struct mm_struct
*mm
, unsigned long vaddr
,
1946 size_t npages
, bool writable
, struct page
**pages
);
1947 void hfi1_release_user_pages(struct mm_struct
*mm
, struct page
**p
,
1948 size_t npages
, bool dirty
);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
}

static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, routine is
	 * inlined, and don't want register caching or reordering.
	 */
	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
}

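/*
 * Illustrative sketch (not part of the original header): when the chip
 * DMAs the header-queue tail to host memory (DMA_RTAIL), receive
 * processing typically compares a software head index against this tail,
 * e.g.:
 *
 *	u32 tail = get_rcvhdrtail(rcd);
 *
 *	while (head != tail) {
 *		... process one receive header entry, advance head ...
 *	}
 *
 * clear_rcvhdrtail() resets the DMA'd value, for example when a context
 * is (re)initialized.  The head variable above is hypothetical.
 */
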
extern const char ib_hfi1_version[];

int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);

int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
			   struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);

int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len);

const char *get_unit_name(int unit);
const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);

/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 */
static inline void flush_wc(void)
{
	asm volatile("sfence" : : : "memory");
}

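/*
 * Illustrative sketch (not part of the original header): flush_wc() is the
 * kind of barrier used after filling a write-combining mapped PIO send
 * buffer, so the buffered stores are pushed out before any following MMIO
 * write.  The buffer and trailing write below are hypothetical.
 *
 *	memcpy_toio(piobuf, hdr, hdr_bytes);
 *	flush_wc();
 *	... write the final/"going" word to launch the packet ...
 */
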
void handle_eflags(struct hfi1_packet *packet);
int process_receive_ib(struct hfi1_packet *packet);
int process_receive_bypass(struct hfi1_packet *packet);
int process_receive_error(struct hfi1_packet *packet);
int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);

/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
extern uint rcv_intr_count;
extern uint rcv_intr_dynamic;
extern ushort link_crc_mask;

extern struct mutex hfi1_mutex;

/* Number of seconds before our card status check...  */
#define STATUS_TIMEOUT 60

#define DRIVER_NAME		"hfi1"
#define HFI1_USER_MINOR_BASE     0
#define HFI1_TRACE_MINOR         127
#define HFI1_NMINORS             255

#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL0 0x24f0
#define PCI_DEVICE_ID_INTEL1 0x24f1

#define HFI1_PKT_USER_SC_INTEGRITY \
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)

#define HFI1_PKT_KERNEL_SC_INTEGRITY \
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)

static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
						  u16 ctxt_type)
{
	u64 base_sc_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sc_integrity =
	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (ctxt_type == SC_USER)
		base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
	else
		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sc_integrity;
}

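/*
 * Illustrative sketch (not part of the original header): the mask built
 * above is intended for the per-context send check-enable CSR, roughly:
 *
 *	reg = hfi1_pkt_default_send_ctxt_mask(dd, sc->type);
 *	write_kctxt_csr(dd, sc->hw_context, SEND_CTXT_CHECK_ENABLE, reg);
 *
 * The sc variable is hypothetical; the exact CSR write helper lives in the
 * chip-specific code.
 */
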
static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
	u64 base_sdma_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sdma_integrity =
	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sdma_integrity;
}

/*
 * hfi1_early_err is used (only!) to print early errors before devdata is
 * allocated, or when dd->pcidev may not be valid, and at the tail end of
 * cleanup when devdata may have been freed, etc.  hfi1_dev_porterr is
 * the same as dd_dev_err, but is used when the message really needs
 * the IB port# to be definitive as to what's happening.
 */
#define hfi1_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define hfi1_early_info(dev, fmt, ...) \
	dev_info(dev, fmt, ##__VA_ARGS__)

#define dd_dev_emerg(dd, fmt, ...) \
	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
		  get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_err_ratelimited(dd, fmt, ...) \
	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			    get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		 get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_warn_ratelimited(dd, fmt, ...) \
	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_info(dd, fmt, ...) \
	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
		 get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_info_ratelimited(dd, fmt, ...) \
	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_dbg(dd, fmt, ...) \
	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
		get_unit_name((dd)->unit), ##__VA_ARGS__)

#define hfi1_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
		get_unit_name((dd)->unit), (port), ##__VA_ARGS__)

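/*
 * Illustrative usage (not part of the original header): these wrappers
 * prefix the unit name (and port number for hfi1_dev_porterr) onto the
 * normal dev_* logging calls, e.g.:
 *
 *	dd_dev_err(dd, "SDMA engine %u error\n", idx);
 *	hfi1_dev_porterr(dd, port, "link went down\n");
 *
 * The idx and port variables above are hypothetical.
 */
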
/*
 * this is used for formatting hw error messages...
 */
struct hfi1_hwerror_msgs {
	u64 mask;
	const char *msg;
	size_t sz;
};

void hfi1_format_hwerrors(u64 hwerrs,
			  const struct hfi1_hwerror_msgs *hwerrmsgs,
			  size_t nhwerrmsgs, char *msg, size_t lmsg);

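/*
 * Illustrative sketch (not part of the original header): callers pass a
 * table of mask/message pairs together with a raw error register value;
 * the messages for the set bits are formatted into the supplied buffer.
 * The table contents and buffer below are hypothetical.
 *
 *	static const struct hfi1_hwerror_msgs example_hwerr_msgs[] = {
 *		{ .mask = BIT_ULL(0), .msg = "example error 0" },
 *		{ .mask = BIT_ULL(1), .msg = "example error 1" },
 *	};
 *	char buf[96];
 *
 *	hfi1_format_hwerrors(hwerrs, example_hwerr_msgs,
 *			     ARRAY_SIZE(example_hwerr_msgs),
 *			     buf, sizeof(buf));
 *	dd_dev_err(dd, "%s\n", buf);
 */
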
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
#define OPCODE_CHECK_MASK_DISABLED 0x0

static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.z_rc_acks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
		ppd->ibport_data.rvp.z_rc_qacks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
	}
}

/* Control LED state */
static inline void setextled(struct hfi1_devdata *dd, u32 on)
{
	if (on)
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
	else
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
}

/* return the i2c resource given the target */
static inline u32 i2c_target(u32 target)
{
	return target ? CR_I2C2 : CR_I2C1;
}

/* return the i2c chain chip resource that this HFI uses for QSFP */
static inline u32 qsfp_resource(struct hfi1_devdata *dd)
{
	return i2c_target(dd->hfi1_id);
}

/* Is this device integrated or discrete? */
static inline bool is_integrated(struct hfi1_devdata *dd)
{
	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);

#define DD_DEV_ENTRY(dd)  __string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))

static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
				       struct rdma_ah_attr *attr)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid = rdma_ah_get_dlid(attr);

	/*
	 * Kernel clients may not have setup GRH information.
	 * Set that here.
	 */
	ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
	ppd = ppd_from_ibp(ibp);
	if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
	      (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
	    (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
	    (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
	    (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
	    (rdma_ah_get_make_grd(attr))) {
		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
		rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
		rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
	}
}

/*
 * hfi1_check_mcast - Check if the given lid is
 * in the OPA multicast range.
 *
 * The LID might either reside in ah.dlid or might be
 * in the GRH of the address handle as DGID if extended
 * addresses are in use.
 */
static inline bool hfi1_check_mcast(u32 lid)
{
	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
}

#define opa_get_lid(lid, format) \
	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)

/* Convert a lid to a specific lid space */
static inline u32 __opa_get_lid(u32 lid, u8 format)
{
	bool is_mcast = hfi1_check_mcast(lid);

	switch (format) {
	case OPA_PORT_PACKET_FORMAT_8B:
	case OPA_PORT_PACKET_FORMAT_10B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF0000);
		return lid & 0xFFFFF;
	case OPA_PORT_PACKET_FORMAT_16B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF00000);
		return lid & 0xFFFFFF;
	case OPA_PORT_PACKET_FORMAT_9B:
		if (is_mcast)
			return (lid -
				opa_get_mcast_base(OPA_MCAST_NR) +
				be16_to_cpu(IB_MULTICAST_LID_BASE));
		else
			return lid & 0xFFFF;
	default:
		return lid;
	}
}

/* Return true if the given lid is in the OPA 16B multicast range */
static inline bool hfi1_is_16B_mcast(u32 lid)
{
	return ((lid >=
		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
}

static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
	u32 dlid = rdma_ah_get_dlid(attr);

	/* Modify ah_attr.dlid to be in the 32 bit LID space.
	 * This is how the address will be laid out:
	 * Assuming MCAST_NR to be 4,
	 * 32 bit permissive LID = 0xFFFFFFFF
	 * Multicast LID range = 0xFFFFFFFE to 0xF0000000
	 * Unicast LID range = 0xEFFFFFFF to 1
	 */
	if (ib_is_opa_gid(&grh->dgid))
		dlid = opa_get_lid_from_gid(&grh->dgid);
	else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
		 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
		dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
			opa_get_mcast_base(OPA_MCAST_NR);
	else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
		dlid = be32_to_cpu(OPA_LID_PERMISSIVE);

	rdma_ah_set_dlid(attr, dlid);
}

static inline u8 hfi1_get_packet_type(u32 lid)
{
	/* 9B if lid > 0xF0000000 */
	if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
		return HFI1_PKT_TYPE_9B;

	/* 16B if lid > 0xC000 */
	if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
		return HFI1_PKT_TYPE_16B;

	return HFI1_PKT_TYPE_9B;
}

static inline bool hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
{
	/*
	 * If there was an incoming 16B packet with permissive
	 * LIDs, OPA GIDs would have been programmed when those
	 * packets were received. A 16B packet will have to
	 * be sent in response to that packet. Return a 16B
	 * header type if that's the case.
	 */
	if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
		return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
			HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;

	/*
	 * Return a 16B header type if either the destination
	 * or source lid is extended.
	 */
	if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
		return HFI1_PKT_TYPE_16B;

	return hfi1_get_packet_type(lid);
}

static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
				     struct ib_grh *grh, u32 slid,
				     u32 dlid)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (!ibp)
		return;

	grh->hop_limit = 1;
	grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
		grh->sgid.global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
	else
		grh->sgid.global.interface_id = OPA_MAKE_ID(slid);

	/*
	 * Upper layers (like mad) may compare the dgid in the
	 * wc that is obtained here with the sgid_index in
	 * the wr. Since sgid_index in wr is always 0 for
	 * extended lids, set the dgid here to the default
	 * IB gid.
	 */
	grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	grh->dgid.global.interface_id =
		cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
}

static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
{
	return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
		 SIZE_OF_LT) & 0x7;
}

static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
				    u16 lrh0, u16 len,
				    u16 dlid, u16 slid)
{
	hdr->lrh[0] = cpu_to_be16(lrh0);
	hdr->lrh[1] = cpu_to_be16(dlid);
	hdr->lrh[2] = cpu_to_be16(len);
	hdr->lrh[3] = cpu_to_be16(slid);
}

static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
				     u32 slid, u32 dlid,
				     u16 len, u16 pkey,
				     u8 becn, u8 fecn, u8 l4,
				     u8 sc)
{
	u32 lrh0 = 0;
	u32 lrh1 = 0x40000000;
	u32 lrh2 = 0;
	u32 lrh3 = 0;

	lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
	lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
	lrh0 = (lrh0 & ~OPA_16B_LID_MASK) | (slid & OPA_16B_LID_MASK);
	lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
	lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
	lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
	lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;

	hdr->lrh[0] = lrh0;
	hdr->lrh[1] = lrh1;
	hdr->lrh[2] = lrh2;
	hdr->lrh[3] = lrh3;
}
#endif /* _HFI1_KERNEL_H */