1 #ifndef _IPATH_KERNEL_H
2 #define _IPATH_KERNEL_H
3 /*
4 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 /*
37 * This header file is the base header file for infinipath kernel code.
38 * ipath_user.h serves a similar purpose for user code.
39 */
40
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/mutex.h>
45 #include <linux/list.h>
46 #include <linux/scatterlist.h>
47 #include <linux/sched.h>
48 #include <asm/io.h>
49 #include <rdma/ib_verbs.h>
50
51 #include "ipath_common.h"
52 #include "ipath_debug.h"
53 #include "ipath_registers.h"
54
55 /* only s/w major version of InfiniPath we can handle */
56 #define IPATH_CHIP_VERS_MAJ 2U
57
58 /* don't care about this except printing */
59 #define IPATH_CHIP_VERS_MIN 0U
60
61 /* temporary, maybe always */
62 extern struct infinipath_stats ipath_stats;
63
64 #define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
65 /*
66 * First-cut criterion for "device is active" is
67 * two thousand dwords combined Tx, Rx traffic per
68 * 5-second interval. SMA packets are 64 dwords,
69 * and occur "a few per second", presumably each way.
70 */
71 #define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
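/*
 * Illustrative check (a sketch only; the real test lives in the stats
 * timer code, and the names below are placeholders): sample the send
 * and receive dword counters every 5 seconds and compare the combined
 * delta against the threshold:
 *
 *	traffic = (sword_now - sword_prev) + (rword_now - rword_prev);
 *	active = (traffic > IPATH_TRAFFIC_ACTIVE_THRESHOLD);
 */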
72 /*
73 * Struct used to indicate which errors are logged in each of the
74 * error-counters that are logged to EEPROM. A counter is incremented
75 * _once_ (saturating at 255) for each event with any bits set in
76 * the error or hwerror register masks below.
77 */
78 #define IPATH_EEP_LOG_CNT (4)
79 struct ipath_eep_log_mask {
80 u64 errs_to_log;
81 u64 hwerrs_to_log;
82 };
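/*
 * Illustrative use (a sketch, not the driver's actual EEPROM logging
 * code; errs, hwerrs, masks and cnt are placeholder names): bump
 * counter i once, saturating at 255, whenever the latest error bits
 * intersect that counter's masks:
 *
 *	for (i = 0; i < IPATH_EEP_LOG_CNT; i++)
 *		if ((errs & masks[i].errs_to_log) ||
 *		    (hwerrs & masks[i].hwerrs_to_log))
 *			if (cnt[i] < 255)
 *				cnt[i]++;
 */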
83
84 struct ipath_portdata {
85 void **port_rcvegrbuf;
86 dma_addr_t *port_rcvegrbuf_phys;
87 /* rcvhdrq base, needs mmap before useful */
88 void *port_rcvhdrq;
89 /* kernel virtual address where hdrqtail is updated */
90 void *port_rcvhdrtail_kvaddr;
91 /*
92 * temp buffer for expected send setup, allocated at open, instead
93 * of each setup call
94 */
95 void *port_tid_pg_list;
96 /* when waiting for rcv or pioavail */
97 wait_queue_head_t port_wait;
98 /*
99 * rcvegr bufs base, physical address; must fit
100 * in 44 bits (so 32-bit programs using mmap64 still work)
101 */
102 dma_addr_t port_rcvegr_phys;
103 /* mmap of hdrq, must fit in 44 bits */
104 dma_addr_t port_rcvhdrq_phys;
105 dma_addr_t port_rcvhdrqtailaddr_phys;
106 /*
107 * number of opens (including slave subports) on this instance
108 * (ignoring forks, dup, etc. for now)
109 */
110 int port_cnt;
111 /*
112 * how much space to leave at start of eager TID entries for
113 * protocol use, on each TID
114 */
115 /* instead of calculating it */
116 unsigned port_port;
117 /* non-zero if port is being shared. */
118 u16 port_subport_cnt;
119 /* subport id of this opener, if port is being shared */
120 u16 port_subport_id;
121 /* number of pio bufs for this port (all procs, if shared) */
122 u32 port_piocnt;
123 /* first pio buffer for this port */
124 u32 port_pio_base;
125 /* chip offset of PIO buffers for this port */
126 u32 port_piobufs;
127 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
128 u32 port_rcvegrbuf_chunks;
129 /* how many egrbufs per chunk */
130 u32 port_rcvegrbufs_perchunk;
131 /* order for port_rcvegrbuf_pages */
132 size_t port_rcvegrbuf_size;
133 /* rcvhdrq size (for freeing) */
134 size_t port_rcvhdrq_size;
135 /* next expected TID to check when looking for free */
136 u32 port_tidcursor;
137 /* port state flags (IPATH_PORT_* bit offsets below) */
138 unsigned long port_flag;
139 /* what happened */
140 unsigned long int_flag;
141 /* WAIT_RCV that timed out, no interrupt */
142 u32 port_rcvwait_to;
143 /* WAIT_PIO that timed out, no interrupt */
144 u32 port_piowait_to;
145 /* WAIT_RCV already happened, no wait */
146 u32 port_rcvnowait;
147 /* WAIT_PIO already happened, no wait */
148 u32 port_pionowait;
149 /* total number of rcvhdrqfull errors */
150 u32 port_hdrqfull;
151 /*
152 * Used to suppress multiple instances of same
153 * port staying stuck at same point.
154 */
155 u32 port_lastrcvhdrqtail;
156 /* saved total number of rcvhdrqfull errors for poll edge trigger */
157 u32 port_hdrqfull_poll;
158 /* total number of polled urgent packets */
159 u32 port_urgent;
160 /* saved total number of polled urgent packets for poll edge trigger */
161 u32 port_urgent_poll;
162 /* pid of process using this port */
163 struct pid *port_pid;
164 struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
165 /* same size as task_struct .comm[] */
166 char port_comm[TASK_COMM_LEN];
167 /* pkeys set by this use of this port */
168 u16 port_pkeys[4];
169 /* so file ops can get at unit */
170 struct ipath_devdata *port_dd;
171 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
172 void *subport_uregbase;
173 /* An array of pages for the eager receive buffers * N */
174 void *subport_rcvegrbuf;
175 /* An array of pages for the eager header queue entries * N */
176 void *subport_rcvhdr_base;
177 /* The version of the library which opened this port */
178 u32 userversion;
179 /* Bitmask of active slaves */
180 u32 active_slaves;
181 /* Type of packets or conditions we want to poll for */
182 u16 poll_type;
183 /* port rcvhdrq head offset */
184 u32 port_head;
185 /* receive packet sequence counter */
186 u32 port_seq_cnt;
187 };
188
189 struct sk_buff;
190 struct ipath_sge_state;
191 struct ipath_verbs_txreq;
192
193 /*
194 * control information for layered drivers
195 */
196 struct _ipath_layer {
197 void *l_arg;
198 };
199
200 struct ipath_skbinfo {
201 struct sk_buff *skb;
202 dma_addr_t phys;
203 };
204
205 struct ipath_sdma_txreq {
206 int flags;
207 int sg_count;
208 union {
209 struct scatterlist *sg;
210 void *map_addr;
211 };
212 void (*callback)(void *, int);
213 void *callback_cookie;
214 int callback_status;
215 u16 start_idx; /* sdma private */
216 u16 next_descq_idx; /* sdma private */
217 struct list_head list; /* sdma private */
218 };
219
220 struct ipath_sdma_desc {
221 __le64 qw[2];
222 };
223
224 #define IPATH_SDMA_TXREQ_F_USELARGEBUF 0x1
225 #define IPATH_SDMA_TXREQ_F_HEADTOHOST 0x2
226 #define IPATH_SDMA_TXREQ_F_INTREQ 0x4
227 #define IPATH_SDMA_TXREQ_F_FREEBUF 0x8
228 #define IPATH_SDMA_TXREQ_F_FREEDESC 0x10
229 #define IPATH_SDMA_TXREQ_F_VL15 0x20
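/*
 * Example flag combination (sketch; the exact semantics are inferred
 * from the names): a request that uses the larger buffers, wants an
 * interrupt when it completes, and wants its buffer freed afterwards
 * might set:
 *
 *	txreq->flags = IPATH_SDMA_TXREQ_F_USELARGEBUF |
 *		       IPATH_SDMA_TXREQ_F_INTREQ |
 *		       IPATH_SDMA_TXREQ_F_FREEBUF;
 */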
230
231 #define IPATH_SDMA_TXREQ_S_OK 0
232 #define IPATH_SDMA_TXREQ_S_SENDERROR 1
233 #define IPATH_SDMA_TXREQ_S_ABORTED 2
234 #define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
235
236 #define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG (1ull << 63)
237 #define IPATH_SDMA_STATUS_ABORT_IN_PROG (1ull << 62)
238 #define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE (1ull << 61)
239 #define IPATH_SDMA_STATUS_SCB_EMPTY (1ull << 30)
240
241 /* max dwords in small buffer packet */
242 #define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
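/* Note: this macro assumes a local 'struct ipath_devdata *dd' is in scope. */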
243
244 /*
245 * Possible IB config parameters for ipath_f_get/set_ib_cfg()
246 */
247 #define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
248 #define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
249 #define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
250 #define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
251 #define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
252 #define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
253 #define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
254 #define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
255 #define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
256 #define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
257 #define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
258
259
260 struct ipath_devdata {
261 struct list_head ipath_list;
262
263 struct ipath_kregs const *ipath_kregs;
264 struct ipath_cregs const *ipath_cregs;
265
266 /* mem-mapped pointer to base of chip regs */
267 u64 __iomem *ipath_kregbase;
268 /* end of mem-mapped chip space; range checking */
269 u64 __iomem *ipath_kregend;
270 /* physical address of chip for io_remap, etc. */
271 unsigned long ipath_physaddr;
272 /* base of memory alloced for ipath_kregbase, for free */
273 u64 *ipath_kregalloc;
274 /* ipath_cfgports pointers */
275 struct ipath_portdata **ipath_pd;
276 /* sk_buffs used by port 0 eager receive queue */
277 struct ipath_skbinfo *ipath_port0_skbinfo;
278 /* kvirt address of 1st 2k pio buffer */
279 void __iomem *ipath_pio2kbase;
280 /* kvirt address of 1st 4k pio buffer */
281 void __iomem *ipath_pio4kbase;
282 /*
283 * points to area where PIOavail registers will be DMA'ed.
284 * Has to be on a page of its own, because the page will be
285 * mapped into user program space. This copy is *ONLY* ever
286 * written by DMA, not by the driver! Need a copy per device
287 * when we get to multiple devices
288 */
289 volatile __le64 *ipath_pioavailregs_dma;
290 /* physical address where updates occur */
291 dma_addr_t ipath_pioavailregs_phys;
292 struct _ipath_layer ipath_layer;
293 /* setup intr */
294 int (*ipath_f_intrsetup)(struct ipath_devdata *);
295 /* fallback to alternate interrupt type if possible */
296 int (*ipath_f_intr_fallback)(struct ipath_devdata *);
297 /* setup on-chip bus config */
298 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
299 /* hard reset chip */
300 int (*ipath_f_reset)(struct ipath_devdata *);
301 int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
302 size_t);
303 void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
304 void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
305 size_t);
306 void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
307 int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
308 int (*ipath_f_early_init)(struct ipath_devdata *);
309 void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
310 void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
311 u32, unsigned long);
312 void (*ipath_f_tidtemplate)(struct ipath_devdata *);
313 void (*ipath_f_cleanup)(struct ipath_devdata *);
314 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
315 /* fill out chip-specific fields */
316 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
317 /* free irq */
318 void (*ipath_f_free_irq)(struct ipath_devdata *);
319 struct ipath_message_header *(*ipath_f_get_msgheader)
320 (struct ipath_devdata *, __le32 *);
321 void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
322 int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
323 int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
324 void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
325 void (*ipath_f_read_counters)(struct ipath_devdata *,
326 struct infinipath_counters *);
327 void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
328 /* per chip actions needed for IB Link up/down changes */
329 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
330
331 unsigned ipath_lastegr_idx;
332 struct ipath_ibdev *verbs_dev;
333 struct timer_list verbs_timer;
334 /* total dwords sent (summed from counter) */
335 u64 ipath_sword;
336 /* total dwords rcvd (summed from counter) */
337 u64 ipath_rword;
338 /* total packets sent (summed from counter) */
339 u64 ipath_spkts;
340 /* total packets rcvd (summed from counter) */
341 u64 ipath_rpkts;
342 /* ipath_statusp initially points to this. */
343 u64 _ipath_status;
344 /* GUID for this interface, in network order */
345 __be64 ipath_guid;
346 /*
347 * aggregate of error bits reported since last cleared, for
348 * limiting of error reporting
349 */
350 ipath_err_t ipath_lasterror;
351 /*
352 * aggregate of error bits reported since last cleared, for
353 * limiting of hwerror reporting
354 */
355 ipath_err_t ipath_lasthwerror;
356 /* errors masked because they occur too fast */
357 ipath_err_t ipath_maskederrs;
358 u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
359 /* these 5 fields are used to establish deltas for IB Symbol
360 * errors and linkrecovery errors. They can be reported on
361 * some chips during link negotiation prior to INIT, and with
362 * DDR when faking DDR negotiations with non-IBTA switches.
363 * The chip counters are adjusted at driver unload if there is
364 * a non-zero delta.
365 */
366 u64 ibdeltainprog;
367 u64 ibsymdelta;
368 u64 ibsymsnap;
369 u64 iblnkerrdelta;
370 u64 iblnkerrsnap;
371
372 /* time in jiffies at which to re-enable maskederrs */
373 unsigned long ipath_unmasktime;
374 /* count of egrfull errors, combined for all ports */
375 u64 ipath_last_tidfull;
376 /* for ipath_qcheck() */
377 u64 ipath_lastport0rcv_cnt;
378 /* template for writing TIDs */
379 u64 ipath_tidtemplate;
380 /* value to write to free TIDs */
381 u64 ipath_tidinvalid;
382 /* IBA6120 rcv interrupt setup */
383 u64 ipath_rhdrhead_intr_off;
384
385 /* size of memory at ipath_kregbase */
386 u32 ipath_kregsize;
387 /* number of registers used for pioavail */
388 u32 ipath_pioavregs;
389 /* IPATH_POLL, etc. */
390 u32 ipath_flags;
391 /* ipath_flags driver is waiting for */
392 u32 ipath_state_wanted;
393 /* last buffer for user use, first buf for kernel use is this
394 * index. */
395 u32 ipath_lastport_piobuf;
396 /* is a stats timer active */
397 u32 ipath_stats_timer_active;
398 /* number of interrupts for this device -- saturates... */
399 u32 ipath_int_counter;
400 /* dwords sent read from counter */
401 u32 ipath_lastsword;
402 /* dwords received read from counter */
403 u32 ipath_lastrword;
404 /* sent packets read from counter */
405 u32 ipath_lastspkts;
406 /* received packets read from counter */
407 u32 ipath_lastrpkts;
408 /* pio bufs allocated per port */
409 u32 ipath_pbufsport;
410 /* if remainder on bufs/port, ports < extrabuf get 1 extra */
411 u32 ipath_ports_extrabuf;
412 u32 ipath_pioupd_thresh; /* update threshold, some chips */
413 /*
414 * number of ports configured as max; zero is set to number chip
415 * supports, less gives more pio bufs/port, etc.
416 */
417 u32 ipath_cfgports;
418 /* count of port 0 hdrqfull errors */
419 u32 ipath_p0_hdrqfull;
420 /* port 0 number of receive eager buffers */
421 u32 ipath_p0_rcvegrcnt;
422
423 /*
424 * index of last piobuffer we used. Speeds up searching, by
425 * starting at this point. Doesn't matter if multiple CPUs use and
426 * update it; the last updater's write is the only one that matters. Whenever it
427 * wraps, we update shadow copies. Need a copy per device when we
428 * get to multiple devices
429 */
430 u32 ipath_lastpioindex;
431 u32 ipath_lastpioindexl;
432 /* max length of freezemsg */
433 u32 ipath_freezelen;
434 /*
435 * consecutive times we wanted a PIO buffer but were unable to
436 * get one
437 */
438 u32 ipath_consec_nopiobuf;
439 /*
440 * hint that we should update ipath_pioavailshadow before
441 * looking for a PIO buffer
442 */
443 u32 ipath_upd_pio_shadow;
444 /* so we can rewrite it after a chip reset */
445 u32 ipath_pcibar0;
446 /* so we can rewrite it after a chip reset */
447 u32 ipath_pcibar1;
448 u32 ipath_x1_fix_tries;
449 u32 ipath_autoneg_tries;
450 u32 serdes_first_init_done;
451
452 struct ipath_relock {
453 atomic_t ipath_relock_timer_active;
454 struct timer_list ipath_relock_timer;
455 unsigned int ipath_relock_interval; /* in jiffies */
456 } ipath_relock_singleton;
457
458 /* interrupt number */
459 int ipath_irq;
460 /* HT/PCI Vendor ID (here for NodeInfo) */
461 u16 ipath_vendorid;
462 /* HT/PCI Device ID (here for NodeInfo) */
463 u16 ipath_deviceid;
464 /* offset in HT config space of slave/primary interface block */
465 u8 ipath_ht_slave_off;
466 /* for write combining settings */
467 int wc_cookie;
468 /* ref count for each pkey */
469 atomic_t ipath_pkeyrefs[4];
470 /* shadow copy of struct page *'s for exp tid pages */
471 struct page **ipath_pageshadow;
472 /* shadow copy of dma handles for exp tid pages */
473 dma_addr_t *ipath_physshadow;
474 u64 __iomem *ipath_egrtidbase;
475 /* lock to workaround chip bug 9437 and others */
476 spinlock_t ipath_kernel_tid_lock;
477 spinlock_t ipath_user_tid_lock;
478 spinlock_t ipath_sendctrl_lock;
479 /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
480 spinlock_t ipath_uctxt_lock;
481
482 /*
483 * IPATH_STATUS_*,
484 * this address is mapped readonly into user processes so they can
485 * get status cheaply, whenever they want.
486 */
487 u64 *ipath_statusp;
488 /* freeze msg if hw error put chip in freeze */
489 char *ipath_freezemsg;
490 /* pci access data structure */
491 struct pci_dev *pcidev;
492 struct cdev *user_cdev;
493 struct cdev *diag_cdev;
494 struct device *user_dev;
495 struct device *diag_dev;
496 /* timer used to prevent stats overflow, error throttling, etc. */
497 struct timer_list ipath_stats_timer;
498 /* timer to verify interrupts work, and fallback if possible */
499 struct timer_list ipath_intrchk_timer;
500 void *ipath_dummy_hdrq; /* used after port close */
501 dma_addr_t ipath_dummy_hdrq_phys;
502
503 /* SendDMA related entries */
504 spinlock_t ipath_sdma_lock;
505 unsigned long ipath_sdma_status;
506 unsigned long ipath_sdma_abort_jiffies;
507 unsigned long ipath_sdma_abort_intr_timeout;
508 unsigned long ipath_sdma_buf_jiffies;
509 struct ipath_sdma_desc *ipath_sdma_descq;
510 u64 ipath_sdma_descq_added;
511 u64 ipath_sdma_descq_removed;
512 int ipath_sdma_desc_nreserved;
513 u16 ipath_sdma_descq_cnt;
514 u16 ipath_sdma_descq_tail;
515 u16 ipath_sdma_descq_head;
516 u16 ipath_sdma_next_intr;
517 u16 ipath_sdma_reset_wait;
518 u8 ipath_sdma_generation;
519 struct tasklet_struct ipath_sdma_abort_task;
520 struct tasklet_struct ipath_sdma_notify_task;
521 struct list_head ipath_sdma_activelist;
522 struct list_head ipath_sdma_notifylist;
523 atomic_t ipath_sdma_vl15_count;
524 struct timer_list ipath_sdma_vl15_timer;
525
526 dma_addr_t ipath_sdma_descq_phys;
527 volatile __le64 *ipath_sdma_head_dma;
528 dma_addr_t ipath_sdma_head_phys;
529
530 unsigned long ipath_ureg_align; /* user register alignment */
531
532 struct delayed_work ipath_autoneg_work;
533 wait_queue_head_t ipath_autoneg_wait;
534
535 /* HoL blocking / user app forward-progress state */
536 unsigned ipath_hol_state;
537 unsigned ipath_hol_next;
538 struct timer_list ipath_hol_timer;
539
540 /*
541 * Shadow copies of registers; size indicates read access size.
542 * Most of them are readonly, but some are write-only registers,
543 * where we manipulate the bits in the shadow copy, and then write
544 * the shadow copy to infinipath.
545 *
546 * We deliberately make most of these 32 bits, since they have
547 * restricted range. For any that we read, we want to generate 32
548 * bit accesses, since Opteron will generate 2 separate 32 bit HT
549 * transactions for a 64 bit read, and we want to avoid unnecessary
550 * HT transactions.
551 */
552
553 /* This is the 64 bit group */
554
555 /*
556 * shadow of pioavail, check to be sure it's large enough at
557 * init time.
558 */
559 unsigned long ipath_pioavailshadow[8];
560 /* bitmap of send buffers available for the kernel to use with PIO. */
561 unsigned long ipath_pioavailkernel[8];
562 /* shadow of kr_gpio_out, for rmw ops */
563 u64 ipath_gpio_out;
564 /* shadow the gpio mask register */
565 u64 ipath_gpio_mask;
566 /* shadow the gpio output enable, etc... */
567 u64 ipath_extctrl;
568 /* kr_revision shadow */
569 u64 ipath_revision;
570 /*
571 * shadow of ibcctrl, for interrupt handling of link changes,
572 * etc.
573 */
574 u64 ipath_ibcctrl;
575 /*
576 * last ibcstatus, to suppress "duplicate" status change messages,
577 * mostly from 2 to 3
578 */
579 u64 ipath_lastibcstat;
580 /* hwerrmask shadow */
581 ipath_err_t ipath_hwerrmask;
582 ipath_err_t ipath_errormask; /* errormask shadow */
583 /* interrupt config reg shadow */
584 u64 ipath_intconfig;
585 /* kr_sendpiobufbase value */
586 u64 ipath_piobufbase;
587 /* kr_ibcddrctrl shadow */
588 u64 ipath_ibcddrctrl;
589
590 /* these are the "32 bit" regs */
591
592 /*
593 * number of GUIDs in the flash for this interface; may need some
594 * rethinking for setting on other ifaces
595 */
596 u32 ipath_nguid;
597 /*
598 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
599 * all expect bit fields to be "unsigned long"
600 */
601 /* shadow kr_rcvctrl */
602 unsigned long ipath_rcvctrl;
603 /* shadow kr_sendctrl */
604 unsigned long ipath_sendctrl;
605 /* to not count armlaunch after cancel */
606 unsigned long ipath_lastcancel;
607 /* count cases where special trigger was needed (double write) */
608 unsigned long ipath_spectriggerhit;
609
610 /* value we put in kr_rcvhdrcnt */
611 u32 ipath_rcvhdrcnt;
612 /* value we put in kr_rcvhdrsize */
613 u32 ipath_rcvhdrsize;
614 /* value we put in kr_rcvhdrentsize */
615 u32 ipath_rcvhdrentsize;
616 /* offset of last entry in rcvhdrq */
617 u32 ipath_hdrqlast;
618 /* kr_portcnt value */
619 u32 ipath_portcnt;
620 /* kr_pagealign value */
621 u32 ipath_palign;
622 /* number of "2KB" PIO buffers */
623 u32 ipath_piobcnt2k;
624 /* size in bytes of "2KB" PIO buffers */
625 u32 ipath_piosize2k;
626 /* number of "4KB" PIO buffers */
627 u32 ipath_piobcnt4k;
628 /* size in bytes of "4KB" PIO buffers */
629 u32 ipath_piosize4k;
630 u32 ipath_pioreserved; /* reserved special-inkernel; */
631 /* kr_rcvegrbase value */
632 u32 ipath_rcvegrbase;
633 /* kr_rcvegrcnt value */
634 u32 ipath_rcvegrcnt;
635 /* kr_rcvtidbase value */
636 u32 ipath_rcvtidbase;
637 /* kr_rcvtidcnt value */
638 u32 ipath_rcvtidcnt;
639 /* kr_sendregbase */
640 u32 ipath_sregbase;
641 /* kr_userregbase */
642 u32 ipath_uregbase;
643 /* kr_counterregbase */
644 u32 ipath_cregbase;
645 /* shadow the control register contents */
646 u32 ipath_control;
647 /* PCI revision register (HTC rev on FPGA) */
648 u32 ipath_pcirev;
649
650 /* chip address space used by 4k pio buffers */
651 u32 ipath_4kalign;
652 /* The MTU programmed for this unit */
653 u32 ipath_ibmtu;
654 /*
655 * The max size IB packet, including IB headers, that we can send.
656 * Starts same as ipath_piosize, but is affected when ibmtu is
657 * changed, or by size of eager buffers
658 */
659 u32 ipath_ibmaxlen;
660 /*
661 * ibmaxlen at init time, limited by chip and by receive buffer
662 * size. Not changed after init.
663 */
664 u32 ipath_init_ibmaxlen;
665 /* size of each rcvegrbuffer */
666 u32 ipath_rcvegrbufsize;
667 /* localbus width (1, 2, 4, 8, 16, 32) from config space */
668 u32 ipath_lbus_width;
669 /* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
670 u32 ipath_lbus_speed;
671 /*
672 * number of sequential ibcstatus changes for polling active/quiet
673 * (i.e., link not coming up).
674 */
675 u32 ipath_ibpollcnt;
676 /* low and high portions of MSI capability/vector */
677 u32 ipath_msi_lo;
678 /* saved after PCIe init for restore after reset */
679 u32 ipath_msi_hi;
680 /* MSI data (vector) saved for restore */
681 u16 ipath_msi_data;
682 /* MLID programmed for this instance */
683 u16 ipath_mlid;
684 /* LID programmed for this instance */
685 u16 ipath_lid;
686 /* list of pkeys programmed; 0 if not set */
687 u16 ipath_pkeys[4];
688 /*
689 * ASCII serial number, from flash, large enough for the original
690 * all-digit strings, and the longer QLogic serial number format
691 */
692 u8 ipath_serial[16];
693 /* human readable board version */
694 u8 ipath_boardversion[96];
695 u8 ipath_lbus_info[32]; /* human readable localbus info */
696 /* chip major rev, from ipath_revision */
697 u8 ipath_majrev;
698 /* chip minor rev, from ipath_revision */
699 u8 ipath_minrev;
700 /* board rev, from ipath_revision */
701 u8 ipath_boardrev;
702 /* saved for restore after reset */
703 u8 ipath_pci_cacheline;
704 /* LID mask control */
705 u8 ipath_lmc;
706 /* link width supported */
707 u8 ipath_link_width_supported;
708 /* link speed supported */
709 u8 ipath_link_speed_supported;
710 u8 ipath_link_width_enabled;
711 u8 ipath_link_speed_enabled;
712 u8 ipath_link_width_active;
713 u8 ipath_link_speed_active;
714 /* Rx Polarity inversion (compensate for ~tx on partner) */
715 u8 ipath_rx_pol_inv;
716
717 u8 ipath_r_portenable_shift;
718 u8 ipath_r_intravail_shift;
719 u8 ipath_r_tailupd_shift;
720 u8 ipath_r_portcfg_shift;
721
722 /* unit # of this chip, if present */
723 int ipath_unit;
724
725 /* local link integrity counter */
726 u32 ipath_lli_counter;
727 /* local link integrity errors */
728 u32 ipath_lli_errors;
729 /*
730 * Above counts only cases where _successive_ LocalLinkIntegrity
731 * errors were seen in the receive headers of kern-packets.
732 * Below are the three (monotonically increasing) counters
733 * maintained via GPIO interrupts on iba6120-rev2.
734 */
735 u32 ipath_rxfc_unsupvl_errs;
736 u32 ipath_overrun_thresh_errs;
737 u32 ipath_lli_errs;
738
739 /*
740 * Not all devices managed by a driver instance are the same
741 * type, so these fields must be per-device.
742 */
743 u64 ipath_i_bitsextant;
744 ipath_err_t ipath_e_bitsextant;
745 ipath_err_t ipath_hwe_bitsextant;
746
747 /*
748 * Below should be computable from number of ports,
749 * since they are never modified.
750 */
751 u64 ipath_i_rcvavail_mask;
752 u64 ipath_i_rcvurg_mask;
753 u16 ipath_i_rcvurg_shift;
754 u16 ipath_i_rcvavail_shift;
755
756 /*
757 * Register bits for selecting i2c direction and values, used for
758 * I2C serial flash.
759 */
760 u8 ipath_gpio_sda_num;
761 u8 ipath_gpio_scl_num;
762 u8 ipath_i2c_chain_type;
763 u64 ipath_gpio_sda;
764 u64 ipath_gpio_scl;
765
766 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
767 spinlock_t ipath_gpio_lock;
768
769 /*
770 * IB link and linktraining states and masks that vary per chip in
771 * some way. Set at init, to avoid recomputing them on each IB status change interrupt
772 */
773 u8 ibcs_ls_shift;
774 u8 ibcs_lts_mask;
775 u32 ibcs_mask;
776 u32 ib_init;
777 u32 ib_arm;
778 u32 ib_active;
779
780 u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
781
782 /*
783 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
784 * reg. Changes for IBA7220
785 */
786 u8 ibcc_lic_mask; /* LinkInitCmd */
787 u8 ibcc_lc_shift; /* LinkCmd */
788 u8 ibcc_mpl_shift; /* Maxpktlen */
789
790 u8 delay_mult;
791
792 /* used to override LED behavior */
793 u8 ipath_led_override; /* Substituted for normal value, if non-zero */
794 u16 ipath_led_override_timeoff; /* delta to next timer event */
795 u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
796 u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
797 atomic_t ipath_led_override_timer_active;
798 /* Used to flash LEDs in override mode */
799 struct timer_list ipath_led_override_timer;
800
801 /* Support (including locks) for EEPROM logging of errors and time */
802 /* control access to actual counters, timer */
803 spinlock_t ipath_eep_st_lock;
804 /* control high-level access to EEPROM */
805 struct mutex ipath_eep_lock;
806 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
807 uint64_t ipath_traffic_wds;
808 /* active time is kept in seconds, but logged in hours */
809 atomic_t ipath_active_time;
810 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
811 uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
812 uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
813 uint16_t ipath_eep_hrs;
814 /*
815 * masks for which bits of errs, hwerrs that cause
816 * each of the counters to increment.
817 */
818 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
819
820 /* interrupt mitigation reload register info */
821 u16 ipath_jint_idle_ticks; /* idle clock ticks */
822 u16 ipath_jint_max_packets; /* max packets across all ports */
823
824 /*
825 * lock for access to SerDes, and flags to sequence preset
826 * versus steady-state. 7220-only at the moment.
827 */
828 spinlock_t ipath_sdepb_lock;
829 u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
830 };
831
832 /* ipath_hol_state values (stopping/starting user proc, send flushing) */
833 #define IPATH_HOL_UP 0
834 #define IPATH_HOL_DOWN 1
835 /* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
836 #define IPATH_HOL_DOWNSTOP 0
837 #define IPATH_HOL_DOWNCONT 1
838
839 /* bit positions for sdma_status */
840 #define IPATH_SDMA_ABORTING 0
841 #define IPATH_SDMA_DISARMED 1
842 #define IPATH_SDMA_DISABLED 2
843 #define IPATH_SDMA_LAYERBUF 3
844 #define IPATH_SDMA_RUNNING 30
845 #define IPATH_SDMA_SHUTDOWN 31
846
847 /* bit combinations that correspond to abort states */
848 #define IPATH_SDMA_ABORT_NONE 0
849 #define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
850 #define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
851 (1UL << IPATH_SDMA_DISARMED))
852 #define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
853 (1UL << IPATH_SDMA_DISABLED))
854 #define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
855 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
856 #define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
857 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
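/*
 * Example test (sketch): IPATH_SDMA_ABORT_ABORTED is the fully-aborted
 * combination (all three bits set), so code waiting for the abort
 * sequence to finish can check:
 *
 *	aborted = (dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK) ==
 *		  IPATH_SDMA_ABORT_ABORTED;
 */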
858
859 #define IPATH_SDMA_BUF_NONE 0
860 #define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
861
862 /* Private data for file operations */
863 struct ipath_filedata {
864 struct ipath_portdata *pd;
865 unsigned subport;
866 unsigned tidcursor;
867 struct ipath_user_sdma_queue *pq;
868 };
869 extern struct list_head ipath_dev_list;
870 extern spinlock_t ipath_devs_lock;
871 extern struct ipath_devdata *ipath_lookup(int unit);
872
873 int ipath_init_chip(struct ipath_devdata *, int);
874 int ipath_enable_wc(struct ipath_devdata *dd);
875 void ipath_disable_wc(struct ipath_devdata *dd);
876 int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
877 void ipath_shutdown_device(struct ipath_devdata *);
878 void ipath_clear_freeze(struct ipath_devdata *);
879
880 struct file_operations;
881 int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
882 struct cdev **cdevp, struct device **devp);
883 void ipath_cdev_cleanup(struct cdev **cdevp,
884 struct device **devp);
885
886 int ipath_diag_add(struct ipath_devdata *);
887 void ipath_diag_remove(struct ipath_devdata *);
888
889 extern wait_queue_head_t ipath_state_wait;
890
891 int ipath_user_add(struct ipath_devdata *dd);
892 void ipath_user_remove(struct ipath_devdata *dd);
893
894 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
895
896 extern int ipath_diag_inuse;
897
898 irqreturn_t ipath_intr(int irq, void *devid);
899 int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
900 ipath_err_t err);
901 #if __IPATH_INFO || __IPATH_DBG
902 extern const char *ipath_ibcstatus_str[];
903 #endif
904
905 /* clean up any per-chip chip-specific stuff */
906 void ipath_chip_cleanup(struct ipath_devdata *);
907 /* clean up any chip type-specific stuff */
908 void ipath_chip_done(void);
909
910 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
911 unsigned cnt);
912 void ipath_cancel_sends(struct ipath_devdata *, int);
913
914 int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
915 void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
916
917 int ipath_parse_ushort(const char *str, unsigned short *valp);
918
919 void ipath_kreceive(struct ipath_portdata *);
920 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
921 int ipath_reset_device(int);
922 void ipath_get_faststats(unsigned long);
923 int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
924 int ipath_set_linkstate(struct ipath_devdata *, u8);
925 int ipath_set_mtu(struct ipath_devdata *, u16);
926 int ipath_set_lid(struct ipath_devdata *, u32, u8);
927 int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
928 void ipath_enable_armlaunch(struct ipath_devdata *);
929 void ipath_disable_armlaunch(struct ipath_devdata *);
930 void ipath_hol_down(struct ipath_devdata *);
931 void ipath_hol_up(struct ipath_devdata *);
932 void ipath_hol_event(unsigned long);
933 void ipath_toggle_rclkrls(struct ipath_devdata *);
934 void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
935 void ipath_set_relock_poll(struct ipath_devdata *, int);
936 void ipath_shutdown_relock_poll(struct ipath_devdata *);
937
938 /* for use in system calls, where we want to know device type, etc. */
939 #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
940 #define subport_fp(fp) \
941 ((struct ipath_filedata *)(fp)->private_data)->subport
942 #define tidcursor_fp(fp) \
943 ((struct ipath_filedata *)(fp)->private_data)->tidcursor
944 #define user_sdma_queue_fp(fp) \
945 ((struct ipath_filedata *)(fp)->private_data)->pq
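/*
 * Typical use in the file_operations handlers (sketch): fp is the
 * 'struct file *' whose ->private_data holds the ipath_filedata, e.g.
 *
 *	struct ipath_portdata *pd = port_fp(fp);
 *	unsigned subport = subport_fp(fp);
 */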
946
947 /*
948 * values for ipath_flags
949 */
950 /* chip can report link latency (IB 1.2) */
951 #define IPATH_HAS_LINK_LATENCY 0x1
952 /* The chip is up and initted */
953 #define IPATH_INITTED 0x2
954 /* set if any user code has set kr_rcvhdrsize */
955 #define IPATH_RCVHDRSZ_SET 0x4
956 /* The chip is present and valid for accesses */
957 #define IPATH_PRESENT 0x8
958 /* HT link0 is only 8 bits wide, ignore upper byte crc
959 * errors, etc. */
960 #define IPATH_8BIT_IN_HT0 0x10
961 /* HT link1 is only 8 bits wide, ignore upper byte crc
962 * errors, etc. */
963 #define IPATH_8BIT_IN_HT1 0x20
964 /* The link is down */
965 #define IPATH_LINKDOWN 0x40
966 /* The link level is up (0x11) */
967 #define IPATH_LINKINIT 0x80
968 /* The link is in the armed (0x21) state */
969 #define IPATH_LINKARMED 0x100
970 /* The link is in the active (0x31) state */
971 #define IPATH_LINKACTIVE 0x200
972 /* link current state is unknown */
973 #define IPATH_LINKUNK 0x400
974 /* Write combining flush needed for PIO */
975 #define IPATH_PIO_FLUSH_WC 0x1000
976 /* receive tail pointer is not DMA'd to memory */
977 #define IPATH_NODMA_RTAIL 0x2000
978 /* no IB cable, or no device on IB cable */
979 #define IPATH_NOCABLE 0x4000
980 /* Supports port zero per packet receive interrupts via
981 * GPIO */
982 #define IPATH_GPIO_INTR 0x8000
983 /* uses the coded 4byte TID, not 8 byte */
984 #define IPATH_4BYTE_TID 0x10000
985 /* packet/word counters are 32 bit, else those 4 counters
986 * are 64bit */
987 #define IPATH_32BITCOUNTERS 0x20000
988 /* Interrupt register is 64 bits */
989 #define IPATH_INTREG_64 0x40000
990 /* can miss port0 rx interrupts */
991 #define IPATH_DISABLED 0x80000 /* administratively disabled */
992 /* Use GPIO interrupts for new counters */
993 #define IPATH_GPIO_ERRINTRS 0x100000
994 #define IPATH_SWAP_PIOBUFS 0x200000
995 /* Supports Send DMA */
996 #define IPATH_HAS_SEND_DMA 0x400000
997 /* Supports Send Count (not just word count) in PBC */
998 #define IPATH_HAS_PBC_CNT 0x800000
999 /* Suppress heartbeat, even if turning off loopback */
1000 #define IPATH_NO_HRTBT 0x1000000
1001 #define IPATH_HAS_THRESH_UPDATE 0x4000000
1002 #define IPATH_HAS_MULT_IB_SPEED 0x8000000
1003 #define IPATH_IB_AUTONEG_INPROG 0x10000000
1004 #define IPATH_IB_AUTONEG_FAILED 0x20000000
1005 /* Linkdown-disable intentionally, Do not attempt to bring up */
1006 #define IPATH_IB_LINK_DISABLED 0x40000000
1007 #define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
1008
1009 /* Bits in GPIO for the added interrupts */
1010 #define IPATH_GPIO_PORT0_BIT 2
1011 #define IPATH_GPIO_RXUVL_BIT 3
1012 #define IPATH_GPIO_OVRUN_BIT 4
1013 #define IPATH_GPIO_LLI_BIT 5
1014 #define IPATH_GPIO_ERRINTR_MASK 0x38
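/* 0x38 is bits 3, 4 and 5 above (RXUVL, OVRUN and LLI), i.e. the three
 * error interrupts, not the port0 receive bit. */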
1015
1016 /* portdata flag bit offsets */
1017 /* waiting for a packet to arrive */
1018 #define IPATH_PORT_WAITING_RCV 2
1019 /* master has not finished initializing */
1020 #define IPATH_PORT_MASTER_UNINIT 4
1021 /* waiting for an urgent packet to arrive */
1022 #define IPATH_PORT_WAITING_URG 5
1023
1024 /* free up any allocated data at closes */
1025 void ipath_free_data(struct ipath_portdata *dd);
1026 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1027 void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1028 unsigned len, int avail);
1029 void ipath_init_iba6110_funcs(struct ipath_devdata *);
1030 void ipath_get_eeprom_info(struct ipath_devdata *);
1031 int ipath_update_eeprom_log(struct ipath_devdata *dd);
1032 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1033 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1034 void ipath_disarm_senderrbufs(struct ipath_devdata *);
1035 void ipath_force_pio_avail_update(struct ipath_devdata *);
1036 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1037
1038 /*
1039 * Set LED override, only the two LSBs have "public" meaning, but
1040 * any non-zero value substitutes them for the Link and LinkTrain
1041 * LED states.
1042 */
1043 #define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1044 #define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
1045 void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
1046
1047 /* send dma routines */
1048 int setup_sdma(struct ipath_devdata *);
1049 void teardown_sdma(struct ipath_devdata *);
1050 void ipath_restart_sdma(struct ipath_devdata *);
1051 void ipath_sdma_intr(struct ipath_devdata *);
1052 int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1053 u32, struct ipath_verbs_txreq *);
1054 /* ipath_sdma_lock should be locked before calling this. */
1055 int ipath_sdma_make_progress(struct ipath_devdata *dd);
1056
1057 /* must be called under ipath_sdma_lock */
1058 static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1059 {
1060 return dd->ipath_sdma_descq_cnt -
1061 (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1062 1 - dd->ipath_sdma_desc_nreserved;
1063 }
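/*
 * Worked example (illustrative numbers only): with descq_cnt == 256,
 * added == 1000, removed == 900, and nreserved == 2, there are 100
 * descriptors in flight and 256 - 100 - 1 - 2 = 153 free. The extra
 * "- 1" leaves one slot unused, the usual way to keep a full ring
 * distinguishable from an empty one.
 */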
1064
1065 static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1066 {
1067 dd->ipath_sdma_desc_nreserved += cnt;
1068 }
1069
1070 static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1071 {
1072 dd->ipath_sdma_desc_nreserved -= cnt;
1073 }
1074
1075 /*
1076 * number of words used for protocol header if not set by ipath_userinit()
1077 */
1078 #define IPATH_DFLT_RCVHDRSIZE 9
1079
1080 int ipath_get_user_pages(unsigned long, size_t, struct page **);
1081 void ipath_release_user_pages(struct page **, size_t);
1082 void ipath_release_user_pages_on_close(struct page **, size_t);
1083 int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
1084 int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1085 int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1086 int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
1087
1088 /* these are used for the registers that vary with port */
1089 void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
1090 unsigned, u64);
1091
1092 /*
1093 * We could have a single register get/put routine, that takes a group type,
1094 * but this is somewhat clearer and cleaner. It also gives us some error
1095 * checking. 64 bit register reads should always work, but are inefficient
1096 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
1097 * so we use kreg32 wherever possible. User register and counter register
1098 * reads are always 32 bit reads, so only one form of those routines.
1099 */
1100
1101 /*
1102 * At the moment, none of the s-registers are writable, so no
1103 * ipath_write_sreg().
1104 */
1105
1106 /**
1107 * ipath_read_ureg32 - read 32-bit virtualized per-port register
1108 * @dd: device
1109 * @regno: register number
1110 * @port: port number
1111 *
1112 * Return the contents of a register that is virtualized to be per port.
1113 * Returns 0 if the chip is not present or not mapped (not distinguishable
1114 * from valid contents at runtime; we may add a separate error variable).
1115 */
1116 static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
1117 ipath_ureg regno, int port)
1118 {
1119 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1120 return 0;
1121
1122 return readl(regno + (u64 __iomem *)
1123 (dd->ipath_uregbase +
1124 (char __iomem *)dd->ipath_kregbase +
1125 dd->ipath_ureg_align * port));
1126 }
1127
1128 /**
1129 * ipath_write_ureg - write 64-bit virtualized per-port register
1130 * @dd: device
1131 * @regno: register number
1132 * @value: value
1133 * @port: port
1134 *
1135 * Write the contents of a register that is virtualized to be per port.
1136 */
1137 static inline void ipath_write_ureg(const struct ipath_devdata *dd,
1138 ipath_ureg regno, u64 value, int port)
1139 {
1140 u64 __iomem *ubase = (u64 __iomem *)
1141 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
1142 dd->ipath_ureg_align * port);
1143 if (dd->ipath_kregbase)
1144 writeq(value, &ubase[regno]);
1145 }
1146
1147 static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
1148 ipath_kreg regno)
1149 {
1150 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1151 return -1;
1152 return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
1153 }
1154
1155 static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
1156 ipath_kreg regno)
1157 {
1158 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1159 return -1;
1160
1161 return readq(&dd->ipath_kregbase[regno]);
1162 }
1163
1164 static inline void ipath_write_kreg(const struct ipath_devdata *dd,
1165 ipath_kreg regno, u64 value)
1166 {
1167 if (dd->ipath_kregbase)
1168 writeq(value, &dd->ipath_kregbase[regno]);
1169 }
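/*
 * Typical shadow-register update (a sketch using the shadow fields
 * described above; 'some_bits' is a placeholder): modify the shadow
 * copy, then write the whole shadow back to the chip:
 *
 *	dd->ipath_rcvctrl |= some_bits;
 *	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
 */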
1170
1171 static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
1172 ipath_sreg regno)
1173 {
1174 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1175 return 0;
1176
1177 return readq(regno + (u64 __iomem *)
1178 (dd->ipath_cregbase +
1179 (char __iomem *)dd->ipath_kregbase));
1180 }
1181
1182 static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
1183 ipath_sreg regno)
1184 {
1185 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1186 return 0;
1187 return readl(regno + (u64 __iomem *)
1188 (dd->ipath_cregbase +
1189 (char __iomem *)dd->ipath_kregbase));
1190 }
1191
1192 static inline void ipath_write_creg(const struct ipath_devdata *dd,
1193 ipath_creg regno, u64 value)
1194 {
1195 if (dd->ipath_kregbase)
1196 writeq(value, regno + (u64 __iomem *)
1197 (dd->ipath_cregbase +
1198 (char __iomem *)dd->ipath_kregbase));
1199 }
1200
1201 static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
1202 {
1203 *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
1204 }
1205
1206 static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1207 {
1208 return (u32) le64_to_cpu(*((volatile __le64 *)
1209 pd->port_rcvhdrtail_kvaddr));
1210 }
1211
1212 static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1213 {
1214 const struct ipath_devdata *dd = pd->port_dd;
1215 u32 hdrqtail;
1216
1217 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1218 __le32 *rhf_addr;
1219 u32 seq;
1220
1221 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1222 pd->port_head + dd->ipath_rhf_offset;
1223 seq = ipath_hdrget_seq(rhf_addr);
1224 hdrqtail = pd->port_head;
1225 if (seq == pd->port_seq_cnt)
1226 hdrqtail++;
1227 } else
1228 hdrqtail = ipath_get_rcvhdrtail(pd);
1229
1230 return hdrqtail;
1231 }
1232
1233 static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1234 {
1235 return (dd->ipath_flags & IPATH_INTREG_64) ?
1236 ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
1237 }
1238
1239 /*
1240 * from contents of IBCStatus (or a saved copy), return linkstate.
1241 * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
1242 * everywhere, anyway (and should be, for almost all purposes).
1243 */
1244 static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
1245 {
1246 u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
1247 INFINIPATH_IBCS_LINKSTATE_MASK;
1248 if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
1249 state = INFINIPATH_IBCS_L_STATE_ACTIVE;
1250 return state;
1251 }
1252
1253 /* from contents of IBCStatus (or a saved copy), return linktrainingstate */
1254 static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1255 {
1256 return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1257 dd->ibcs_lts_mask;
1258 }
1259
1260 /*
1261 * from contents of IBCStatus (or a saved copy), return logical link state
1262 * combination of link state and linktraining state (down, active, init,
1263 * arm, etc.).
1264 */
1265 static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1266 {
1267 u32 ibs;
1268 ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1269 dd->ibcs_lts_mask;
1270 ibs |= (u32)(ibcs &
1271 (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1272 return ibs;
1273 }
1274
1275 /*
1276 * sysfs interface.
1277 */
1278
1279 struct device_driver;
1280
1281 extern const char ib_ipath_version[];
1282
1283 extern const struct attribute_group *ipath_driver_attr_groups[];
1284
1285 int ipath_device_create_group(struct device *, struct ipath_devdata *);
1286 void ipath_device_remove_group(struct device *, struct ipath_devdata *);
1287 int ipath_expose_reset(struct device *);
1288
1289 int ipath_init_ipathfs(void);
1290 void ipath_exit_ipathfs(void);
1291 int ipathfs_add_device(struct ipath_devdata *);
1292 int ipathfs_remove_device(struct ipath_devdata *);
1293
1294 /*
1295 * dma_addr wrappers - all 0's invalid for hw
1296 */
1297 dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1298 size_t, int);
1299 dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1300 const char *ipath_get_unit_name(int unit);
1301
1302 /*
1303 * Flush write combining store buffers (if present) and perform a write
1304 * barrier.
1305 */
1306 #if defined(CONFIG_X86_64)
1307 #define ipath_flush_wc() asm volatile("sfence" ::: "memory")
1308 #else
1309 #define ipath_flush_wc() wmb()
1310 #endif
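/*
 * Typical use (sketch): after copying a packet into a write-combining
 * mapped PIO buffer, call ipath_flush_wc() so the WC store buffers are
 * flushed to the chip before any subsequent ordering-dependent write.
 */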
1311
1312 extern unsigned ipath_debug; /* debugging bit mask */
1313 extern unsigned ipath_linkrecovery;
1314 extern unsigned ipath_mtu4096;
1315 extern struct mutex ipath_mutex;
1316
1317 #define IPATH_DRV_NAME "ib_ipath"
1318 #define IPATH_MAJOR 233
1319 #define IPATH_USER_MINOR_BASE 0
1320 #define IPATH_DIAGPKT_MINOR 127
1321 #define IPATH_DIAG_MINOR_BASE 129
1322 #define IPATH_NMINORS 255
1323
1324 #define ipath_dev_err(dd,fmt,...) \
1325 do { \
1326 const struct ipath_devdata *__dd = (dd); \
1327 if (__dd->pcidev) \
1328 dev_err(&__dd->pcidev->dev, "%s: " fmt, \
1329 ipath_get_unit_name(__dd->ipath_unit), \
1330 ##__VA_ARGS__); \
1331 else \
1332 printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
1333 ipath_get_unit_name(__dd->ipath_unit), \
1334 ##__VA_ARGS__); \
1335 } while (0)
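/*
 * Example (sketch): the unit name and message prefix are supplied by
 * the macro, so callers just pass a printf-style format:
 *
 *	ipath_dev_err(dd, "Failed to allocate rcvhdrq for port %u\n",
 *		      pd->port_port);
 */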
1336
1337 #if _IPATH_DEBUGGING
1338
1339 # define __IPATH_DBG_WHICH(which,fmt,...) \
1340 do { \
1341 if (unlikely(ipath_debug & (which))) \
1342 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1343 __func__,##__VA_ARGS__); \
1344 } while(0)
1345
1346 # define ipath_dbg(fmt,...) \
1347 __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
1348 # define ipath_cdbg(which,fmt,...) \
1349 __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
1350
1351 #else /* ! _IPATH_DEBUGGING */
1352
1353 # define ipath_dbg(fmt,...)
1354 # define ipath_cdbg(which,fmt,...)
1355
1356 #endif /* _IPATH_DEBUGGING */
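/*
 * Example (sketch): ipath_dbg() always logs under the generic __IPATH_DBG
 * class, while ipath_cdbg() selects one of the __IPATH_*DBG classes defined
 * in ipath_debug.h by its short name:
 *
 *	ipath_dbg("port %u rcvhdrq head %u\n", pd->port_port, pd->port_head);
 */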
1357
1358 /*
1359 * this is used for formatting hw error messages...
1360 */
1361 struct ipath_hwerror_msgs {
1362 u64 mask;
1363 const char *msg;
1364 };
1365
1366 #define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
1367
1368 /* in ipath_intr.c... */
1369 void ipath_format_hwerrors(u64 hwerrs,
1370 const struct ipath_hwerror_msgs *hwerrmsgs,
1371 size_t nhwerrmsgs,
1372 char *msg, size_t lmsg);
1373
1374 #endif /* _IPATH_KERNEL_H */