// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/reboot.h>
17 #include <linux/slab.h>
18 #include <linux/bitops.h>
19 #include <linux/delay.h>
20 #include <linux/debugfs.h>
21 #include <linux/seq_file.h>
23 #include <linux/genhd.h>
24 #include <linux/idr.h>
26 #include "rsxx_priv.h"
30 #define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
32 MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
33 MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
34 MODULE_LICENSE("GPL");
35 MODULE_VERSION(DRIVER_VERSION
);
37 static unsigned int force_legacy
= NO_LEGACY
;
38 module_param(force_legacy
, uint
, 0444);
39 MODULE_PARM_DESC(force_legacy
, "Force the use of legacy type PCI interrupts");
41 static unsigned int sync_start
= 1;
42 module_param(sync_start
, uint
, 0444);
43 MODULE_PARM_DESC(sync_start
, "On by Default: Driver load will not complete "
44 "until the card startup has completed.");
46 static DEFINE_IDA(rsxx_disk_ida
);
48 /* --------------------Debugfs Setup ------------------- */
50 static int rsxx_attr_pci_regs_show(struct seq_file
*m
, void *p
)
52 struct rsxx_cardinfo
*card
= m
->private;
54 seq_printf(m
, "HWID 0x%08x\n",
55 ioread32(card
->regmap
+ HWID
));
56 seq_printf(m
, "SCRATCH 0x%08x\n",
57 ioread32(card
->regmap
+ SCRATCH
));
58 seq_printf(m
, "IER 0x%08x\n",
59 ioread32(card
->regmap
+ IER
));
60 seq_printf(m
, "IPR 0x%08x\n",
61 ioread32(card
->regmap
+ IPR
));
62 seq_printf(m
, "CREG_CMD 0x%08x\n",
63 ioread32(card
->regmap
+ CREG_CMD
));
64 seq_printf(m
, "CREG_ADD 0x%08x\n",
65 ioread32(card
->regmap
+ CREG_ADD
));
66 seq_printf(m
, "CREG_CNT 0x%08x\n",
67 ioread32(card
->regmap
+ CREG_CNT
));
68 seq_printf(m
, "CREG_STAT 0x%08x\n",
69 ioread32(card
->regmap
+ CREG_STAT
));
70 seq_printf(m
, "CREG_DATA0 0x%08x\n",
71 ioread32(card
->regmap
+ CREG_DATA0
));
72 seq_printf(m
, "CREG_DATA1 0x%08x\n",
73 ioread32(card
->regmap
+ CREG_DATA1
));
74 seq_printf(m
, "CREG_DATA2 0x%08x\n",
75 ioread32(card
->regmap
+ CREG_DATA2
));
76 seq_printf(m
, "CREG_DATA3 0x%08x\n",
77 ioread32(card
->regmap
+ CREG_DATA3
));
78 seq_printf(m
, "CREG_DATA4 0x%08x\n",
79 ioread32(card
->regmap
+ CREG_DATA4
));
80 seq_printf(m
, "CREG_DATA5 0x%08x\n",
81 ioread32(card
->regmap
+ CREG_DATA5
));
82 seq_printf(m
, "CREG_DATA6 0x%08x\n",
83 ioread32(card
->regmap
+ CREG_DATA6
));
84 seq_printf(m
, "CREG_DATA7 0x%08x\n",
85 ioread32(card
->regmap
+ CREG_DATA7
));
86 seq_printf(m
, "INTR_COAL 0x%08x\n",
87 ioread32(card
->regmap
+ INTR_COAL
));
88 seq_printf(m
, "HW_ERROR 0x%08x\n",
89 ioread32(card
->regmap
+ HW_ERROR
));
90 seq_printf(m
, "DEBUG0 0x%08x\n",
91 ioread32(card
->regmap
+ PCI_DEBUG0
));
92 seq_printf(m
, "DEBUG1 0x%08x\n",
93 ioread32(card
->regmap
+ PCI_DEBUG1
));
94 seq_printf(m
, "DEBUG2 0x%08x\n",
95 ioread32(card
->regmap
+ PCI_DEBUG2
));
96 seq_printf(m
, "DEBUG3 0x%08x\n",
97 ioread32(card
->regmap
+ PCI_DEBUG3
));
98 seq_printf(m
, "DEBUG4 0x%08x\n",
99 ioread32(card
->regmap
+ PCI_DEBUG4
));
100 seq_printf(m
, "DEBUG5 0x%08x\n",
101 ioread32(card
->regmap
+ PCI_DEBUG5
));
102 seq_printf(m
, "DEBUG6 0x%08x\n",
103 ioread32(card
->regmap
+ PCI_DEBUG6
));
104 seq_printf(m
, "DEBUG7 0x%08x\n",
105 ioread32(card
->regmap
+ PCI_DEBUG7
));
106 seq_printf(m
, "RECONFIG 0x%08x\n",
107 ioread32(card
->regmap
+ PCI_RECONFIG
));
112 static int rsxx_attr_stats_show(struct seq_file
*m
, void *p
)
114 struct rsxx_cardinfo
*card
= m
->private;
117 for (i
= 0; i
< card
->n_targets
; i
++) {
118 seq_printf(m
, "Ctrl %d CRC Errors = %d\n",
119 i
, card
->ctrl
[i
].stats
.crc_errors
);
120 seq_printf(m
, "Ctrl %d Hard Errors = %d\n",
121 i
, card
->ctrl
[i
].stats
.hard_errors
);
122 seq_printf(m
, "Ctrl %d Soft Errors = %d\n",
123 i
, card
->ctrl
[i
].stats
.soft_errors
);
124 seq_printf(m
, "Ctrl %d Writes Issued = %d\n",
125 i
, card
->ctrl
[i
].stats
.writes_issued
);
126 seq_printf(m
, "Ctrl %d Writes Failed = %d\n",
127 i
, card
->ctrl
[i
].stats
.writes_failed
);
128 seq_printf(m
, "Ctrl %d Reads Issued = %d\n",
129 i
, card
->ctrl
[i
].stats
.reads_issued
);
130 seq_printf(m
, "Ctrl %d Reads Failed = %d\n",
131 i
, card
->ctrl
[i
].stats
.reads_failed
);
132 seq_printf(m
, "Ctrl %d Reads Retried = %d\n",
133 i
, card
->ctrl
[i
].stats
.reads_retried
);
134 seq_printf(m
, "Ctrl %d Discards Issued = %d\n",
135 i
, card
->ctrl
[i
].stats
.discards_issued
);
136 seq_printf(m
, "Ctrl %d Discards Failed = %d\n",
137 i
, card
->ctrl
[i
].stats
.discards_failed
);
138 seq_printf(m
, "Ctrl %d DMA SW Errors = %d\n",
139 i
, card
->ctrl
[i
].stats
.dma_sw_err
);
140 seq_printf(m
, "Ctrl %d DMA HW Faults = %d\n",
141 i
, card
->ctrl
[i
].stats
.dma_hw_fault
);
142 seq_printf(m
, "Ctrl %d DMAs Cancelled = %d\n",
143 i
, card
->ctrl
[i
].stats
.dma_cancelled
);
144 seq_printf(m
, "Ctrl %d SW Queue Depth = %d\n",
145 i
, card
->ctrl
[i
].stats
.sw_q_depth
);
146 seq_printf(m
, "Ctrl %d HW Queue Depth = %d\n",
147 i
, atomic_read(&card
->ctrl
[i
].stats
.hw_q_depth
));
153 static int rsxx_attr_stats_open(struct inode
*inode
, struct file
*file
)
155 return single_open(file
, rsxx_attr_stats_show
, inode
->i_private
);
158 static int rsxx_attr_pci_regs_open(struct inode
*inode
, struct file
*file
)
160 return single_open(file
, rsxx_attr_pci_regs_show
, inode
->i_private
);
163 static ssize_t
rsxx_cram_read(struct file
*fp
, char __user
*ubuf
,
164 size_t cnt
, loff_t
*ppos
)
166 struct rsxx_cardinfo
*card
= file_inode(fp
)->i_private
;
170 buf
= kzalloc(cnt
, GFP_KERNEL
);
174 st
= rsxx_creg_read(card
, CREG_ADD_CRAM
+ (u32
)*ppos
, cnt
, buf
, 1);
176 st
= copy_to_user(ubuf
, buf
, cnt
);
184 static ssize_t
rsxx_cram_write(struct file
*fp
, const char __user
*ubuf
,
185 size_t cnt
, loff_t
*ppos
)
187 struct rsxx_cardinfo
*card
= file_inode(fp
)->i_private
;
191 buf
= memdup_user(ubuf
, cnt
);
195 st
= rsxx_creg_write(card
, CREG_ADD_CRAM
+ (u32
)*ppos
, cnt
, buf
, 1);
203 static const struct file_operations debugfs_cram_fops
= {
204 .owner
= THIS_MODULE
,
205 .read
= rsxx_cram_read
,
206 .write
= rsxx_cram_write
,
209 static const struct file_operations debugfs_stats_fops
= {
210 .owner
= THIS_MODULE
,
211 .open
= rsxx_attr_stats_open
,
214 .release
= single_release
,
217 static const struct file_operations debugfs_pci_regs_fops
= {
218 .owner
= THIS_MODULE
,
219 .open
= rsxx_attr_pci_regs_open
,
222 .release
= single_release
,
225 static void rsxx_debugfs_dev_new(struct rsxx_cardinfo
*card
)
227 struct dentry
*debugfs_stats
;
228 struct dentry
*debugfs_pci_regs
;
229 struct dentry
*debugfs_cram
;
231 card
->debugfs_dir
= debugfs_create_dir(card
->gendisk
->disk_name
, NULL
);
232 if (IS_ERR_OR_NULL(card
->debugfs_dir
))
233 goto failed_debugfs_dir
;
235 debugfs_stats
= debugfs_create_file("stats", 0444,
236 card
->debugfs_dir
, card
,
237 &debugfs_stats_fops
);
238 if (IS_ERR_OR_NULL(debugfs_stats
))
239 goto failed_debugfs_stats
;
241 debugfs_pci_regs
= debugfs_create_file("pci_regs", 0444,
242 card
->debugfs_dir
, card
,
243 &debugfs_pci_regs_fops
);
244 if (IS_ERR_OR_NULL(debugfs_pci_regs
))
245 goto failed_debugfs_pci_regs
;
247 debugfs_cram
= debugfs_create_file("cram", 0644,
248 card
->debugfs_dir
, card
,
250 if (IS_ERR_OR_NULL(debugfs_cram
))
251 goto failed_debugfs_cram
;
255 debugfs_remove(debugfs_pci_regs
);
256 failed_debugfs_pci_regs
:
257 debugfs_remove(debugfs_stats
);
258 failed_debugfs_stats
:
259 debugfs_remove(card
->debugfs_dir
);
261 card
->debugfs_dir
= NULL
;
264 /*----------------- Interrupt Control & Handling -------------------*/
266 static void rsxx_mask_interrupts(struct rsxx_cardinfo
*card
)
/* Set @intr bit(s) in the given software interrupt mask.
 * NOTE(review): body was lost in the mangled source; reconstructed. */
static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}
/* Clear @intr bit(s) in the given software interrupt mask.
 * NOTE(review): body was lost in the mangled source; reconstructed. */
static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}
/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
289 void rsxx_enable_ier(struct rsxx_cardinfo
*card
, unsigned int intr
)
291 if (unlikely(card
->halt
) ||
292 unlikely(card
->eeh_state
))
295 __enable_intr(&card
->ier_mask
, intr
);
296 iowrite32(card
->ier_mask
, card
->regmap
+ IER
);
299 void rsxx_disable_ier(struct rsxx_cardinfo
*card
, unsigned int intr
)
301 if (unlikely(card
->eeh_state
))
304 __disable_intr(&card
->ier_mask
, intr
);
305 iowrite32(card
->ier_mask
, card
->regmap
+ IER
);
308 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo
*card
,
311 if (unlikely(card
->halt
) ||
312 unlikely(card
->eeh_state
))
315 __enable_intr(&card
->isr_mask
, intr
);
316 __enable_intr(&card
->ier_mask
, intr
);
317 iowrite32(card
->ier_mask
, card
->regmap
+ IER
);
319 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo
*card
,
322 if (unlikely(card
->eeh_state
))
325 __disable_intr(&card
->isr_mask
, intr
);
326 __disable_intr(&card
->ier_mask
, intr
);
327 iowrite32(card
->ier_mask
, card
->regmap
+ IER
);
330 static irqreturn_t
rsxx_isr(int irq
, void *pdata
)
332 struct rsxx_cardinfo
*card
= pdata
;
338 spin_lock(&card
->irq_lock
);
343 if (unlikely(card
->eeh_state
))
346 isr
= ioread32(card
->regmap
+ ISR
);
347 if (isr
== 0xffffffff) {
349 * A few systems seem to have an intermittent issue
350 * where PCI reads return all Fs, but retrying the read
351 * a little later will return as expected.
353 dev_info(CARD_TO_DEV(card
),
354 "ISR = 0xFFFFFFFF, retrying later\n");
358 isr
&= card
->isr_mask
;
362 for (i
= 0; i
< card
->n_targets
; i
++) {
363 if (isr
& CR_INTR_DMA(i
)) {
364 if (card
->ier_mask
& CR_INTR_DMA(i
)) {
365 rsxx_disable_ier(card
, CR_INTR_DMA(i
));
368 queue_work(card
->ctrl
[i
].done_wq
,
369 &card
->ctrl
[i
].dma_done_work
);
374 if (isr
& CR_INTR_CREG
) {
375 queue_work(card
->creg_ctrl
.creg_wq
,
376 &card
->creg_ctrl
.done_work
);
380 if (isr
& CR_INTR_EVENT
) {
381 queue_work(card
->event_wq
, &card
->event_work
);
382 rsxx_disable_ier_and_isr(card
, CR_INTR_EVENT
);
385 } while (reread_isr
);
387 spin_unlock(&card
->irq_lock
);
389 return handled
? IRQ_HANDLED
: IRQ_NONE
;
392 /*----------------- Card Event Handler -------------------*/
/*
 * Map a one-hot card state bit to a human-readable name.
 * Index 0 ("Unknown") covers state == 0, since ffs(0) == 0.
 */
static const char * const rsxx_card_state_to_str(unsigned int state)
{
	static const char * const state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};

	return state_strings[ffs(state)];
}
404 static void card_state_change(struct rsxx_cardinfo
*card
,
405 unsigned int new_state
)
409 dev_info(CARD_TO_DEV(card
),
410 "card state change detected.(%s -> %s)\n",
411 rsxx_card_state_to_str(card
->state
),
412 rsxx_card_state_to_str(new_state
));
414 card
->state
= new_state
;
416 /* Don't attach DMA interfaces if the card has an invalid config */
417 if (!card
->config_valid
)
421 case CARD_STATE_RD_ONLY_FAULT
:
422 dev_crit(CARD_TO_DEV(card
),
423 "Hardware has entered read-only mode!\n");
425 * Fall through so the DMA devices can be attached and
426 * the user can attempt to pull off their data.
429 case CARD_STATE_GOOD
:
430 st
= rsxx_get_card_size8(card
, &card
->size8
);
432 dev_err(CARD_TO_DEV(card
),
433 "Failed attaching DMA devices\n");
435 if (card
->config_valid
)
436 set_capacity(card
->gendisk
, card
->size8
>> 9);
439 case CARD_STATE_FAULT
:
440 dev_crit(CARD_TO_DEV(card
),
441 "Hardware Fault reported!\n");
444 /* Everything else, detach DMA interface if it's attached. */
445 case CARD_STATE_SHUTDOWN
:
446 case CARD_STATE_STARTING
:
447 case CARD_STATE_FORMATTING
:
448 case CARD_STATE_UNINITIALIZED
:
449 case CARD_STATE_SHUTTING_DOWN
:
451 * dStroy is a term coined by marketing to represent the low level
454 case CARD_STATE_DSTROYING
:
455 set_capacity(card
->gendisk
, 0);
460 static void card_event_handler(struct work_struct
*work
)
462 struct rsxx_cardinfo
*card
;
467 card
= container_of(work
, struct rsxx_cardinfo
, event_work
);
469 if (unlikely(card
->halt
))
473 * Enable the interrupt now to avoid any weird race conditions where a
474 * state change might occur while rsxx_get_card_state() is
475 * processing a returned creg cmd.
477 spin_lock_irqsave(&card
->irq_lock
, flags
);
478 rsxx_enable_ier_and_isr(card
, CR_INTR_EVENT
);
479 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
481 st
= rsxx_get_card_state(card
, &state
);
483 dev_info(CARD_TO_DEV(card
),
484 "Failed reading state after event.\n");
488 if (card
->state
!= state
)
489 card_state_change(card
, state
);
491 if (card
->creg_ctrl
.creg_stats
.stat
& CREG_STAT_LOG_PENDING
)
492 rsxx_read_hw_log(card
);
495 /*----------------- Card Operations -------------------*/
496 static int card_shutdown(struct rsxx_cardinfo
*card
)
500 const int timeout
= msecs_to_jiffies(120000);
503 /* We can't issue a shutdown if the card is in a transition state */
506 st
= rsxx_get_card_state(card
, &state
);
509 } while (state
== CARD_STATE_STARTING
&&
510 (jiffies
- start
< timeout
));
512 if (state
== CARD_STATE_STARTING
)
515 /* Only issue a shutdown if we need to */
516 if ((state
!= CARD_STATE_SHUTTING_DOWN
) &&
517 (state
!= CARD_STATE_SHUTDOWN
)) {
518 st
= rsxx_issue_card_cmd(card
, CARD_CMD_SHUTDOWN
);
525 st
= rsxx_get_card_state(card
, &state
);
528 } while (state
!= CARD_STATE_SHUTDOWN
&&
529 (jiffies
- start
< timeout
));
531 if (state
!= CARD_STATE_SHUTDOWN
)
537 static int rsxx_eeh_frozen(struct pci_dev
*dev
)
539 struct rsxx_cardinfo
*card
= pci_get_drvdata(dev
);
543 dev_warn(&dev
->dev
, "IBM Flash Adapter PCI: preparing for slot reset.\n");
546 rsxx_mask_interrupts(card
);
549 * We need to guarantee that the write for eeh_state and masking
550 * interrupts does not become reordered. This will prevent a possible
551 * race condition with the EEH code.
555 pci_disable_device(dev
);
557 st
= rsxx_eeh_save_issued_dmas(card
);
561 rsxx_eeh_save_issued_creg(card
);
563 for (i
= 0; i
< card
->n_targets
; i
++) {
564 if (card
->ctrl
[i
].status
.buf
)
565 dma_free_coherent(&card
->dev
->dev
,
567 card
->ctrl
[i
].status
.buf
,
568 card
->ctrl
[i
].status
.dma_addr
);
569 if (card
->ctrl
[i
].cmd
.buf
)
570 dma_free_coherent(&card
->dev
->dev
,
571 COMMAND_BUFFER_SIZE8
,
572 card
->ctrl
[i
].cmd
.buf
,
573 card
->ctrl
[i
].cmd
.dma_addr
);
579 static void rsxx_eeh_failure(struct pci_dev
*dev
)
581 struct rsxx_cardinfo
*card
= pci_get_drvdata(dev
);
585 dev_err(&dev
->dev
, "IBM Flash Adapter PCI: disabling failed card.\n");
590 for (i
= 0; i
< card
->n_targets
; i
++) {
591 spin_lock_bh(&card
->ctrl
[i
].queue_lock
);
592 cnt
= rsxx_cleanup_dma_queue(&card
->ctrl
[i
],
593 &card
->ctrl
[i
].queue
,
595 spin_unlock_bh(&card
->ctrl
[i
].queue_lock
);
597 cnt
+= rsxx_dma_cancel(&card
->ctrl
[i
]);
600 dev_info(CARD_TO_DEV(card
),
601 "Freed %d queued DMAs on channel %d\n",
602 cnt
, card
->ctrl
[i
].id
);
606 static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo
*card
)
611 /* We need to wait for the hardware to reset */
612 while (iter
++ < 10) {
613 status
= ioread32(card
->regmap
+ PCI_RECONFIG
);
615 if (status
& RSXX_FLUSH_BUSY
) {
620 if (status
& RSXX_FLUSH_TIMEOUT
)
621 dev_warn(CARD_TO_DEV(card
), "HW: flash controller timeout\n");
625 /* Hardware failed resetting itself. */
629 static pci_ers_result_t
rsxx_error_detected(struct pci_dev
*dev
,
630 pci_channel_state_t error
)
634 if (dev
->revision
< RSXX_EEH_SUPPORT
)
635 return PCI_ERS_RESULT_NONE
;
637 if (error
== pci_channel_io_perm_failure
) {
638 rsxx_eeh_failure(dev
);
639 return PCI_ERS_RESULT_DISCONNECT
;
642 st
= rsxx_eeh_frozen(dev
);
644 dev_err(&dev
->dev
, "Slot reset setup failed\n");
645 rsxx_eeh_failure(dev
);
646 return PCI_ERS_RESULT_DISCONNECT
;
649 return PCI_ERS_RESULT_NEED_RESET
;
652 static pci_ers_result_t
rsxx_slot_reset(struct pci_dev
*dev
)
654 struct rsxx_cardinfo
*card
= pci_get_drvdata(dev
);
660 "IBM Flash Adapter PCI: recovering from slot reset.\n");
662 st
= pci_enable_device(dev
);
664 goto failed_hw_setup
;
668 st
= rsxx_eeh_fifo_flush_poll(card
);
670 goto failed_hw_setup
;
672 rsxx_dma_queue_reset(card
);
674 for (i
= 0; i
< card
->n_targets
; i
++) {
675 st
= rsxx_hw_buffers_init(dev
, &card
->ctrl
[i
]);
677 goto failed_hw_buffers_init
;
680 if (card
->config_valid
)
681 rsxx_dma_configure(card
);
683 /* Clears the ISR register from spurious interrupts */
684 st
= ioread32(card
->regmap
+ ISR
);
688 spin_lock_irqsave(&card
->irq_lock
, flags
);
689 if (card
->n_targets
& RSXX_MAX_TARGETS
)
690 rsxx_enable_ier_and_isr(card
, CR_INTR_ALL_G
);
692 rsxx_enable_ier_and_isr(card
, CR_INTR_ALL_C
);
693 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
695 rsxx_kick_creg_queue(card
);
697 for (i
= 0; i
< card
->n_targets
; i
++) {
698 spin_lock(&card
->ctrl
[i
].queue_lock
);
699 if (list_empty(&card
->ctrl
[i
].queue
)) {
700 spin_unlock(&card
->ctrl
[i
].queue_lock
);
703 spin_unlock(&card
->ctrl
[i
].queue_lock
);
705 queue_work(card
->ctrl
[i
].issue_wq
,
706 &card
->ctrl
[i
].issue_dma_work
);
709 dev_info(&dev
->dev
, "IBM Flash Adapter PCI: recovery complete.\n");
711 return PCI_ERS_RESULT_RECOVERED
;
713 failed_hw_buffers_init
:
714 for (i
= 0; i
< card
->n_targets
; i
++) {
715 if (card
->ctrl
[i
].status
.buf
)
716 dma_free_coherent(&card
->dev
->dev
,
718 card
->ctrl
[i
].status
.buf
,
719 card
->ctrl
[i
].status
.dma_addr
);
720 if (card
->ctrl
[i
].cmd
.buf
)
721 dma_free_coherent(&card
->dev
->dev
,
722 COMMAND_BUFFER_SIZE8
,
723 card
->ctrl
[i
].cmd
.buf
,
724 card
->ctrl
[i
].cmd
.dma_addr
);
727 rsxx_eeh_failure(dev
);
728 return PCI_ERS_RESULT_DISCONNECT
;
732 /*----------------- Driver Initialization & Setup -------------------*/
733 /* Returns: 0 if the driver is compatible with the device
734 -1 if the driver is NOT compatible with the device */
735 static int rsxx_compatibility_check(struct rsxx_cardinfo
*card
)
737 unsigned char pci_rev
;
739 pci_read_config_byte(card
->dev
, PCI_REVISION_ID
, &pci_rev
);
741 if (pci_rev
> RS70_PCI_REV_SUPPORTED
)
746 static int rsxx_pci_probe(struct pci_dev
*dev
,
747 const struct pci_device_id
*id
)
749 struct rsxx_cardinfo
*card
;
751 unsigned int sync_timeout
;
753 dev_info(&dev
->dev
, "PCI-Flash SSD discovered\n");
755 card
= kzalloc(sizeof(*card
), GFP_KERNEL
);
760 pci_set_drvdata(dev
, card
);
762 st
= ida_alloc(&rsxx_disk_ida
, GFP_KERNEL
);
767 st
= pci_enable_device(dev
);
773 st
= dma_set_mask(&dev
->dev
, DMA_BIT_MASK(64));
775 dev_err(CARD_TO_DEV(card
),
776 "No usable DMA configuration,aborting\n");
777 goto failed_dma_mask
;
780 st
= pci_request_regions(dev
, DRIVER_NAME
);
782 dev_err(CARD_TO_DEV(card
),
783 "Failed to request memory region\n");
784 goto failed_request_regions
;
787 if (pci_resource_len(dev
, 0) == 0) {
788 dev_err(CARD_TO_DEV(card
), "BAR0 has length 0!\n");
793 card
->regmap
= pci_iomap(dev
, 0, 0);
795 dev_err(CARD_TO_DEV(card
), "Failed to map BAR0\n");
800 spin_lock_init(&card
->irq_lock
);
804 spin_lock_irq(&card
->irq_lock
);
805 rsxx_disable_ier_and_isr(card
, CR_INTR_ALL
);
806 spin_unlock_irq(&card
->irq_lock
);
809 st
= pci_enable_msi(dev
);
811 dev_warn(CARD_TO_DEV(card
),
812 "Failed to enable MSI\n");
815 st
= request_irq(dev
->irq
, rsxx_isr
, IRQF_SHARED
,
818 dev_err(CARD_TO_DEV(card
),
819 "Failed requesting IRQ%d\n", dev
->irq
);
823 /************* Setup Processor Command Interface *************/
824 st
= rsxx_creg_setup(card
);
826 dev_err(CARD_TO_DEV(card
), "Failed to setup creg interface.\n");
827 goto failed_creg_setup
;
830 spin_lock_irq(&card
->irq_lock
);
831 rsxx_enable_ier_and_isr(card
, CR_INTR_CREG
);
832 spin_unlock_irq(&card
->irq_lock
);
834 st
= rsxx_compatibility_check(card
);
836 dev_warn(CARD_TO_DEV(card
),
837 "Incompatible driver detected. Please update the driver.\n");
839 goto failed_compatiblity_check
;
842 /************* Load Card Config *************/
843 st
= rsxx_load_config(card
);
845 dev_err(CARD_TO_DEV(card
),
846 "Failed loading card config\n");
848 /************* Setup DMA Engine *************/
849 st
= rsxx_get_num_targets(card
, &card
->n_targets
);
851 dev_info(CARD_TO_DEV(card
),
852 "Failed reading the number of DMA targets\n");
854 card
->ctrl
= kcalloc(card
->n_targets
, sizeof(*card
->ctrl
),
858 goto failed_dma_setup
;
861 st
= rsxx_dma_setup(card
);
863 dev_info(CARD_TO_DEV(card
),
864 "Failed to setup DMA engine\n");
865 goto failed_dma_setup
;
868 /************* Setup Card Event Handler *************/
869 card
->event_wq
= create_singlethread_workqueue(DRIVER_NAME
"_event");
870 if (!card
->event_wq
) {
871 dev_err(CARD_TO_DEV(card
), "Failed card event setup.\n");
872 goto failed_event_handler
;
875 INIT_WORK(&card
->event_work
, card_event_handler
);
877 st
= rsxx_setup_dev(card
);
879 goto failed_create_dev
;
881 rsxx_get_card_state(card
, &card
->state
);
883 dev_info(CARD_TO_DEV(card
),
885 rsxx_card_state_to_str(card
->state
));
888 * Now that the DMA Engine and devices have been setup,
889 * we can enable the event interrupt(it kicks off actions in
890 * those layers so we couldn't enable it right away.)
892 spin_lock_irq(&card
->irq_lock
);
893 rsxx_enable_ier_and_isr(card
, CR_INTR_EVENT
);
894 spin_unlock_irq(&card
->irq_lock
);
896 if (card
->state
== CARD_STATE_SHUTDOWN
) {
897 st
= rsxx_issue_card_cmd(card
, CARD_CMD_STARTUP
);
899 dev_crit(CARD_TO_DEV(card
),
900 "Failed issuing card startup\n");
902 sync_timeout
= SYNC_START_TIMEOUT
;
904 dev_info(CARD_TO_DEV(card
),
905 "Waiting for card to startup\n");
911 rsxx_get_card_state(card
, &card
->state
);
912 } while (sync_timeout
&&
913 (card
->state
== CARD_STATE_STARTING
));
915 if (card
->state
== CARD_STATE_STARTING
) {
916 dev_warn(CARD_TO_DEV(card
),
917 "Card startup timed out\n");
920 dev_info(CARD_TO_DEV(card
),
922 rsxx_card_state_to_str(card
->state
));
923 st
= rsxx_get_card_size8(card
, &card
->size8
);
928 } else if (card
->state
== CARD_STATE_GOOD
||
929 card
->state
== CARD_STATE_RD_ONLY_FAULT
) {
930 st
= rsxx_get_card_size8(card
, &card
->size8
);
935 rsxx_attach_dev(card
);
937 /************* Setup Debugfs *************/
938 rsxx_debugfs_dev_new(card
);
943 destroy_workqueue(card
->event_wq
);
944 card
->event_wq
= NULL
;
945 failed_event_handler
:
946 rsxx_dma_destroy(card
);
948 failed_compatiblity_check
:
949 destroy_workqueue(card
->creg_ctrl
.creg_wq
);
950 card
->creg_ctrl
.creg_wq
= NULL
;
952 spin_lock_irq(&card
->irq_lock
);
953 rsxx_disable_ier_and_isr(card
, CR_INTR_ALL
);
954 spin_unlock_irq(&card
->irq_lock
);
955 free_irq(dev
->irq
, card
);
957 pci_disable_msi(dev
);
959 pci_iounmap(dev
, card
->regmap
);
961 pci_release_regions(dev
);
962 failed_request_regions
:
964 pci_disable_device(dev
);
966 ida_free(&rsxx_disk_ida
, card
->disk_id
);
973 static void rsxx_pci_remove(struct pci_dev
*dev
)
975 struct rsxx_cardinfo
*card
= pci_get_drvdata(dev
);
983 dev_info(CARD_TO_DEV(card
),
984 "Removing PCI-Flash SSD.\n");
986 rsxx_detach_dev(card
);
988 for (i
= 0; i
< card
->n_targets
; i
++) {
989 spin_lock_irqsave(&card
->irq_lock
, flags
);
990 rsxx_disable_ier_and_isr(card
, CR_INTR_DMA(i
));
991 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
994 st
= card_shutdown(card
);
996 dev_crit(CARD_TO_DEV(card
), "Shutdown failed!\n");
998 /* Sync outstanding event handlers. */
999 spin_lock_irqsave(&card
->irq_lock
, flags
);
1000 rsxx_disable_ier_and_isr(card
, CR_INTR_EVENT
);
1001 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
1003 cancel_work_sync(&card
->event_work
);
1005 destroy_workqueue(card
->event_wq
);
1006 rsxx_destroy_dev(card
);
1007 rsxx_dma_destroy(card
);
1008 destroy_workqueue(card
->creg_ctrl
.creg_wq
);
1010 spin_lock_irqsave(&card
->irq_lock
, flags
);
1011 rsxx_disable_ier_and_isr(card
, CR_INTR_ALL
);
1012 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
1014 /* Prevent work_structs from re-queuing themselves. */
1017 debugfs_remove_recursive(card
->debugfs_dir
);
1019 free_irq(dev
->irq
, card
);
1022 pci_disable_msi(dev
);
1024 rsxx_creg_destroy(card
);
1026 pci_iounmap(dev
, card
->regmap
);
1028 pci_disable_device(dev
);
1029 pci_release_regions(dev
);
1031 ida_free(&rsxx_disk_ida
, card
->disk_id
);
1035 static int rsxx_pci_suspend(struct pci_dev
*dev
, pm_message_t state
)
1037 /* We don't support suspend at this time. */
1041 static void rsxx_pci_shutdown(struct pci_dev
*dev
)
1043 struct rsxx_cardinfo
*card
= pci_get_drvdata(dev
);
1044 unsigned long flags
;
1050 dev_info(CARD_TO_DEV(card
), "Shutting down PCI-Flash SSD.\n");
1052 rsxx_detach_dev(card
);
1054 for (i
= 0; i
< card
->n_targets
; i
++) {
1055 spin_lock_irqsave(&card
->irq_lock
, flags
);
1056 rsxx_disable_ier_and_isr(card
, CR_INTR_DMA(i
));
1057 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
1060 card_shutdown(card
);
1063 static const struct pci_error_handlers rsxx_err_handler
= {
1064 .error_detected
= rsxx_error_detected
,
1065 .slot_reset
= rsxx_slot_reset
,
1068 static const struct pci_device_id rsxx_pci_ids
[] = {
1069 {PCI_DEVICE(PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_FS70_FLASH
)},
1070 {PCI_DEVICE(PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_FS80_FLASH
)},
1074 MODULE_DEVICE_TABLE(pci
, rsxx_pci_ids
);
1076 static struct pci_driver rsxx_pci_driver
= {
1077 .name
= DRIVER_NAME
,
1078 .id_table
= rsxx_pci_ids
,
1079 .probe
= rsxx_pci_probe
,
1080 .remove
= rsxx_pci_remove
,
1081 .suspend
= rsxx_pci_suspend
,
1082 .shutdown
= rsxx_pci_shutdown
,
1083 .err_handler
= &rsxx_err_handler
,
1086 static int __init
rsxx_core_init(void)
1090 st
= rsxx_dev_init();
1094 st
= rsxx_dma_init();
1096 goto dma_init_failed
;
1098 st
= rsxx_creg_init();
1100 goto creg_init_failed
;
1102 return pci_register_driver(&rsxx_pci_driver
);
1112 static void __exit
rsxx_core_cleanup(void)
1114 pci_unregister_driver(&rsxx_pci_driver
);
1115 rsxx_creg_cleanup();
module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);