2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
65 #define MODULE_NAME "hio"
66 #define DRIVER_VERSION "2.1.0.40"
67 #define DRIVER_VERSION_LEN 16
69 #define SSD_FW_MIN 0x1
71 #define SSD_DEV_NAME MODULE_NAME
72 #define SSD_DEV_NAME_LEN 16
73 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
74 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
79 #define SSD_MAJOR_SL 0
82 #define SSD_MAX_DEV 702
83 #define SSD_ALPHABET_NUM 26
85 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
86 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
87 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
88 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
91 #define SSD_SLAVE_PORT_DEVID 0x000a
95 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
97 #define SSD_ESCAPE_IRQ
103 #define SSD_MSIX_VEC 8
106 #undef SSD_ESCAPE_IRQ
107 #define SSD_MSIX_AFFINITY_FORCE
112 /* Over temperature protect */
113 #define SSD_OT_PROTECT
115 #ifdef SSD_QUEUE_PBIO
116 #define BIO_SSD_PBIO 20
120 //#define SSD_DEBUG_ERR
123 #define SSD_CMD_TIMEOUT (60*HZ)
126 #define SSD_SPI_TIMEOUT (5*HZ)
127 #define SSD_I2C_TIMEOUT (5*HZ)
129 #define SSD_I2C_MAX_DATA (127)
130 #define SSD_SMBUS_BLOCK_MAX (32)
131 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
134 #define SSD_INIT_WAIT (1000) //1s
135 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
136 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
137 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
138 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
139 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
141 /* blkdev busy wait */
142 #define SSD_DEV_BUSY_WAIT 1000 //ms
143 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
146 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
147 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
149 #define SSD_BM_RETRY_MAX 7
151 /* bm routine interval */
152 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
154 /* routine interval */
155 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
156 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
157 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
158 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
161 #define SSD_DMA_ALIGN (16)
163 /* some hw defalut */
164 #define SSD_LOG_MAX_SZ 4096
166 #define SSD_NAND_OOB_SZ 1024
167 #define SSD_NAND_ID_SZ 8
168 #define SSD_NAND_ID_BUFF_SZ 1024
169 #define SSD_NAND_MAX_CE 2
171 #define SSD_BBT_RESERVED 8
173 #define SSD_ECC_MAX_FLIP (64+1)
175 #define SSD_RAM_ALIGN 16
178 #define SSD_RELOAD_FLAG 0x3333CCCC
179 #define SSD_RELOAD_FW 0xAA5555AA
180 #define SSD_RESET_NOINIT 0xAA5555AA
181 #define SSD_RESET 0x55AAAA55
182 #define SSD_RESET_FULL 0x5A
183 //#define SSD_RESET_WAIT 1000 //1s
184 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
188 #define SSD_PROTOCOL_V1 0x0
190 #define SSD_ROM_SIZE (16*1024*1024)
191 #define SSD_ROM_BLK_SIZE (256*1024)
192 #define SSD_ROM_PAGE_SIZE (256)
193 #define SSD_ROM_NR_BRIDGE_FW 2
194 #define SSD_ROM_NR_CTRL_FW 2
195 #define SSD_ROM_BRIDGE_FW_BASE 0
196 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
197 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
198 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
199 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
200 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
203 #define SSD_PROTOCOL_V3 0x3000000
204 #define SSD_PROTOCOL_V3_1_1 0x3010001
205 #define SSD_PROTOCOL_V3_1_3 0x3010003
206 #define SSD_PROTOCOL_V3_2 0x3020000
207 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
208 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
209 #define SSD_PROTOCOL_V3_2_4 0x3020004
212 #define SSD_PV3_ROM_NR_BM_FW 1
213 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
215 #define SSD_ROM_LOG_SZ (64*1024*4)
217 #define SSD_ROM_NR_SMART_MAX 2
218 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
219 #define SSD_PV3_ROM_SMART_SZ (64*1024)
222 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
223 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
227 #define SSD_REQ_FIFO_REG 0x0000
228 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
229 #define SSD_RESP_PTR_REG 0x0010 //0x0018
230 #define SSD_INTR_INTERVAL_REG 0x0018
231 #define SSD_READY_REG 0x001C
232 #define SSD_BRIDGE_TEST_REG 0x0020
233 #define SSD_STRIPE_SIZE_REG 0x0028
234 #define SSD_CTRL_VER_REG 0x0030 //controller
235 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
236 #define SSD_PCB_VER_REG 0x0038
237 #define SSD_BURN_FLAG_REG 0x0040
238 #define SSD_BRIDGE_INFO_REG 0x0044
240 #define SSD_WL_VAL_REG 0x0048 //32-bit
242 #define SSD_BB_INFO_REG 0x004C
244 #define SSD_ECC_TEST_REG 0x0050 //test only
245 #define SSD_ERASE_TEST_REG 0x0058 //test only
246 #define SSD_WRITE_TEST_REG 0x0060 //test only
248 #define SSD_RESET_REG 0x0068
249 #define SSD_RELOAD_FW_REG 0x0070
251 #define SSD_RESERVED_BLKS_REG 0x0074
252 #define SSD_VALID_PAGES_REG 0x0078
253 #define SSD_CH_INFO_REG 0x007C
255 #define SSD_CTRL_TEST_REG_SZ 0x8
256 #define SSD_CTRL_TEST_REG0 0x0080
257 #define SSD_CTRL_TEST_REG1 0x0088
258 #define SSD_CTRL_TEST_REG2 0x0090
259 #define SSD_CTRL_TEST_REG3 0x0098
260 #define SSD_CTRL_TEST_REG4 0x00A0
261 #define SSD_CTRL_TEST_REG5 0x00A8
262 #define SSD_CTRL_TEST_REG6 0x00B0
263 #define SSD_CTRL_TEST_REG7 0x00B8
265 #define SSD_FLASH_INFO_REG0 0x00C0
266 #define SSD_FLASH_INFO_REG1 0x00C8
267 #define SSD_FLASH_INFO_REG2 0x00D0
268 #define SSD_FLASH_INFO_REG3 0x00D8
269 #define SSD_FLASH_INFO_REG4 0x00E0
270 #define SSD_FLASH_INFO_REG5 0x00E8
271 #define SSD_FLASH_INFO_REG6 0x00F0
272 #define SSD_FLASH_INFO_REG7 0x00F8
274 #define SSD_RESP_INFO_REG 0x01B8
275 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
277 #define SSD_CHIP_INFO_REG_SZ 0x10
278 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
279 #define SSD_CHIP_INFO_REG1 0x0110
280 #define SSD_CHIP_INFO_REG2 0x0120
281 #define SSD_CHIP_INFO_REG3 0x0130
282 #define SSD_CHIP_INFO_REG4 0x0140
283 #define SSD_CHIP_INFO_REG5 0x0150
284 #define SSD_CHIP_INFO_REG6 0x0160
285 #define SSD_CHIP_INFO_REG7 0x0170
287 #define SSD_RAM_INFO_REG 0x01C4
289 #define SSD_BBT_BASE_REG 0x01C8
290 #define SSD_ECT_BASE_REG 0x01CC
292 #define SSD_CLEAR_INTR_REG 0x01F0
294 #define SSD_INIT_STATE_REG_SZ 0x8
295 #define SSD_INIT_STATE_REG0 0x0200
296 #define SSD_INIT_STATE_REG1 0x0208
297 #define SSD_INIT_STATE_REG2 0x0210
298 #define SSD_INIT_STATE_REG3 0x0218
299 #define SSD_INIT_STATE_REG4 0x0220
300 #define SSD_INIT_STATE_REG5 0x0228
301 #define SSD_INIT_STATE_REG6 0x0230
302 #define SSD_INIT_STATE_REG7 0x0238
304 #define SSD_ROM_INFO_REG 0x0600
305 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
306 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
307 #define SSD_ROM_VP_INFO_REG 0x060C
309 #define SSD_LOG_INFO_REG 0x0610
310 #define SSD_LED_REG 0x0614
311 #define SSD_MSG_BASE_REG 0x06F8
314 #define SSD_SPI_REG_CMD 0x0180
315 #define SSD_SPI_REG_CMD_HI 0x0184
316 #define SSD_SPI_REG_WDATA 0x0188
317 #define SSD_SPI_REG_ID 0x0190
318 #define SSD_SPI_REG_STATUS 0x0198
319 #define SSD_SPI_REG_RDATA 0x01A0
320 #define SSD_SPI_REG_READY 0x01A8
323 #define SSD_I2C_CTRL_REG 0x06F0
324 #define SSD_I2C_RDATA_REG 0x06F4
326 /* temperature reg */
327 #define SSD_BRIGE_TEMP_REG 0x0618
329 #define SSD_CTRL_TEMP_REG0 0x0700
330 #define SSD_CTRL_TEMP_REG1 0x0708
331 #define SSD_CTRL_TEMP_REG2 0x0710
332 #define SSD_CTRL_TEMP_REG3 0x0718
333 #define SSD_CTRL_TEMP_REG4 0x0720
334 #define SSD_CTRL_TEMP_REG5 0x0728
335 #define SSD_CTRL_TEMP_REG6 0x0730
336 #define SSD_CTRL_TEMP_REG7 0x0738
338 /* reversion 3 reg */
339 #define SSD_PROTOCOL_VER_REG 0x01B4
341 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
342 #define SSD_BM_FAULT_REG 0x0660
344 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
345 #define SSD_PV3_RAM_STATUS_REG0 0x0260
346 #define SSD_PV3_RAM_STATUS_REG1 0x0264
347 #define SSD_PV3_RAM_STATUS_REG2 0x0268
348 #define SSD_PV3_RAM_STATUS_REG3 0x026C
349 #define SSD_PV3_RAM_STATUS_REG4 0x0270
350 #define SSD_PV3_RAM_STATUS_REG5 0x0274
351 #define SSD_PV3_RAM_STATUS_REG6 0x0278
352 #define SSD_PV3_RAM_STATUS_REG7 0x027C
354 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
355 #define SSD_PV3_CHIP_INFO_REG0 0x0300
356 #define SSD_PV3_CHIP_INFO_REG1 0x0340
357 #define SSD_PV3_CHIP_INFO_REG2 0x0380
358 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
359 #define SSD_PV3_CHIP_INFO_REG4 0x0400
360 #define SSD_PV3_CHIP_INFO_REG5 0x0440
361 #define SSD_PV3_CHIP_INFO_REG6 0x0480
362 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
364 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
365 #define SSD_PV3_INIT_STATE_REG0 0x0500
366 #define SSD_PV3_INIT_STATE_REG1 0x0520
367 #define SSD_PV3_INIT_STATE_REG2 0x0540
368 #define SSD_PV3_INIT_STATE_REG3 0x0560
369 #define SSD_PV3_INIT_STATE_REG4 0x0580
370 #define SSD_PV3_INIT_STATE_REG5 0x05A0
371 #define SSD_PV3_INIT_STATE_REG6 0x05C0
372 #define SSD_PV3_INIT_STATE_REG7 0x05E0
374 /* reversion 3.1.1 reg */
375 #define SSD_FULL_RESET_REG 0x01B0
377 #define SSD_CTRL_REG_ZONE_SZ 0x800
379 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
380 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
382 #define SSD_BB_ACC_REG_SZ 0x4
383 #define SSD_BB_ACC_REG0 0x21C0
384 #define SSD_BB_ACC_REG1 0x29C0
385 #define SSD_BB_ACC_REG2 0x31C0
387 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
388 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
390 #define SSD_EC_ACC_REG_SZ 0x4
391 #define SSD_EC_ACC_REG0 0x21E0
392 #define SSD_EC_ACC_REG1 0x29E0
393 #define SSD_EC_ACC_REG2 0x31E0
395 /* reversion 3.1.2 & 3.1.3 reg */
396 #define SSD_HW_STATUS_REG 0x02AC
398 #define SSD_PLP_INFO_REG 0x0664
400 /*reversion 3.2 reg*/
401 #define SSD_POWER_ON_REG 0x01EC
402 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
403 #define SSD_PL_CAP_LEARN_REG 0x01FC
405 #define SSD_FPGA_1V0_REG0 0x2070
406 #define SSD_FPGA_1V8_REG0 0x2078
407 #define SSD_FPGA_1V0_REG1 0x2870
408 #define SSD_FPGA_1V8_REG1 0x2878
410 /*reversion 3.2 reg*/
411 #define SSD_READ_OT_REG0 0x2260
412 #define SSD_WRITE_OT_REG0 0x2264
413 #define SSD_READ_OT_REG1 0x2A60
414 #define SSD_WRITE_OT_REG1 0x2A64
418 #define SSD_FUNC_READ 0x01
419 #define SSD_FUNC_WRITE 0x02
420 #define SSD_FUNC_NAND_READ_WOOB 0x03
421 #define SSD_FUNC_NAND_READ 0x04
422 #define SSD_FUNC_NAND_WRITE 0x05
423 #define SSD_FUNC_NAND_ERASE 0x06
424 #define SSD_FUNC_NAND_READ_ID 0x07
425 #define SSD_FUNC_READ_LOG 0x08
426 #define SSD_FUNC_TRIM 0x09
427 #define SSD_FUNC_RAM_READ 0x10
428 #define SSD_FUNC_RAM_WRITE 0x11
429 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
432 #define SSD_SPI_CMD_PROGRAM 0x02
433 #define SSD_SPI_CMD_READ 0x03
434 #define SSD_SPI_CMD_W_DISABLE 0x04
435 #define SSD_SPI_CMD_READ_STATUS 0x05
436 #define SSD_SPI_CMD_W_ENABLE 0x06
437 #define SSD_SPI_CMD_ERASE 0xd8
438 #define SSD_SPI_CMD_CLSR 0x30
439 #define SSD_SPI_CMD_READ_ID 0x9f
442 #define SSD_I2C_CTRL_READ 0x00
443 #define SSD_I2C_CTRL_WRITE 0x01
445 /* i2c internal register */
446 #define SSD_I2C_CFG_REG 0x00
447 #define SSD_I2C_DATA_REG 0x01
448 #define SSD_I2C_CMD_REG 0x02
449 #define SSD_I2C_STATUS_REG 0x03
450 #define SSD_I2C_SADDR_REG 0x04
451 #define SSD_I2C_LEN_REG 0x05
452 #define SSD_I2C_RLEN_REG 0x06
453 #define SSD_I2C_WLEN_REG 0x07
454 #define SSD_I2C_RESET_REG 0x08 //write for reset
455 #define SSD_I2C_PRER_REG 0x09
459 /* FPGA volt = ADC_value / 4096 * 3v */
460 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
461 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
462 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
463 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
466 #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
467 #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4)
468 #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4)
469 #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12)
471 #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt))
482 SSD_CLOCK_166M_LOST
= 0,
/* I2C slave addresses of the on-board sensors (7-bit address << 1) */
#define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
#define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)

/* temperature is carried in the high byte of the 16-bit sensor reading;
 * argument parenthesized so expression arguments shift as a unit */
#define SSD_SENSOR_CONVERT_TEMP(val) ((int)((val) >> 8))

/* over-temperature trip points with hysteresis release points */
#define SSD_INLET_OT_TEMP (55) //55 DegC
#define SSD_INLET_OT_HYST (50) //50 DegC
#define SSD_FLASH_OT_TEMP (70) //70 DegC
#define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
518 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
519 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
520 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
522 #define SSD_LM80_REG_FAN1 0x28
523 #define SSD_LM80_REG_FAN2 0x29
524 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
526 #define SSD_LM80_REG_TEMP 0x27
527 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
528 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
529 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
530 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
532 #define SSD_LM80_REG_CONFIG 0x00
533 #define SSD_LM80_REG_ALARM1 0x01
534 #define SSD_LM80_REG_ALARM2 0x02
535 #define SSD_LM80_REG_MASK1 0x03
536 #define SSD_LM80_REG_MASK2 0x04
537 #define SSD_LM80_REG_FANDIV 0x05
538 #define SSD_LM80_REG_RES 0x06
/* scale a raw LM80 voltage reading: (raw * 10) >> 8;
 * argument parenthesized so expression arguments scale as a unit */
#define SSD_LM80_CONVERT_VOLT(val) (((val) * 10) >> 8)

/* 3.3V rail reading scaled by 33/19 — presumably compensating a
 * resistor divider on that input; TODO confirm against board schematic */
#define SSD_LM80_3V3_VOLT(val) ((val)*33/19)

/* LM80 conversion interval, in ms */
#define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
585 #ifdef SSD_OT_PROTECT
586 #define SSD_OT_DELAY (60) //ms
588 #define SSD_OT_TEMP (90) //90 DegC
590 #define SSD_OT_TEMP_HYST (85) //85 DegC
/* fpga temperature */
//#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
/* integer approximation of the float formula above (result in DegC) */
#define CONVERT_TEMP(val) ((val)*504/4096-273)

/* 64-bit temperature status word (from the shift pattern):
 * bits 0-15 max, 16-31 min, 32-47 current; low 4 bits of each
 * field discarded before conversion. Arguments fully parenthesized. */
#define MAX_TEMP(val) CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val) CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val) CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))
/* power-loss capacitor monitoring: the two LM80 inputs used for learning */
#define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
#define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
/* capacitance learning from a timed discharge: t*(u1+u2) / (2*162*(u1-u2));
 * arguments fully parenthesized so expression arguments combine correctly.
 * NOTE(review): 162 appears to be a board-specific constant (discharge
 * path related) — confirm against hardware documentation. */
#define SSD_PL_CAP_LEARN(u1, u2, t) (((t)*((u1)+(u2)))/(2*162*((u1)-(u2))))
#define SSD_PL_CAP_LEARN_WAIT (20) //20ms
#define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s

#define SSD_PL_CAP_CHARGE_WAIT (1000)
#define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s

/* raw reading to capacitor voltage (scale x7, presumably mV — verify);
 * argument parenthesized so expression arguments scale as a unit */
#define SSD_PL_CAP_VOLT(val) ((val)*7)

#define SSD_PL_CAP_VOLT_FULL (13700)
#define SSD_PL_CAP_VOLT_READY (12880)

#define SSD_PL_CAP_THRESHOLD (8900)
#define SSD_PL_CAP_CP_THRESHOLD (5800)
#define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
635 #define SSD_HWMON_OFFS_TEMP (0)
636 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
637 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
638 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
639 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
640 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
642 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
643 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
644 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
645 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
646 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
647 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
810 #define ssd_blist bio_list
811 #define ssd_blist_init bio_list_init
812 #define ssd_blist_get bio_list_get
813 #define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
823 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
824 #define mutex_lock down
825 #define mutex_unlock up
826 #define mutex semaphore
827 #define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1401 uint64_t reset_time
;
1402 int has_non_0x98_reg_access
;
1403 spinlock_t in_flight_lock
;
1405 uint64_t last_poweron_id
;
1411 typedef struct ssd_acc_info
{
1412 uint32_t threshold_l1
;
1413 uint32_t threshold_l2
;
1417 typedef struct ssd_reg_op_info
1421 } ssd_reg_op_info_t
;
1423 typedef struct ssd_spi_op_info
1428 } ssd_spi_op_info_t
;
1430 typedef struct ssd_i2c_op_info
1437 } ssd_i2c_op_info_t
;
1439 typedef struct ssd_smbus_op_info
1445 } ssd_smbus_op_info_t
;
1447 typedef struct ssd_ram_op_info
{
1451 uint8_t __user
*buf
;
1452 } ssd_ram_op_info_t
;
1454 typedef struct ssd_flash_op_info
{
1459 uint8_t __user
*buf
;
1460 } ssd_flash_op_info_t
;
1462 typedef struct ssd_sw_log_info
{
1466 } ssd_sw_log_info_t
;
1468 typedef struct ssd_version_info
1470 uint32_t bridge_ver
; /* bridge fw version */
1471 uint32_t ctrl_ver
; /* controller fw version */
1472 uint32_t bm_ver
; /* battery manager fw version */
1473 uint8_t pcb_ver
; /* main pcb version */
1474 uint8_t upper_pcb_ver
;
1477 } ssd_version_info_t
;
1479 typedef struct pci_addr
1487 typedef struct ssd_drv_param_info
{
1497 } ssd_drv_param_info_t
;
1501 enum ssd_form_factor
1503 SSD_FORM_FACTOR_HHHL
= 0,
1504 SSD_FORM_FACTOR_FHHL
1508 /* ssd power loss protect */
1517 #define SSD_BM_SLAVE_ADDRESS 0x16
1518 #define SSD_BM_CAP 5
1521 #define SSD_BM_SAFETYSTATUS 0x51
1522 #define SSD_BM_OPERATIONSTATUS 0x54
1524 /* ManufacturerAccess */
1525 #define SSD_BM_MANUFACTURERACCESS 0x00
1526 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1528 /* Data flash access */
1529 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1530 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1531 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1532 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1534 /* min cap voltage */
1535 #define SSD_BM_CAP_VOLT_MIN 500
1540 SSD_BM_CAP_VINA = 1,
1546 SSD_BMSTATUS_OK
= 0,
1547 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1548 SSD_BMSTATUS_WARNING
1553 SBS_UNIT_TEMPERATURE
,
1558 SBS_UNIT_CAPACITANCE
1586 uint16_t cap_volt
[SSD_BM_CAP
];
1593 struct ssd_bm_manufacturer_data
1595 uint16_t pack_lot_code
;
1596 uint16_t pcb_lot_code
;
1597 uint16_t firmware_ver
;
1598 uint16_t hardware_ver
;
1601 struct ssd_bm_configuration_registers
1614 uint16_t fet_action
;
1619 #define SBS_VALUE_MASK 0xffff
/* Byte offset of member @var within struct ssd_bm. Uses the standard
 * offsetof (available in-kernel via linux/kernel.h -> linux/stddef.h)
 * instead of the hand-rolled null-pointer cast, which is undefined
 * behavior in ISO C. */
#define bm_var_offset(var) offsetof(struct ssd_bm, var)
/* Pointer to the field @offset bytes past @start (GNU void* arithmetic);
 * @start parenthesized so expression arguments are offset as a unit */
#define bm_var(start, offset) ((void *)(start) + (offset))
1624 static struct sbs_cmd ssd_bm_sbs
[] = {
1625 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1626 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1627 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1628 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1629 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1630 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1631 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1632 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1633 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1634 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1635 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1636 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1637 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1638 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1639 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1640 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1641 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1642 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1643 {0, 0, 0, 0, 0, NULL
},
1647 #define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
1648 #define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
1649 #define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
1650 #define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
1651 #define SSD_CMD_GET_IDX _IOR('H', 105, int)
1652 #define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
1653 #define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
1654 #define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])
1656 #define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
1657 #define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)
1659 #define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)
1661 #define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
1662 #define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)
1664 #define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
1665 #define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
1666 #define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)
1668 #define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
1669 #define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
1670 #define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)
1672 #define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
1673 #define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
1674 #define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
1675 #define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
1676 #define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
1677 #define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
1678 #define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
1679 #define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)
1681 #define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
1682 #define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
1683 #define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
1684 #define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
1685 #define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)
1687 #define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
1688 #define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)
1690 #define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
1691 #define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) //with oob
1692 #define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
1693 #define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
1694 #define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO
1696 #define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)
1698 #define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
1699 #define SSD_CMD_SET_ALARM _IOW('H', 191, int)
1701 #define SSD_CMD_RESET _IOW('H', 200, int)
1702 #define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
1703 #define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
1704 #define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
1705 #define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
1706 #define SSD_CMD_FULL_RESET _IOW('H', 206, int)
1708 #define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
1709 #define SSD_CMD_GET_LOG _IOR('H', 221, void *)
1710 #define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)
1712 #define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
1713 #define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)
1715 #define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
1716 #define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)
1718 #define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)
1720 #define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
1721 #define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
1722 #define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
1723 #define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
1724 #define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)
1727 #define SSD_CMD_FLUSH _IOW('H', 240, int)
1728 #define SSD_CMD_SAVE_MD _IOW('H', 241, int)
1730 #define SSD_CMD_SET_WMODE _IOW('H', 242, int)
1731 #define SSD_CMD_GET_WMODE _IOR('H', 243, int)
1732 #define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)
1734 #define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
1735 #define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)
1737 #define SSD_CMD_CLEAR_WARNING _IOW('H', 260, int)
1741 #define SSD_LOG_MAX_SZ 4096
1742 #define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
1743 #define SSD_DIF_WITH_OLD_LOG 0x3f
1747 SSD_LOG_DATA_NONE
= 0,
1752 typedef struct ssd_log_entry
1770 }__attribute__((packed
))ssd_log_entry_t
;
1772 typedef struct ssd_log
1775 uint64_t ctrl_idx
:8;
1777 } __attribute__((packed
)) ssd_log_t
;
1779 typedef struct ssd_log_desc
1787 } __attribute__((packed
)) ssd_log_desc_t
;
1789 #define SSD_LOG_SW_IDX 0xF
1790 #define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1791 static struct ssd_log_desc ssd_log_desc
[] = {
1792 /* event, level, show flash, show block, show page, desc */
1793 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1794 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1795 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1796 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1797 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1798 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1799 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1800 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1801 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1802 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1803 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1804 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1805 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1806 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1807 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1808 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1809 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1810 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1811 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1812 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1813 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1814 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1815 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1816 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1817 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1818 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1819 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1820 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1821 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1822 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1823 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1824 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Data inspection failure"},
1825 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1828 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1829 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1830 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1831 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1832 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1833 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1834 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1835 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1836 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1837 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1838 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1839 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1840 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1841 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1842 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1843 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1844 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1845 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1846 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1847 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1848 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1849 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1850 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1851 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1852 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1854 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1855 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1856 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1857 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1859 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1861 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1862 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1863 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1864 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1865 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1866 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1867 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1868 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1869 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1870 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1871 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1872 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1873 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1874 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1875 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1876 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1878 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1879 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1880 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1881 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1882 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1884 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1885 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1886 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1887 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1888 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1889 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1891 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Over temperature"}, //90
1892 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature is OK"}, //80
1893 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1894 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1895 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1896 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1897 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1898 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1899 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1900 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1901 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature close to limit"}, //85
1903 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1904 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1905 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1906 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1907 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1909 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1910 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1911 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1912 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1913 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1914 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1915 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1916 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1917 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1918 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1919 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1920 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1921 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1922 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 95 degrees C"},
1923 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 100 degrees C"},
1925 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1926 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1927 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1928 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1929 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1930 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1931 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1932 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1933 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1934 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1935 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1936 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1937 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1938 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1939 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1940 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1941 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1942 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1943 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1944 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1945 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1946 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1947 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55
1948 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50
1949 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash over temperature"}, //70
1950 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash temperature is OK"}, //65
1951 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1952 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1953 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1954 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1955 {0x320, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Temperature sensor event"},
1957 {0x350, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear smart"},
1958 {0x351, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear warning"},
1960 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
1963 #define SSD_LOG_OVER_TEMP 0x90
1964 #define SSD_LOG_NORMAL_TEMP 0x91
1965 #define SSD_LOG_WARN_TEMP 0x9a
1966 #define SSD_LOG_SEU_FAULT 0x93
1967 #define SSD_LOG_SEU_FAULT1 0x98
1968 #define SSD_LOG_BATTERY_FAULT 0x92
1969 #define SSD_LOG_BATTERY_OK 0x99
1970 #define SSD_LOG_BOARD_VOLT_FAULT 0x9f
1973 #define SSD_LOG_TIMEOUT 0x300
1974 #define SSD_LOG_POWER_ON 0x301
1975 #define SSD_LOG_POWER_OFF 0x302
1976 #define SSD_LOG_CLEAR_LOG 0x303
1977 #define SSD_LOG_SET_CAPACITY 0x304
1978 #define SSD_LOG_CLEAR_DATA 0x305
1979 #define SSD_LOG_BM_SFSTATUS 0x306
1980 #define SSD_LOG_EIO 0x307
1981 #define SSD_LOG_ECMD 0x308
1982 #define SSD_LOG_SET_WMODE 0x309
1983 #define SSD_LOG_DDR_INIT_ERR 0x30a
1984 #define SSD_LOG_PCIE_LINK_STATUS 0x30b
1985 #define SSD_LOG_CTRL_RST_SYNC 0x30c
1986 #define SSD_LOG_CLK_FAULT 0x30d
1987 #define SSD_LOG_VOLT_FAULT 0x30e
1988 #define SSD_LOG_SET_CAPACITY_END 0x30F
1989 #define SSD_LOG_CLEAR_DATA_END 0x310
1990 #define SSD_LOG_RESET 0x311
1991 #define SSD_LOG_CAP_VOLT_FAULT 0x312
1992 #define SSD_LOG_CAP_LEARN_FAULT 0x313
1993 #define SSD_LOG_CAP_STATUS 0x314
1994 #define SSD_LOG_VOLT_STATUS 0x315
1995 #define SSD_LOG_INLET_OVER_TEMP 0x316
1996 #define SSD_LOG_INLET_NORMAL_TEMP 0x317
1997 #define SSD_LOG_FLASH_OVER_TEMP 0x318
1998 #define SSD_LOG_FLASH_NORMAL_TEMP 0x319
1999 #define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
2000 #define SSD_LOG_SENSOR_FAULT 0x31b
2001 #define SSD_LOG_ERASE_ALL 0x31c
2002 #define SSD_LOG_ERASE_ALL_END 0x31d
2003 #define SSD_LOG_TEMP_SENSOR_EVENT 0x320
2004 #define SSD_LOG_CLEAR_SMART 0x350
2005 #define SSD_LOG_CLEAR_WARNING 0x351
2008 /* sw log fifo depth */
2009 #define SSD_LOG_FIFO_SZ 1024
2013 static DEFINE_PER_CPU(struct list_head
, ssd_doneq
);
2014 static DEFINE_PER_CPU(struct tasklet_struct
, ssd_tasklet
);
2017 /* unloading driver */
2018 static volatile int ssd_exiting
= 0;
2020 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
2021 static struct class_simple
*ssd_class
;
2023 static struct class *ssd_class
;
2026 static int ssd_cmajor
= SSD_CMAJOR
;
2028 /* ssd block device major, minors */
2029 static int ssd_major
= SSD_MAJOR
;
2030 static int ssd_major_sl
= SSD_MAJOR_SL
;
2031 static int ssd_minors
= SSD_MINORS
;
2033 /* ssd device list */
2034 static struct list_head ssd_list
;
2035 static unsigned long ssd_index_bits
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2036 static unsigned long ssd_index_bits_sl
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2037 static atomic_t ssd_nr
;
2042 SSD_DRV_MODE_STANDARD
= 0, /* full */
2043 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2044 SSD_DRV_MODE_BASE
/* base only */
2054 #if (defined SSD_MSIX)
2055 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2056 #elif (defined SSD_MSI)
2057 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2059 /* auto select the default int mode according to the kernel version */
2060 /* suse 11 sp1 irqbalance bug: use msi instead*/
2061 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
2062 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2064 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2068 static int mode
= SSD_DRV_MODE_STANDARD
;
2069 static int status_mask
= 0xFF;
2070 static int int_mode
= SSD_INT_MODE_DEFAULT
;
2071 static int threaded_irq
= 0;
2072 static int log_level
= SSD_LOG_LEVEL_WARNING
;
2073 static int ot_protect
= 1;
2074 static int wmode
= SSD_WMODE_DEFAULT
;
2075 static int finject
= 0;
2077 module_param(mode
, int, 0);
2078 module_param(status_mask
, int, 0);
2079 module_param(int_mode
, int, 0);
2080 module_param(threaded_irq
, int, 0);
2081 module_param(log_level
, int, 0);
2082 module_param(ot_protect
, int, 0);
2083 module_param(wmode
, int, 0);
2084 module_param(finject
, int, 0);
2087 MODULE_PARM_DESC(mode
, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
2088 MODULE_PARM_DESC(status_mask
, "command status mask, 0 - without command error, 0xff - with command error");
2089 MODULE_PARM_DESC(int_mode
, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
2090 MODULE_PARM_DESC(threaded_irq
, "threaded irq, 0 - normal irq, 1 - threaded irq");
2091 MODULE_PARM_DESC(log_level
, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
2092 MODULE_PARM_DESC(ot_protect
, "over temperature protect, 0 - disable, 1 - enable");
2093 MODULE_PARM_DESC(wmode
, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
2094 MODULE_PARM_DESC(finject
, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2096 // API adaption layer
2097 static inline void ssd_bio_endio(struct bio
*bio
, int error
)
2099 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
2100 bio
->bi_error
= error
;
2102 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
2103 bio_endio(bio
, error
);
2105 bio_endio(bio
, bio
->bi_size
, error
);
2109 static inline int ssd_bio_has_discard(struct bio
*bio
)
2113 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2114 return bio_op(bio
) & REQ_OP_DISCARD
;
2115 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2116 return bio
->bi_rw
& REQ_DISCARD
;
2117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2118 return bio_rw_flagged(bio
, BIO_RW_DISCARD
);
2124 static inline int ssd_bio_has_flush(struct bio
*bio
)
2126 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2127 return bio_op(bio
) & REQ_OP_FLUSH
;
2128 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2129 return bio
->bi_rw
& REQ_FLUSH
;
2135 static inline int ssd_bio_has_barrier_or_fua(struct bio
* bio
)
2137 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2138 return bio
->bi_opf
& REQ_FUA
;
2139 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2140 return bio
->bi_rw
& REQ_FUA
;
2141 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2142 return bio
->bi_rw
& REQ_HARDBARRIER
;
2143 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2144 return bio_rw_flagged(bio
, BIO_RW_BARRIER
);
2146 return bio_barrier(bio
);
2151 static int __init
ssd_drv_mode(char *str
)
2153 mode
= (int)simple_strtoul(str
, NULL
, 0);
2158 static int __init
ssd_status_mask(char *str
)
2160 status_mask
= (int)simple_strtoul(str
, NULL
, 16);
2165 static int __init
ssd_int_mode(char *str
)
2167 int_mode
= (int)simple_strtoul(str
, NULL
, 0);
2172 static int __init
ssd_threaded_irq(char *str
)
2174 threaded_irq
= (int)simple_strtoul(str
, NULL
, 0);
2179 static int __init
ssd_log_level(char *str
)
2181 log_level
= (int)simple_strtoul(str
, NULL
, 0);
2186 static int __init
ssd_ot_protect(char *str
)
2188 ot_protect
= (int)simple_strtoul(str
, NULL
, 0);
2193 static int __init
ssd_wmode(char *str
)
2195 wmode
= (int)simple_strtoul(str
, NULL
, 0);
2200 static int __init
ssd_finject(char *str
)
2202 finject
= (int)simple_strtoul(str
, NULL
, 0);
2207 __setup(MODULE_NAME
"_mode=", ssd_drv_mode
);
2208 __setup(MODULE_NAME
"_status_mask=", ssd_status_mask
);
2209 __setup(MODULE_NAME
"_int_mode=", ssd_int_mode
);
2210 __setup(MODULE_NAME
"_threaded_irq=", ssd_threaded_irq
);
2211 __setup(MODULE_NAME
"_log_level=", ssd_log_level
);
2212 __setup(MODULE_NAME
"_ot_protect=", ssd_ot_protect
);
2213 __setup(MODULE_NAME
"_wmode=", ssd_wmode
);
2214 __setup(MODULE_NAME
"_finject=", ssd_finject
);
2218 #ifdef CONFIG_PROC_FS
2219 #include <linux/proc_fs.h>
2220 #include <asm/uaccess.h>
2222 #define SSD_PROC_DIR MODULE_NAME
2223 #define SSD_PROC_INFO "info"
2225 static struct proc_dir_entry
*ssd_proc_dir
= NULL
;
2226 static struct proc_dir_entry
*ssd_proc_info
= NULL
;
2228 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2229 static int ssd_proc_read(char *page
, char **start
,
2230 off_t off
, int count
, int *eof
, void *data
)
2232 struct ssd_device
*dev
= NULL
;
2233 struct ssd_device
*n
= NULL
;
2239 if (ssd_exiting
|| off
!= 0) {
2243 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
2245 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2247 size
= dev
->hw_info
.size
;
2248 do_div(size
, 1000000000);
2250 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2252 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2254 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2255 if (dev
->hw_info
.ctrl_ver
!= 0) {
2256 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2259 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2261 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2262 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2265 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
2274 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2276 struct ssd_device
*dev
= NULL
;
2277 struct ssd_device
*n
= NULL
;
2285 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
2287 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2289 size
= dev
->hw_info
.size
;
2290 do_div(size
, 1000000000);
2292 seq_printf(m
, "\n");
2294 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2296 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2297 if (dev
->hw_info
.ctrl_ver
!= 0) {
2298 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2301 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2303 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2304 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2307 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
2313 static int ssd_proc_open(struct inode
*inode
, struct file
*file
)
2315 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
2316 return single_open(file
, ssd_proc_show
, PDE(inode
)->data
);
2318 return single_open(file
, ssd_proc_show
, PDE_DATA(inode
));
2322 static const struct file_operations ssd_proc_fops
= {
2323 .open
= ssd_proc_open
,
2325 .llseek
= seq_lseek
,
2326 .release
= single_release
,
2331 static void ssd_cleanup_proc(void)
2333 if (ssd_proc_info
) {
2334 remove_proc_entry(SSD_PROC_INFO
, ssd_proc_dir
);
2335 ssd_proc_info
= NULL
;
2338 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2339 ssd_proc_dir
= NULL
;
2342 static int ssd_init_proc(void)
2344 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2346 goto out_proc_mkdir
;
2348 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2349 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2351 goto out_create_proc_entry
;
2353 ssd_proc_info
->read_proc
= ssd_proc_read
;
2356 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2357 ssd_proc_info
->owner
= THIS_MODULE
;
2360 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2362 goto out_create_proc_entry
;
2367 out_create_proc_entry
:
2368 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2374 static void ssd_cleanup_proc(void)
2378 static int ssd_init_proc(void)
2382 #endif /* CONFIG_PROC_FS */
2385 static void ssd_unregister_sysfs(struct ssd_device
*dev
)
2390 static int ssd_register_sysfs(struct ssd_device
*dev
)
2395 static void ssd_cleanup_sysfs(void)
2400 static int ssd_init_sysfs(void)
2405 static inline void ssd_put_index(int slave
, int index
)
2407 unsigned long *index_bits
= ssd_index_bits
;
2410 index_bits
= ssd_index_bits_sl
;
2413 if (test_and_clear_bit(index
, index_bits
)) {
2414 atomic_dec(&ssd_nr
);
2418 static inline int ssd_get_index(int slave
)
2420 unsigned long *index_bits
= ssd_index_bits
;
2424 index_bits
= ssd_index_bits_sl
;
2428 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
2432 if (test_and_set_bit(index
, index_bits
)) {
2436 atomic_inc(&ssd_nr
);
2441 static void ssd_cleanup_index(void)
2446 static int ssd_init_index(void)
2448 INIT_LIST_HEAD(&ssd_list
);
2449 atomic_set(&ssd_nr
, 0);
2450 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2451 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2456 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2458 if(idx
< SSD_ALPHABET_NUM
) {
2459 snprintf(name
, size
, "%c", 'a'+idx
);
2461 idx
-= SSD_ALPHABET_NUM
;
2462 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
2466 /* pci register r&w */
2467 static inline void ssd_reg_write(void *addr
, uint64_t val
)
2469 iowrite32((uint32_t)val
, addr
);
2470 iowrite32((uint32_t)(val
>> 32), addr
+ 4);
2474 static inline uint64_t ssd_reg_read(void *addr
)
2477 uint32_t val_lo
, val_hi
;
2479 val_lo
= ioread32(addr
);
2480 val_hi
= ioread32(addr
+ 4);
2483 val
= val_lo
| ((uint64_t)val_hi
<< 32);
2489 #define ssd_reg32_write(addr, val) writel(val, addr)
2490 #define ssd_reg32_read(addr) readl(addr)
/*
 * ssd_clear_alarm() - return the alarm LED to firmware control.
 * No-op on protocol versions <= SSD_PROTOCOL_V3 (presumably the LED is
 * not software-controllable there — confirm).
 *
 * NOTE(review): this definition was mangled during extraction; the
 * declaration of 'val', the closing braces, and the bit manipulation
 * between the LED register read and write (original lines 2504-2505)
 * are missing from this view.  Restore from pristine sources before
 * building — do not guess the bit values.
 */
2493 static void ssd_clear_alarm(struct ssd_device
*dev
)
/* old protocol: nothing to do, bail out */
2497 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
/* read-modify-write of the LED control register */
2501 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2503 /* firmware control */
2506 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/*
 * ssd_set_alarm() - take software control of the alarm LED (raise the
 * alarm indication).  No-op on protocol versions <= SSD_PROTOCOL_V3.
 *
 * NOTE(review): mangled by extraction like ssd_clear_alarm() above; the
 * 'val' declaration, closing braces, and the bit manipulation between
 * the register read and write (original lines 2522-2523) are missing
 * from this view.  Restore from pristine sources before building.
 */
2509 static void ssd_set_alarm(struct ssd_device
*dev
)
/* old protocol: nothing to do, bail out */
2513 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
/* read-modify-write of the LED control register */
2517 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2521 /* software control */
2524 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/*
 * Byte-swap a 32-bit value (endianness conversion).
 * Fix: the extracted macro bodies were truncated — the opening
 * '((uint32_t)(' / '((uint16_t)(' lines were lost, leaving unbalanced
 * parentheses.  Restored as well-formed macros; every argument use is
 * parenthesized, but note (x) is evaluated four times — do not pass
 * expressions with side effects.
 */
#define u32_swap(x) ((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

/* Byte-swap a 16-bit value. */
#define u16_swap(x) ((uint16_t)( \
	(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
	(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2541 /* No lock, for init only*/
2542 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2552 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2554 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2555 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2556 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2557 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2561 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2562 if (val
== 0x1000000) {
2566 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2573 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2582 static int ssd_init_spi(struct ssd_device
*dev
)
2588 mutex_lock(&dev
->spi_mutex
);
2591 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2594 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2596 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2601 } while (val
!= 0x1000000);
2603 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2608 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2616 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2618 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2621 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2622 mutex_unlock(&dev
->spi_mutex
);
2629 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2640 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2641 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2645 mutex_lock(&dev
->spi_mutex
);
2646 while (rlen
< size
) {
2647 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2649 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2651 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2652 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2653 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2654 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2658 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2659 if (val
== 0x1000000) {
2663 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2670 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2671 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2673 rlen
+= sizeof(uint32_t);
2677 mutex_unlock(&dev
->spi_mutex
);
2681 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2693 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2694 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2695 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2699 mutex_lock(&dev
->spi_mutex
);
2701 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2703 wlen
= size
/ sizeof(uint32_t);
2704 for (i
=0; i
<(int)wlen
; i
++) {
2705 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2711 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2717 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2719 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2721 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2726 } while (val
!= 0x1000000);
2728 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2733 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2740 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2741 if ((val
>> 6) & 0x1) {
2748 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2750 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2753 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2755 mutex_unlock(&dev
->spi_mutex
);
2760 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2770 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2774 mutex_lock(&dev
->spi_mutex
);
2776 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2777 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2780 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2782 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2786 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2789 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2791 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2796 } while (val
!= 0x1000000);
2798 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2803 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2810 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2811 if ((val
>> 5) & 0x1) {
2818 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2820 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2823 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2825 mutex_unlock(&dev
->spi_mutex
);
2830 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2841 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2842 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2846 while (len
< size
) {
2847 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2848 rsize
= dev
->rom_info
.page_size
- roff
;
2849 if ((size
- len
) < rsize
) {
2850 rsize
= (size
- len
);
2854 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2868 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2879 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2880 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2884 while (len
< size
) {
2885 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2886 wsize
= dev
->rom_info
.page_size
- woff
;
2887 if ((size
- len
) < wsize
) {
2888 wsize
= (size
- len
);
2892 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2906 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2916 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2917 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2921 while (len
< size
) {
2924 ret
= ssd_spi_block_erase(dev
, eoff
);
2929 len
+= dev
->rom_info
.block_size
;
/* Thin wrapper so all i2c helpers funnel through one register-read
 * entry point. */
static uint32_t __ssd_i2c_reg32_read(void *addr)
{
	return ssd_reg32_read(addr);
}
/* Register write used by the i2c helpers: write the value, then read
 * the same register back. The read-back is presumably there to flush
 * the posted PCI write before the caller proceeds — confirm against
 * the hardware programming model. */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	(void)ssd_reg32_read(addr);
}
2950 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2952 ssd_i2c_ctrl_t ctrl
;
2953 ssd_i2c_data_t data
;
2960 ctrl
.bits
.wdata
= 0;
2961 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2962 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2963 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2967 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2968 if (data
.bits
.valid
== 0) {
2973 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2979 status
= data
.bits
.rdata
;
2981 if (!(status
& 0x4)) {
2982 /* clear read fifo data */
2983 ctrl
.bits
.wdata
= 0;
2984 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2985 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2986 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2990 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2991 if (data
.bits
.valid
== 0) {
2996 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3004 if (nr_data
<= SSD_I2C_MAX_DATA
) {
3013 ctrl
.bits
.wdata
= 0x04;
3014 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3015 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3016 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3019 if (!(status
& 0x8)) {
3021 /* reset i2c controller */
3022 ctrl
.bits
.wdata
= 0x0;
3023 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
3024 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3025 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3032 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3034 ssd_i2c_ctrl_t ctrl
;
3035 ssd_i2c_data_t data
;
3041 mutex_lock(&dev
->i2c_mutex
);
3046 ctrl
.bits
.wdata
= saddr
;
3047 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3048 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3049 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3052 while (off
< size
) {
3053 ctrl
.bits
.wdata
= buf
[off
];
3054 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3055 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3056 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3062 ctrl
.bits
.wdata
= 0x01;
3063 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3064 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3065 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3070 ctrl
.bits
.wdata
= 0;
3071 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3072 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3073 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3076 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3077 if (data
.bits
.valid
== 0) {
3082 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3089 status
= data
.bits
.rdata
;
3094 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3101 if (!(status
& 0x1)) {
3107 if (status
& 0x20) {
3113 if (status
& 0x10) {
3120 if (__ssd_i2c_clear(dev
, saddr
)) {
3124 mutex_unlock(&dev
->i2c_mutex
);
3129 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3131 ssd_i2c_ctrl_t ctrl
;
3132 ssd_i2c_data_t data
;
3138 mutex_lock(&dev
->i2c_mutex
);
3143 ctrl
.bits
.wdata
= saddr
;
3144 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3145 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3146 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3149 ctrl
.bits
.wdata
= size
;
3150 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3151 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3152 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3155 ctrl
.bits
.wdata
= 0x02;
3156 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3157 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3158 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3163 ctrl
.bits
.wdata
= 0;
3164 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3165 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3166 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3169 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3170 if (data
.bits
.valid
== 0) {
3175 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3182 status
= data
.bits
.rdata
;
3187 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3194 if (!(status
& 0x2)) {
3200 if (status
& 0x20) {
3206 if (status
& 0x10) {
3212 while (off
< size
) {
3213 ctrl
.bits
.wdata
= 0;
3214 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3215 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3216 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3220 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3221 if (data
.bits
.valid
== 0) {
3226 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3233 buf
[off
] = data
.bits
.rdata
;
3240 if (__ssd_i2c_clear(dev
, saddr
)) {
3244 mutex_unlock(&dev
->i2c_mutex
);
3249 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3251 ssd_i2c_ctrl_t ctrl
;
3252 ssd_i2c_data_t data
;
3258 mutex_lock(&dev
->i2c_mutex
);
3263 ctrl
.bits
.wdata
= saddr
;
3264 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3265 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3266 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3270 while (off
< wsize
) {
3271 ctrl
.bits
.wdata
= wbuf
[off
];
3272 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3273 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3274 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3280 ctrl
.bits
.wdata
= rsize
;
3281 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3282 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3283 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3286 ctrl
.bits
.wdata
= 0x03;
3287 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3288 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3289 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3294 ctrl
.bits
.wdata
= 0;
3295 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3296 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3297 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3300 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3301 if (data
.bits
.valid
== 0) {
3306 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3313 status
= data
.bits
.rdata
;
3318 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3325 if (!(status
& 0x2)) {
3331 if (status
& 0x20) {
3337 if (status
& 0x10) {
3344 while (off
< rsize
) {
3345 ctrl
.bits
.wdata
= 0;
3346 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3347 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3348 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3352 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3353 if (data
.bits
.valid
== 0) {
3358 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3365 rbuf
[off
] = data
.bits
.rdata
;
3372 if (__ssd_i2c_clear(dev
, saddr
)) {
3375 mutex_unlock(&dev
->i2c_mutex
);
3380 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3386 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3387 if (!ret
|| -ETIMEDOUT
== ret
) {
3392 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3395 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3401 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3407 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3408 if (!ret
|| -ETIMEDOUT
== ret
) {
3413 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3416 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3422 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3424 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3429 memcpy((smb_data
+ 1), buf
, 1);
3432 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3433 if (!ret
|| -ETIMEDOUT
== ret
) {
3438 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3441 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3447 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3449 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3456 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3457 if (!ret
|| -ETIMEDOUT
== ret
) {
3462 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3465 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3471 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3473 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3478 memcpy((smb_data
+ 1), buf
, 2);
3481 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3482 if (!ret
|| -ETIMEDOUT
== ret
) {
3487 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3490 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3496 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3498 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3505 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3506 if (!ret
|| -ETIMEDOUT
== ret
) {
3511 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3514 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3520 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3522 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3528 memcpy((smb_data
+ 2), buf
, size
);
3531 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3532 if (!ret
|| -ETIMEDOUT
== ret
) {
3537 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3540 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3546 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3548 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3556 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3557 if (!ret
|| -ETIMEDOUT
== ret
) {
3562 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3565 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3571 rsize
= smb_data
[1];
3573 if (rsize
> size
) {
3577 memcpy(buf
, (smb_data
+ 2), rsize
);
3583 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3586 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3591 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3596 conf
&= (uint8_t)(~1u);
3598 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3607 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3612 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3617 *data
= u16_swap(val
);
3622 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3631 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3638 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3643 /* set volt limit */
3644 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3645 high
= ssd_lm80_limit
[i
].high
;
3646 low
= ssd_lm80_limit
[i
].low
;
3648 if (SSD_LM80_IN_CAP
== i
) {
3652 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3658 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3664 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3670 /* set interrupt mask: allow volt in interrupt except cap in*/
3672 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3677 /* set interrupt mask: disable others */
3679 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3686 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3695 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3700 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3704 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3709 val
&= ~(1UL << (uint32_t)idx
);
3711 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3720 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3725 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3729 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3734 val
|= (1UL << (uint32_t)idx
);
3736 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3745 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3750 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3755 *data
= u16_swap(val
);
3759 static int ssd_generate_sensor_fault_log(struct ssd_device
*dev
, uint16_t event
, uint8_t addr
,uint32_t ret
)
3762 data
= ((ret
& 0xffff) << 16) | (addr
<< 8) | addr
;
3763 ssd_gen_swlog(dev
,event
,data
);
3766 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3769 uint16_t val
= 0, status
;
3770 uint8_t alarm1
= 0, alarm2
= 0;
3775 /* read interrupt status to clear interrupt */
3776 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3781 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3786 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3788 /* parse inetrrupt status */
3789 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3790 if (!((status
>> (uint32_t)i
) & 0x1)) {
3791 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3792 /* enable INx irq */
3793 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3802 /* disable INx irq */
3803 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3808 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3812 high
= (uint32_t)ssd_lm80_limit
[i
].high
* (uint32_t)10;
3813 low
= (uint32_t)ssd_lm80_limit
[i
].low
* (uint32_t)10;
3815 for (j
=0; j
<3; j
++) {
3816 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3820 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3821 if ((volt
>high
) || (volt
<=low
)) {
3823 msleep(SSD_LM80_CONV_INTERVAL
);
3835 case SSD_LM80_IN_CAP
: {
3837 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3839 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3844 case SSD_LM80_IN_1V2
:
3845 case SSD_LM80_IN_1V2a
:
3846 case SSD_LM80_IN_1V5
:
3847 case SSD_LM80_IN_1V8
: {
3848 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3851 case SSD_LM80_IN_FPGA_3V3
:
3852 case SSD_LM80_IN_3V3
: {
3853 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3863 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3864 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
,ret
);
3867 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3873 static int ssd_init_sensor(struct ssd_device
*dev
)
3877 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3881 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3883 hio_warn("%s: init lm75 failed\n", dev
->name
);
3884 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3885 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3890 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3891 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3893 hio_warn("%s: init lm80 failed\n", dev
->name
);
3894 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3895 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3902 /* skip error if not in standard mode */
3903 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3910 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3912 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3916 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3920 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3924 static int ssd_mon_temp(struct ssd_device
*dev
)
3930 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3934 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3939 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3941 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3942 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3946 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3948 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3949 if (cur
>= SSD_INLET_OT_TEMP
) {
3950 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3951 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3953 } else if(cur
< SSD_INLET_OT_HYST
) {
3954 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3955 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3960 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3962 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3963 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3967 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3969 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3970 if (cur
>= SSD_FLASH_OT_TEMP
) {
3971 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3972 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3974 } else if(cur
< SSD_FLASH_OT_HYST
) {
3975 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3976 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3985 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3987 test_and_clear_bit(tag
, dev
->tag_map
);
3988 wake_up(&dev
->tag_wq
);
3991 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3996 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3997 DEFINE_WAIT(__wait
);
4003 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
4006 finish_wait(&dev
->tag_wq
, &__wait
);
4009 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4016 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
4018 test_and_clear_bit(tag
, dev
->tag_map
);
4021 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
4025 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4032 static void ssd_barrier_end(struct ssd_device
*dev
)
4034 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4035 wake_up_all(&dev
->tag_wq
);
4037 mutex_unlock(&dev
->barrier_mutex
);
4040 static int ssd_barrier_start(struct ssd_device
*dev
)
4044 mutex_lock(&dev
->barrier_mutex
);
4046 atomic_set(&dev
->queue_depth
, 0);
4048 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4049 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4053 __set_current_state(TASK_INTERRUPTIBLE
);
4054 schedule_timeout(1);
4057 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4058 wake_up_all(&dev
->tag_wq
);
4060 mutex_unlock(&dev
->barrier_mutex
);
4065 static int ssd_busy(struct ssd_device
*dev
)
4067 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4074 static int ssd_wait_io(struct ssd_device
*dev
)
4078 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4079 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4083 __set_current_state(TASK_INTERRUPTIBLE
);
4084 schedule_timeout(1);
4091 static int ssd_in_barrier(struct ssd_device
*dev
)
4093 return (0 == atomic_read(&dev
->queue_depth
));
4097 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4099 kfree(dev
->tag_map
);
4102 static int ssd_init_tag(struct ssd_device
*dev
)
4104 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4106 mutex_init(&dev
->barrier_mutex
);
4108 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4110 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4111 if (!dev
->tag_map
) {
4115 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4117 init_waitqueue_head(&dev
->tag_wq
);
4123 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4125 struct ssd_device
*dev
= cmd
->dev
;
4126 struct bio
*bio
= cmd
->bio
;
4127 unsigned long dur
= jiffies
- cmd
->start_time
;
4128 int rw
= bio_data_dir(bio
);
4129 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4134 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4135 int cpu
= part_stat_lock();
4136 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4137 part_round_stats(cpu
, part
);
4138 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4139 part_dec_in_flight(part
, rw
);
4141 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4142 int cpu
= part_stat_lock();
4143 struct hd_struct
*part
= &dev
->gd
->part0
;
4144 part_round_stats(cpu
, part
);
4145 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4147 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4148 part
->in_flight
[rw
]--;
4149 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4153 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4155 disk_round_stats(dev
->gd
);
4156 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4158 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4159 dev
->gd
->in_flight
--;
4160 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4166 disk_round_stats(dev
->gd
);
4168 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4170 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4172 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4173 dev
->gd
->in_flight
--;
4174 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4181 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4183 struct ssd_device
*dev
= cmd
->dev
;
4184 struct bio
*bio
= cmd
->bio
;
4185 int rw
= bio_data_dir(bio
);
4186 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4191 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4192 int cpu
= part_stat_lock();
4193 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4194 part_round_stats(cpu
, part
);
4195 part_stat_inc(cpu
, part
, ios
[rw
]);
4196 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4197 part_inc_in_flight(part
, rw
);
4199 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4200 int cpu
= part_stat_lock();
4201 struct hd_struct
*part
= &dev
->gd
->part0
;
4202 part_round_stats(cpu
, part
);
4203 part_stat_inc(cpu
, part
, ios
[rw
]);
4204 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4206 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4207 part
->in_flight
[rw
]++;
4208 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4212 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4214 disk_round_stats(dev
->gd
);
4215 disk_stat_inc(dev
->gd
, ios
[rw
]);
4216 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4218 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4219 dev
->gd
->in_flight
++;
4220 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4225 disk_round_stats(dev
->gd
);
4227 disk_stat_inc(dev
->gd
, writes
);
4228 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4230 disk_stat_inc(dev
->gd
, reads
);
4231 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4234 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4235 dev
->gd
->in_flight
++;
4236 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4242 cmd
->start_time
= jiffies
;
4246 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4248 spin_lock(&dev
->sendq_lock
);
4249 ssd_blist_add(&dev
->sendq
, bio
);
4250 spin_unlock(&dev
->sendq_lock
);
4252 atomic_inc(&dev
->in_sendq
);
4253 wake_up(&dev
->send_waitq
);
4256 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4258 struct ssd_device
*dev
= cmd
->dev
;
4259 struct bio
*bio
= cmd
->bio
;
4260 int errors
= cmd
->errors
;
4264 if (!ssd_bio_has_discard(bio
)) {
4265 ssd_end_io_acct(cmd
);
4267 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4268 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4273 ssd_put_tag(dev
, tag
);
4275 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4276 ssd_bio_endio(bio
, errors
);
4277 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4278 spin_lock(&dev
->doneq_lock
);
4279 ssd_blist_add(&dev
->doneq
, bio
);
4280 spin_unlock(&dev
->doneq_lock
);
4282 atomic_inc(&dev
->in_doneq
);
4283 wake_up(&dev
->done_waitq
);
4287 complete(cmd
->waiting
);
4292 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4294 struct ssd_device
*dev
= cmd
->dev
;
4295 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4298 for (i
=0; i
<dev
->nr_queue
; i
++) {
4299 disable_irq(dev
->entry
[i
].vector
);
4302 atomic_inc(&dev
->tocnt
);
4304 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4305 cmd
->errors
= -ETIMEDOUT
;
4306 ssd_end_request(cmd
);
4309 for (i
=0; i
<dev
->nr_queue
; i
++) {
4310 enable_irq(dev
->entry
[i
].vector
);
4318 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4320 init_timer(&cmd
->cmd_timer
);
4322 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4323 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4324 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4326 add_timer(&cmd
->cmd_timer
);
4329 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4331 return del_timer(&cmd
->cmd_timer
);
4334 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4338 timer
->data
= (unsigned long)data
;
4339 timer
->expires
= jiffies
+ timeout
;
4340 timer
->function
= (void (*)(unsigned long)) complt
;
/* Thin wrapper around del_timer(); non-zero means the timer was
 * still pending when removed. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4350 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4352 struct ssd_device
*dev
= cmd
->dev
;
4353 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4355 ssd_end_timeout_request(cmd
);
4357 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4361 static void __ssd_done(unsigned long data
)
4363 struct ssd_cmd
*cmd
;
4366 local_irq_disable();
4367 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4368 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4370 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4374 while (!list_empty(&localq
)) {
4375 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4376 list_del_init(&cmd
->list
);
4378 ssd_end_request(cmd
);
4382 static void __ssd_done_db(unsigned long data
)
4384 struct ssd_cmd
*cmd
;
4385 struct ssd_device
*dev
;
4389 local_irq_disable();
4390 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4391 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4393 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4397 while (!list_empty(&localq
)) {
4398 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4399 list_del_init(&cmd
->list
);
4401 dev
= (struct ssd_device
*)cmd
->dev
;
4405 sector_t off
= dev
->db_info
.data
.loc
.off
;
4406 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4408 switch (dev
->db_info
.type
) {
4409 case SSD_DEBUG_READ_ERR
:
4410 if (bio_data_dir(bio
) == READ
&&
4411 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4415 case SSD_DEBUG_WRITE_ERR
:
4416 if (bio_data_dir(bio
) == WRITE
&&
4417 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4418 cmd
->errors
= -EROFS
;
4421 case SSD_DEBUG_RW_ERR
:
4422 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4423 if (bio_data_dir(bio
) == READ
) {
4426 cmd
->errors
= -EROFS
;
4435 ssd_end_request(cmd
);
4439 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4441 unsigned long flags
= 0;
4443 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4444 struct ssd_device
*dev
= cmd
->dev
;
4445 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4446 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4453 local_irq_save(flags
);
4454 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4455 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4456 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4458 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4459 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4461 local_irq_restore(flags
);
/*
 * Synchronous completion path: disarm the command's timeout timer and
 * finish the request. If the timer had already fired (del_timer returned
 * 0) the command is unknown/stale — log it; the dropped lines presumably
 * bail out before ssd_end_request — TODO confirm against full source.
 * Fragments kept verbatim.
 */
4466 static inline void ssd_done(struct ssd_cmd
*cmd
)
4468 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4469 struct ssd_device
*dev
= cmd
->dev
;
4470 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4471 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4478 ssd_end_request(cmd
);
4483 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4485 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4487 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4489 spin_lock(&dev
->cmd_lock
);
4490 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4491 spin_unlock(&dev
->cmd_lock
);
4494 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4496 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4498 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4500 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4503 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4505 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4506 struct bio
*bio
= cmd
->bio
;
4508 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4511 switch (dev
->db_info
.type
) {
4512 case SSD_DEBUG_READ_TO
:
4513 if (bio_data_dir(bio
) == READ
) {
4517 case SSD_DEBUG_WRITE_TO
:
4518 if (bio_data_dir(bio
) == WRITE
) {
4522 case SSD_DEBUG_RW_TO
:
4530 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4534 /* fixed for BIOVEC_PHYS_MERGEABLE */
4535 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4536 #include <linux/bio.h>
4537 #include <linux/io.h>
4538 #include <xen/page.h>
4540 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4541 const struct bio_vec
*vec2
)
4543 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4544 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4546 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4547 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4550 #ifdef BIOVEC_PHYS_MERGEABLE
4551 #undef BIOVEC_PHYS_MERGEABLE
4553 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4554 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4555 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4559 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4561 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4562 struct bio_vec
*bvec
, *bvprv
= NULL
;
4563 struct scatterlist
*sg
= NULL
;
4564 int i
= 0, nsegs
= 0;
4566 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4567 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4571 * for each segment in bio
4573 bio_for_each_segment(bvec
, bio
, i
) {
4574 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4575 sg
->length
+= bvec
->bv_len
;
4577 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4581 sg
= sg
? (sg
+ 1) : sgl
;
4582 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4583 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4585 sg
->page
= bvec
->bv_page
;
4586 sg
->length
= bvec
->bv_len
;
4587 sg
->offset
= bvec
->bv_offset
;
4594 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4604 struct bio_vec bvec
, bvprv
;
4605 struct bvec_iter iter
;
4606 struct scatterlist
*sg
= NULL
;
4610 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4613 * for each segment in bio
4615 bio_for_each_segment(bvec
, bio
, iter
) {
4616 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4617 sg
->length
+= bvec
.bv_len
;
4619 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4623 sg
= sg
? (sg
+ 1) : sgl
;
4625 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4642 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4644 struct ssd_cmd
*cmd
;
4645 struct ssd_rw_msg
*msg
;
4646 struct ssd_sg_entry
*sge
;
4647 sector_t block
= bio_start(bio
);
4651 tag
= ssd_get_tag(dev
, wait
);
4656 cmd
= &dev
->cmd
[tag
];
4660 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4662 if (ssd_bio_has_discard(bio
)) {
4663 unsigned int length
= bio_sectors(bio
);
4665 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4667 msg
->fun
= SSD_FUNC_TRIM
;
4670 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4672 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4675 block
+= sge
->length
;
4676 length
-= sge
->length
;
4684 msg
->nsegs
= cmd
->nsegs
= i
;
4690 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4691 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4694 if (bio_data_dir(bio
) == READ
) {
4695 msg
->fun
= SSD_FUNC_READ
;
4698 msg
->fun
= SSD_FUNC_WRITE
;
4699 msg
->flag
= dev
->wmode
;
4703 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4705 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4706 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4708 block
+= sge
->length
;
4714 #ifdef SSD_OT_PROTECT
4715 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4716 msleep_interruptible(dev
->ot_delay
);
4720 ssd_start_io_acct(cmd
);
4726 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4728 struct ssd_cmd
*cmd
;
4729 struct ssd_rw_msg
*msg
;
4730 struct ssd_sg_entry
*sge
;
4731 struct scatterlist
*sgl
;
4732 sector_t block
= bio_start(bio
);
4736 tag
= ssd_get_tag(dev
, wait
);
4741 cmd
= &dev
->cmd
[tag
];
4745 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4749 if (ssd_bio_has_discard(bio
)) {
4750 unsigned int length
= bio_sectors(bio
);
4752 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4754 msg
->fun
= SSD_FUNC_TRIM
;
4757 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4759 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4762 block
+= sge
->length
;
4763 length
-= sge
->length
;
4771 msg
->nsegs
= cmd
->nsegs
= i
;
4777 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4780 if (bio_data_dir(bio
) == READ
) {
4781 msg
->fun
= SSD_FUNC_READ
;
4783 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4785 msg
->fun
= SSD_FUNC_WRITE
;
4786 msg
->flag
= dev
->wmode
;
4787 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4791 for (i
=0; i
<cmd
->nsegs
; i
++) {
4793 sge
->length
= sg_dma_len(sgl
) >> 9;
4794 sge
->buf
= sg_dma_address(sgl
);
4796 block
+= sge
->length
;
4803 #ifdef SSD_OT_PROTECT
4804 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4805 msleep_interruptible(dev
->ot_delay
);
4809 ssd_start_io_acct(cmd
);
4816 static int ssd_done_thread(void *data
)
4818 struct ssd_device
*dev
;
4827 current
->flags
|= PF_NOFREEZE
;
4828 //set_user_nice(current, -5);
4830 while (!kthread_should_stop()) {
4831 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4833 while (atomic_read(&dev
->in_doneq
)) {
4835 spin_lock(&dev
->doneq_lock
);
4836 bio
= ssd_blist_get(&dev
->doneq
);
4837 spin_unlock(&dev
->doneq_lock
);
4839 spin_lock_irq(&dev
->doneq_lock
);
4840 bio
= ssd_blist_get(&dev
->doneq
);
4841 spin_unlock_irq(&dev
->doneq_lock
);
4845 next
= bio
->bi_next
;
4846 bio
->bi_next
= NULL
;
4847 ssd_bio_endio(bio
, 0);
4848 atomic_dec(&dev
->in_doneq
);
4854 #ifdef SSD_ESCAPE_IRQ
4855 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4856 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4857 cpumask_var_t new_mask
;
4858 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4859 cpumask_setall(new_mask
);
4860 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4861 set_cpus_allowed_ptr(current
, new_mask
);
4862 free_cpumask_var(new_mask
);
4866 cpus_setall(new_mask
);
4867 cpu_clear(dev
->irq_cpu
, new_mask
);
4868 set_cpus_allowed(current
, new_mask
);
4877 static int ssd_send_thread(void *data
)
4879 struct ssd_device
*dev
;
4888 current
->flags
|= PF_NOFREEZE
;
4889 //set_user_nice(current, -5);
4891 while (!kthread_should_stop()) {
4892 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4894 while (atomic_read(&dev
->in_sendq
)) {
4895 spin_lock(&dev
->sendq_lock
);
4896 bio
= ssd_blist_get(&dev
->sendq
);
4897 spin_unlock(&dev
->sendq_lock
);
4900 next
= bio
->bi_next
;
4901 bio
->bi_next
= NULL
;
4902 #ifdef SSD_QUEUE_PBIO
4903 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4904 __ssd_submit_pbio(dev
, bio
, 1);
4906 ssd_submit_bio(dev
, bio
, 1);
4909 ssd_submit_bio(dev
, bio
, 1);
4911 atomic_dec(&dev
->in_sendq
);
4917 #ifdef SSD_ESCAPE_IRQ
4918 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4919 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4920 cpumask_var_t new_mask
;
4921 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4922 cpumask_setall(new_mask
);
4923 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4924 set_cpus_allowed_ptr(current
, new_mask
);
4925 free_cpumask_var(new_mask
);
4929 cpus_setall(new_mask
);
4930 cpu_clear(dev
->irq_cpu
, new_mask
);
4931 set_cpus_allowed(current
, new_mask
);
/*
 * Stop the device's worker kthreads. The send thread is stopped before
 * the done thread — presumably so no new submissions race completion
 * teardown; confirm against full source.
 */
4941 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4943 kthread_stop(dev
->send_thread
);
4944 kthread_stop(dev
->done_thread
);
4947 static int ssd_init_thread(struct ssd_device
*dev
)
4951 atomic_set(&dev
->in_doneq
, 0);
4952 atomic_set(&dev
->in_sendq
, 0);
4954 spin_lock_init(&dev
->doneq_lock
);
4955 spin_lock_init(&dev
->sendq_lock
);
4957 ssd_blist_init(&dev
->doneq
);
4958 ssd_blist_init(&dev
->sendq
);
4960 init_waitqueue_head(&dev
->done_waitq
);
4961 init_waitqueue_head(&dev
->send_waitq
);
4963 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4964 if (IS_ERR(dev
->done_thread
)) {
4965 ret
= PTR_ERR(dev
->done_thread
);
4966 goto out_done_thread
;
4969 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4970 if (IS_ERR(dev
->send_thread
)) {
4971 ret
= PTR_ERR(dev
->send_thread
);
4972 goto out_send_thread
;
4978 kthread_stop(dev
->done_thread
);
4984 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4986 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4988 spin_lock(&dev
->dcmd_lock
);
4989 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4990 spin_unlock(&dev
->dcmd_lock
);
4993 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
4995 struct ssd_dcmd
*dcmd
= NULL
;
4997 spin_lock(&dev
->dcmd_lock
);
4998 if (!list_empty(&dev
->dcmd_list
)) {
4999 dcmd
= list_entry(dev
->dcmd_list
.next
,
5000 struct ssd_dcmd
, list
);
5001 list_del_init(&dcmd
->list
);
5003 spin_unlock(&dev
->dcmd_lock
);
5008 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
5013 static int ssd_init_dcmd(struct ssd_device
*dev
)
5015 struct ssd_dcmd
*dcmd
;
5016 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
5019 spin_lock_init(&dev
->dcmd_lock
);
5020 INIT_LIST_HEAD(&dev
->dcmd_list
);
5021 init_waitqueue_head(&dev
->dcmd_wq
);
5023 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
5025 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
5026 goto out_alloc_dcmd
;
5028 memset(dev
->dcmd
, 0, dcmd_sz
);
5030 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
5032 INIT_LIST_HEAD(&dcmd
->list
);
5033 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
5042 static void ssd_put_dmsg(void *msg
)
5044 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
5045 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5047 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
5049 wake_up(&dev
->dcmd_wq
);
5052 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5054 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5058 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5061 dcmd
= ssd_get_dcmd(dev
);
5063 finish_wait(&dev
->dcmd_wq
, &wait
);
5069 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5071 DECLARE_COMPLETION(wait
);
5072 struct ssd_cmd
*cmd
;
5076 tag
= ssd_get_tag(dev
, 1);
5081 cmd
= &dev
->cmd
[tag
];
5083 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5084 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5086 cmd
->waiting
= &wait
;
5090 wait_for_completion(cmd
->waiting
);
5091 cmd
->waiting
= NULL
;
5093 if (cmd
->errors
== -ETIMEDOUT
) {
5095 } else if (cmd
->errors
) {
5100 *done
= cmd
->nr_log
;
5102 ssd_put_tag(dev
, cmd
->tag
);
5107 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5109 DECLARE_COMPLETION(wait
);
5110 struct ssd_cmd
*cmd
;
5114 tag
= ssd_barrier_get_tag(dev
);
5119 cmd
= &dev
->cmd
[tag
];
5121 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5122 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5124 cmd
->waiting
= &wait
;
5128 wait_for_completion(cmd
->waiting
);
5129 cmd
->waiting
= NULL
;
5131 if (cmd
->errors
== -ETIMEDOUT
) {
5133 } else if (cmd
->errors
) {
5138 *done
= cmd
->nr_log
;
5140 ssd_barrier_put_tag(dev
, cmd
->tag
);
5145 #ifdef SSD_OT_PROTECT
5146 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5153 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5157 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5160 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5161 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5163 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5164 if (val
== 0xffffffffffffffffull
) {
5168 cur
= (int)CUR_TEMP(val
);
5170 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5171 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5172 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5173 dev
->ot_delay
= SSD_OT_DELAY
;
5180 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5181 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5182 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5189 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5195 if (!dev
|| !status
) {
5199 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5200 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5201 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5202 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5203 if ((val
>> 22) & 0x1) {
5209 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5210 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5211 if ((val
>> 22) & 0x1) {
5217 *status
= !!dev
->ot_delay
;
5224 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5230 mutex_lock(&dev
->fw_mutex
);
5232 dev
->ot_protect
= !!protect
;
5234 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5235 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5236 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5237 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5238 if (dev
->ot_protect
) {
5243 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5246 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5247 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5248 if (dev
->ot_protect
) {
5253 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5257 mutex_unlock(&dev
->fw_mutex
);
5260 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5262 ssd_set_ot_protect(dev
, ot_protect
);
5264 #ifdef SSD_OT_PROTECT
5265 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5272 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5274 struct ssd_log_op_msg
*msg
;
5275 struct ssd_log_msg
*lmsg
;
5277 size_t length
= dev
->hw_info
.log_sz
;
5280 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5284 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5285 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5286 ret
= dma_mapping_error(buf_dma
);
5288 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5291 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5292 goto out_dma_mapping
;
5295 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5297 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5298 lmsg
= (struct ssd_log_msg
*)msg
;
5299 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5300 lmsg
->ctrl_idx
= ctrl_idx
;
5301 lmsg
->buf
= buf_dma
;
5303 msg
->fun
= SSD_FUNC_READ_LOG
;
5304 msg
->ctrl_idx
= ctrl_idx
;
5308 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5311 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5317 #define SSD_LOG_PRINT_BUF_SZ 256
5318 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5320 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5321 struct ssd_log_entry
*le
;
5323 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5329 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5330 if (log_desc
->event
== le
->event
) {
5340 if (log_desc
->level
< log_level
) {
5345 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5348 sn
= dev
->labelv3
.barcode
;
5351 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5353 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5354 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5357 switch (log_desc
->data
) {
5358 case SSD_LOG_DATA_NONE
:
5360 case SSD_LOG_DATA_LOC
:
5361 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5362 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5363 if (log_desc
->sblock
) {
5364 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5366 if (log_desc
->spage
) {
5367 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5370 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5371 if (log_desc
->sblock
) {
5372 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5374 if (log_desc
->spage
) {
5375 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5379 case SSD_LOG_DATA_HEX
:
5380 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5385 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5387 switch (log_desc
->level
) {
5388 case SSD_LOG_LEVEL_INFO
:
5389 hio_info("%s\n", print_buf
);
5391 case SSD_LOG_LEVEL_NOTICE
:
5392 hio_note("%s\n", print_buf
);
5394 case SSD_LOG_LEVEL_WARNING
:
5395 hio_warn("%s\n", print_buf
);
5397 case SSD_LOG_LEVEL_ERR
:
5398 hio_err("%s\n", print_buf
);
5399 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5402 hio_warn("%s\n", print_buf
);
5407 return log_desc
->level
;
5410 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5411 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5414 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5419 case SSD_LOG_OVER_TEMP
: {
5420 #ifdef SSD_OT_PROTECT
5421 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5422 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5423 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5424 dev
->ot_delay
= SSD_OT_DELAY
;
5431 case SSD_LOG_NORMAL_TEMP
: {
5432 #ifdef SSD_OT_PROTECT
5433 /* need to check all controller's temperature */
5434 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5439 case SSD_LOG_BATTERY_FAULT
: {
5442 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5443 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5444 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5448 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5449 ssd_switch_wmode(dev
, dev
->user_wmode
);
5454 case SSD_LOG_BATTERY_OK
: {
5455 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5456 ssd_switch_wmode(dev
, dev
->user_wmode
);
5461 case SSD_LOG_BOARD_VOLT_FAULT
: {
5462 ssd_mon_boardvolt(dev
);
5466 case SSD_LOG_CLEAR_LOG
: {
5468 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5472 case SSD_LOG_CAP_VOLT_FAULT
:
5473 case SSD_LOG_CAP_LEARN_FAULT
:
5474 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5475 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5476 ssd_switch_wmode(dev
, dev
->user_wmode
);
5485 /* ssd event call */
5486 if (dev
->event_call
) {
5487 dev
->event_call(dev
->gd
, event
, level
);
5490 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5491 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5498 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5504 mutex_lock(&dev
->internal_log_mutex
);
5506 size
= sizeof(struct ssd_log
);
5507 off
= dev
->internal_log
.nr_log
* size
;
5509 if (off
== dev
->rom_info
.log_sz
) {
5510 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5511 hio_warn("%s: internal log is full\n", dev
->name
);
5516 internal_log
= dev
->internal_log
.log
+ off
;
5517 memcpy(internal_log
, log
, size
);
5519 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5520 off
+= dev
->rom_info
.log_base
;
5522 ret
= ssd_spi_write(dev
, log
, off
, size
);
5528 dev
->internal_log
.nr_log
++;
5531 mutex_unlock(&dev
->internal_log_mutex
);
/* Byte-indexed lookup table for the bit-reflected (LSB-first) form of
 * CRC-16 poly 0x8005 (reflected constant 0xA001), as used by crc16_byte()
 * below. Identical to the Linux lib/crc16 table. */
5535 /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
5536 static unsigned short const crc16_table
[256] = {
5537 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
5538 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
5539 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
5540 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
5541 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
5542 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
5543 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
5544 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
5545 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
5546 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
5547 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
5548 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
5549 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
5550 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
5551 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
5552 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
5553 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
5554 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
5555 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
5556 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
5557 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
5558 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
5559 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
5560 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
5561 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
5562 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
5563 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
5564 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
5565 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
5566 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
5567 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
5568 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
5571 static unsigned short crc16_byte(unsigned short crc
, const unsigned char data
)
5573 return (crc
>> 8) ^ crc16_table
[(crc
^ data
) & 0xff];
/*
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous (running) CRC value; pass 0 to start a new computation
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Bitwise formulation of the table-driven version above: poly 0x8005
 * processed LSB-first is the reflected constant 0xA001. Produces bit-for-
 * bit identical results to the crc16_table lookup (CRC-16/ARC).
 *
 * Returns the updated CRC value.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	while (len--) {
		int bit;

		crc ^= *buffer++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (unsigned short)((crc >> 1) ^ 0xA001)
					: (unsigned short)(crc >> 1);
	}
	return crc;
}
5590 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5597 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5600 memset(&log
, 0, sizeof(struct ssd_log
));
5602 do_gettimeofday(&tv
);
5603 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5604 log
.time
= tv
.tv_sec
;
5605 log
.le
.event
= event
;
5606 log
.le
.data
.val
= data
;
5608 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5609 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5610 level
= ssd_parse_log(dev
, &log
, 0);
5611 if (level
>= SSD_LOG_LEVEL
) {
5612 ret
= ssd_save_log(dev
, &log
);
5616 if (SSD_LOG_LEVEL_ERR
== level
) {
5621 dev
->smart
.log_info
.nr_log
++;
5622 dev
->smart
.log_info
.stat
[level
]++;
5625 ssd_handle_event(dev
, event
, level
);
5630 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5632 struct ssd_log_entry le
;
5635 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5643 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5647 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5652 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5653 queue_work(dev
->workq
, &dev
->log_work
);
/*
 * Drain the software-log FIFO: pop each queued log entry and persist it
 * via ssd_save_swlog(). Runs from the log work item. The loop close and
 * return value handling were dropped by extraction; fragments verbatim.
 */
5659 static int ssd_do_swlog(struct ssd_device
*dev
)
5661 struct ssd_log_entry le
;
5664 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5665 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5666 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5675 static int __ssd_clear_log(struct ssd_device
*dev
)
5677 uint32_t off
, length
;
5680 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5684 if (dev
->internal_log
.nr_log
== 0) {
5688 mutex_lock(&dev
->internal_log_mutex
);
5690 off
= dev
->rom_info
.log_base
;
5691 length
= dev
->rom_info
.log_sz
;
5693 ret
= ssd_spi_erase(dev
, off
, length
);
5695 hio_warn("%s: log erase: failed\n", dev
->name
);
5699 dev
->internal_log
.nr_log
= 0;
5702 mutex_unlock(&dev
->internal_log_mutex
);
/*
 * Erase the on-flash internal log via __ssd_clear_log(), then record a
 * "log cleared" software event. The conditional/return lines around the
 * ssd_gen_swlog call were dropped by extraction; fragments verbatim.
 */
5706 static int ssd_clear_log(struct ssd_device
*dev
)
5710 ret
= __ssd_clear_log(dev
);
5712 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5718 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5720 struct ssd_log_entry
*le
;
5727 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5732 do_gettimeofday(&tv
);
5734 log
.time
= tv
.tv_sec
;
5735 log
.ctrl_idx
= ctrl_idx
;
5737 le
= (ssd_log_entry_t
*)buf
;
5738 while (nr_log
> 0) {
5739 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5741 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5742 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5743 level
= ssd_parse_log(dev
, &log
, 1);
5744 if (level
>= SSD_LOG_LEVEL
) {
5745 ssd_save_log(dev
, &log
);
5749 if (SSD_LOG_LEVEL_ERR
== level
) {
5753 dev
->smart
.log_info
.nr_log
++;
5754 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5755 dev
->smart
.log_info
.stat
[level
]++;
5759 /* log to the volatile log info */
5760 dev
->log_info
.nr_log
++;
5761 dev
->log_info
.stat
[level
]++;
5765 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5766 if (le
->event
!= SSD_LOG_SEU_FAULT1
) {
5767 dev
->has_non_0x98_reg_access
= 1;
5770 /*dev->readonly = 1;
5771 set_disk_ro(dev->gd, 1);
5772 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5776 ssd_handle_event(dev
, le
->event
, level
);
5785 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5786 static void ssd_log_worker(void *data
)
5788 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5790 static void ssd_log_worker(struct work_struct
*work
)
5792 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5797 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5799 if (!dev
->log_buf
) {
5800 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5801 if (!dev
->log_buf
) {
5802 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5808 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5809 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5810 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5812 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5813 hio_warn("%s: do log fail\n", dev
->name
);
5819 ret
= ssd_do_swlog(dev
);
5821 hio_warn("%s: do swlog fail\n", dev
->name
);
5825 static void ssd_cleanup_log(struct ssd_device
*dev
)
5828 kfree(dev
->log_buf
);
5829 dev
->log_buf
= NULL
;
5832 sfifo_free(&dev
->log_fifo
);
5834 if (dev
->internal_log
.log
) {
5835 vfree(dev
->internal_log
.log
);
5836 dev
->internal_log
.nr_log
= 0;
5837 dev
->internal_log
.log
= NULL
;
5841 static int ssd_init_log(struct ssd_device
*dev
)
5843 struct ssd_log
*log
;
5848 mutex_init(&dev
->internal_log_mutex
);
5850 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5851 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5853 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5856 off
= dev
->rom_info
.log_base
;
5857 size
= dev
->rom_info
.log_sz
;
5859 dev
->internal_log
.nr_log
= 0;
5860 dev
->internal_log
.log
= vmalloc(size
);
5861 if (!dev
->internal_log
.log
) {
5866 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5868 goto out_alloc_log_fifo
;
5871 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5875 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5876 while (len
< size
) {
5877 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5882 if (log
->ctrl_idx
== 0xff) {
5886 if (log
->le
.event
== SSD_LOG_POWER_ON
) {
5887 if (dev
->internal_log
.nr_log
> dev
->last_poweron_id
) {
5888 dev
->last_poweron_id
= dev
->internal_log
.nr_log
;
5892 dev
->internal_log
.nr_log
++;
5894 len
+= sizeof(struct ssd_log
);
5895 off
+= sizeof(struct ssd_log
);
5901 sfifo_free(&dev
->log_fifo
);
5903 vfree(dev
->internal_log
.log
);
5904 dev
->internal_log
.log
= NULL
;
5905 dev
->internal_log
.nr_log
= 0;
5907 /* skip error if not in standard mode */
5908 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_stop_workq - stop routine background work for the device.
 * Clears the SSD_INIT_WORKQ state bit (so periodic paths stop queueing
 * new work) and then drains work already queued on dev->workq.
 */
5915 static void ssd_stop_workq(struct ssd_device
*dev
)
/* mark the workqueue as not accepting routine work */
5917 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
/* wait for any in-flight work items to complete */
5918 flush_workqueue(dev
->workq
);
/*
 * ssd_start_workq - (re)enable routine background work for the device.
 * Sets the SSD_INIT_WORKQ state bit and immediately kicks the log worker
 * so pending log events are processed without waiting for the next tick.
 */
5921 static void ssd_start_workq(struct ssd_device
*dev
)
/* return value intentionally ignored: bit may already be set */
5923 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
/* kick the log worker right away */
5926 queue_work(dev
->workq
, &dev
->log_work
);
/*
 * ssd_cleanup_workq - tear down the per-device workqueue.
 * Drains outstanding work first, then destroys the workqueue.
 * Callers must not queue further work on dev->workq after this.
 */
5929 static void ssd_cleanup_workq(struct ssd_device
*dev
)
/* flush before destroy so no work item runs on a dead queue */
5931 flush_workqueue(dev
->workq
);
5932 destroy_workqueue(dev
->workq
);
/*
 * ssd_init_workq - create the per-device single-threaded workqueue,
 * named after the device, used by the log/BM/capacitor workers.
 * NOTE(review): the failure check on the returned pointer was dropped by
 * the extraction; presumably NULL is handled by the (missing) lines below.
 */
5936 static int ssd_init_workq(struct ssd_device
*dev
)
/* one ordered worker thread per device keeps the workers serialized */
5940 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5951 static int ssd_init_rom_info(struct ssd_device
*dev
)
5955 mutex_init(&dev
->spi_mutex
);
5956 mutex_init(&dev
->i2c_mutex
);
5958 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5959 /* fix bug: read data to clear status */
5960 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5962 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5963 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5964 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5966 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5967 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5968 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5970 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5971 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5972 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5974 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5976 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5977 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5978 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5979 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5980 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5981 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5982 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5984 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5985 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5986 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5987 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5989 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5990 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5991 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5992 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5994 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
5995 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
5996 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
5998 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
5999 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
6001 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
6002 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
6003 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6005 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6006 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6007 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
6008 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
6009 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
6012 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
6013 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
6014 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
6015 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
6017 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
6018 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6019 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6020 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6022 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6023 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6024 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6025 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6027 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6028 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6029 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
6031 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6032 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
6033 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
6034 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
6035 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
6038 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
6039 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
6042 return ssd_init_spi(dev
);
6046 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
6050 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6051 struct hd_struct
*part
;
6057 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
6061 do_gettimeofday(&tv
);
6062 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
6065 run_time
= tv
.tv_sec
- dev
->uptime
;
6068 /* avoid frequently update */
6069 if (run_time
>= 60) {
6074 smart
->io_stat
.run_time
+= run_time
;
6076 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6077 cpu
= part_stat_lock();
6078 part
= &dev
->gd
->part0
;
6079 part_round_stats(cpu
, part
);
6082 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
6083 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
6084 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
6085 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
6086 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
6088 disk_round_stats(dev
->gd
);
6091 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
6092 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
6093 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
6094 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
6097 disk_round_stats(dev
->gd
);
6100 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
6101 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
6102 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
6103 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
6106 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
6108 for (i
=0; i
<dev
->nr_queue
; i
++) {
6109 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
6110 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
6113 for (i
=0; i
<dev
->nr_queue
; i
++) {
6114 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
6115 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
6119 //dev->uptime = tv.tv_sec;
6124 static int __ssd_clear_smart(struct ssd_device
*dev
)
6128 uint32_t off
, length
;
6132 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6137 off
= dev
->rom_info
.smart_base
;
6138 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6140 ret
= ssd_spi_erase(dev
, off
, length
);
6142 hio_warn("%s: info erase: failed\n", dev
->name
);
6146 sversion
= dev
->smart
.version
;
6148 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6149 dev
->smart
.version
= sversion
+ 1;
6150 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6152 /* clear all tmp acc */
6153 for (i
=0; i
<dev
->nr_queue
; i
++) {
6154 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6155 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6158 atomic_set(&dev
->tocnt
, 0);
6160 /* clear tmp log info */
6161 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6163 do_gettimeofday(&tv
);
6164 dev
->uptime
= tv
.tv_sec
;
6167 //ssd_clear_alarm(dev);
6172 static int __ssd_clear_warning(struct ssd_device
*dev
)
6177 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6181 /* clear log_info warning */
6182 memset(&dev
->smart
.log_info
, 0, sizeof(dev
->smart
.log_info
));
6184 /* clear io_stat warning */
6185 dev
->smart
.io_stat
.nr_to
= 0;
6186 dev
->smart
.io_stat
.nr_rwerr
= 0;
6187 dev
->smart
.io_stat
.nr_ioerr
= 0;
6189 /* clear ecc_info warning */
6190 memset(&dev
->smart
.ecc_info
, 0, sizeof(dev
->smart
.ecc_info
));
6192 /* clear queued warnings */
6193 for (i
=0; i
<dev
->nr_queue
; i
++) {
6194 /* queued io_stat warning */
6195 dev
->queue
[i
].io_stat
.nr_to
= 0;
6196 dev
->queue
[i
].io_stat
.nr_rwerr
= 0;
6197 dev
->queue
[i
].io_stat
.nr_ioerr
= 0;
6199 /* queued ecc_info warning */
6200 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(dev
->queue
[i
].ecc_info
));
6203 /* write smart back to nor */
6204 for (i
= 0; i
< dev
->rom_info
.nr_smart
; i
++) {
6205 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6206 size
= dev
->rom_info
.smart_sz
;
6208 ret
= ssd_spi_erase(dev
, off
, size
);
6210 hio_warn("%s: warning erase: failed with code 1\n", dev
->name
);
6214 size
= sizeof(struct ssd_smart
);
6216 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6218 hio_warn("%s: warning erase: failed with code 2\n", dev
->name
);
6223 dev
->smart
.version
++;
6225 /* clear cmd timeout warning */
6226 atomic_set(&dev
->tocnt
, 0);
6228 /* clear tmp log info */
6229 memset(&dev
->log_info
, 0, sizeof(dev
->log_info
));
/*
 * ssd_clear_smart - public wrapper around __ssd_clear_smart().
 * On success it records a SSD_LOG_CLEAR_SMART software log entry so the
 * clearing operation itself is auditable.
 */
6235 static int ssd_clear_smart(struct ssd_device
*dev
)
6239 ret
= __ssd_clear_smart(dev
);
/* log the clear event (extra value 0 = no detail) */
6241 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_SMART
, 0);
/*
 * ssd_clear_warning - public wrapper around __ssd_clear_warning().
 * Mirrors ssd_clear_smart(): performs the clear and then records a
 * SSD_LOG_CLEAR_WARNING software log entry.
 */
6247 static int ssd_clear_warning(struct ssd_device
*dev
)
6251 ret
= __ssd_clear_warning(dev
);
/* log the clear event (extra value 0 = no detail) */
6253 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_WARNING
, 0);
6259 static int ssd_save_smart(struct ssd_device
*dev
)
6265 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6268 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6272 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6276 dev
->smart
.version
++;
6278 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6279 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6280 size
= dev
->rom_info
.smart_sz
;
6282 ret
= ssd_spi_erase(dev
, off
, size
);
6284 hio_warn("%s: info erase failed\n", dev
->name
);
6288 size
= sizeof(struct ssd_smart
);
6290 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6292 hio_warn("%s: info write failed\n", dev
->name
);
6303 static int ssd_init_smart(struct ssd_device
*dev
)
6305 struct ssd_smart
*smart
;
6307 uint32_t off
, size
, val
;
6310 int update_smart
= 0;
6312 do_gettimeofday(&tv
);
6313 dev
->uptime
= tv
.tv_sec
;
6315 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6319 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6325 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6328 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6329 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6331 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6332 size
= sizeof(struct ssd_smart
);
6334 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6336 hio_warn("%s: info read failed\n", dev
->name
);
6340 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6342 smart
[i
].version
= 0;
6346 if (smart
[i
].version
> dev
->smart
.version
) {
6347 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6351 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6352 /* first time power up */
6353 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6354 dev
->smart
.version
= 1;
6357 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
);
6359 dev
->last_poweron_id
= ~0;
6360 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
6361 if (dev
->smart
.io_stat
.nr_to
) {
6362 dev
->smart
.io_stat
.nr_to
= 0;
6367 /* check log info */
6369 struct ssd_log_info log_info
;
6370 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6372 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6374 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6377 switch (log
->le
.event
) {
6378 /* skip the volatile log info */
6379 case SSD_LOG_SEU_FAULT
:
6380 case SSD_LOG_SEU_FAULT1
:
6383 case SSD_LOG_TIMEOUT
:
6384 skip
= (dev
->last_poweron_id
>= log_info
.nr_log
);
6389 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6397 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6398 if (log_info
.stat
[i
] != dev
->smart
.log_info
.stat
[i
]) {
6400 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6407 ++dev
->smart
.version
;
6411 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6412 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6416 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6417 size
= dev
->rom_info
.smart_sz
;
6419 ret
= ssd_spi_erase(dev
, off
, size
);
6421 hio_warn("%s: info erase failed\n", dev
->name
);
6425 size
= sizeof(struct ssd_smart
);
6426 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6428 hio_warn("%s: info write failed\n", dev
->name
);
6435 /* sync smart with alarm led */
6436 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6437 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6444 /* skip error if not in standard mode */
6445 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6452 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6454 struct ssd_bm_manufacturer_data bm_md
= {0};
6455 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6463 mutex_lock(&dev
->bm_mutex
);
6465 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6466 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6471 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6472 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6477 if (bm_md
.firmware_ver
& 0xF000) {
6482 *ver
= bm_md
.firmware_ver
;
6485 mutex_unlock(&dev
->bm_mutex
);
6489 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6492 int i
= SSD_BM_RETRY_MAX
;
6496 ret
= __ssd_bm_get_version(dev
, &tmp
);
6510 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6512 struct ssd_bm_configuration_registers bm_cr
;
6513 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6517 mutex_lock(&dev
->bm_mutex
);
6519 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6520 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6525 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6526 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6531 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6536 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6539 mutex_unlock(&dev
->bm_mutex
);
6543 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6546 int i
= SSD_BM_RETRY_MAX
;
6550 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
/*
 * ssd_bm_enter_cap_learning - ask the battery manager to start a
 * capacitance-learning cycle.
 * Writes the SSD_BM_ENTER_CAP_LEARNING word to the BM's
 * ManufacturerAccess register over SMBus.
 * NOTE(review): the error check on ret was dropped by the extraction.
 */
6564 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6566 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6567 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
/* SMBus word write: command code + 16-bit payload */
6570 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
/*
 * ssd_bm_get_sfstatus - read the battery manager's SafetyStatus word.
 * @status: out parameter, presumably set from val on success — the
 *          assignment line was dropped by the extraction; confirm against
 *          the original source.
 */
6579 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6582 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
/* SMBus word read into val */
6585 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
/*
 * ssd_bm_get_opstatus - read the battery manager's OperationStatus word.
 * Same shape as ssd_bm_get_sfstatus(); @status is presumably filled from
 * val on success — that line was dropped by the extraction; confirm
 * against the original source.
 */
6595 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6598 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
/* SMBus word read into val */
6601 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6611 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6613 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6614 struct ssd_bm bm_status
;
6615 uint8_t buf
[2] = {0, };
6620 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6622 while (bm_sbs
->desc
!= NULL
) {
6623 switch (bm_sbs
->size
) {
6625 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6627 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6633 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6635 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6638 //val = *(uint16_t *)buf;
6646 switch (bm_sbs
->unit
) {
6647 case SBS_UNIT_VALUE
:
6648 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6650 case SBS_UNIT_TEMPERATURE
:
6651 cval
= (uint16_t)(val
- 2731) / 10;
6652 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6654 case SBS_UNIT_VOLTAGE
:
6655 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6657 case SBS_UNIT_CURRENT
:
6658 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6661 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6663 case SBS_UNIT_PERCENT
:
6664 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6666 case SBS_UNIT_CAPACITANCE
:
6667 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6678 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6684 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6686 struct ssd_bm bm_status
= {0};
6691 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6696 /* capacitor voltage */
6697 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6702 for (i
=0; i
<nr_cap
; i
++) {
6703 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6704 *status
= SSD_BMSTATUS_WARNING
;
6710 if (bm_status
.sf_status
) {
6711 *status
= SSD_BMSTATUS_WARNING
;
6716 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6717 *status
= SSD_BMSTATUS_CHARGING
;
6719 *status
= SSD_BMSTATUS_OK
;
6726 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6728 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6729 static void ssd_bm_worker(void *data
)
6731 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6733 static void ssd_bm_worker(struct work_struct
*work
)
6735 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6741 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6745 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6749 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6753 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6755 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6759 /* need cap learning ? */
6760 if (!(opstatus
& 0xF0)) {
6761 ret
= ssd_bm_enter_cap_learning(dev
);
6763 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6769 static void ssd_bm_routine_start(void *data
)
6771 struct ssd_device
*dev
;
6778 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6779 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6780 queue_work(dev
->workq
, &dev
->bm_work
);
6782 queue_work(dev
->workq
, &dev
->capmon_work
);
6788 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6795 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6800 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6805 /* make sure the lm80 voltage value is updated */
6806 msleep(SSD_LM80_CONV_INTERVAL
);
6808 /* check if full charged */
6811 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6813 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6814 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6818 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6819 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6824 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6828 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6831 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6833 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6834 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6838 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6845 /* enter cap learn */
6846 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6850 msleep(SSD_PL_CAP_LEARN_WAIT
);
6852 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6853 if (!((t
>> 1) & 0x1)) {
6858 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6864 if ((t
>> 4) & 0x1) {
6875 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6881 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6889 mutex_lock(&dev
->bm_mutex
);
6891 ssd_stop_workq(dev
);
6893 ret
= ssd_do_cap_learn(dev
, cap
);
6895 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6899 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6902 ssd_start_workq(dev
);
6903 mutex_unlock(&dev
->bm_mutex
);
6908 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6916 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6920 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6927 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6929 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6930 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6934 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6935 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6940 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6942 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6945 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6948 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6949 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6954 /* enable cap INx */
6955 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6957 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6958 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6964 /* skip error if not in standard mode */
6965 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6971 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6977 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6981 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6986 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6990 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6991 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
6999 static int ssd_init_pl_cap(struct ssd_device
*dev
)
7003 /* set here: user write mode */
7004 dev
->user_wmode
= wmode
;
7006 mutex_init(&dev
->bm_mutex
);
7008 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7010 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
7011 if ((val
>> 1) & 0x1) {
7012 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7015 ret
= ssd_check_pl_cap(dev
);
7017 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/*
 * __end_str - ensure a fixed-size label field is usable as a C string.
 * Scans up to len bytes looking for a NUL terminator; the action taken
 * when none is found (presumably forcing str[len-1] = '\0') was dropped
 * by the extraction — confirm against the original source.
 */
7025 static void __end_str(char *str
, int len
)
/* scan the field byte by byte */
7029 for(i
=0; i
<len
; i
++) {
7030 if (*(str
+i
) == '\0')
7036 static int ssd_init_label(struct ssd_device
*dev
)
7042 /* label location */
7043 off
= dev
->rom_info
.label_base
;
7045 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7046 size
= sizeof(struct ssd_label
);
7049 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
7051 memset(&dev
->label
, 0, size
);
7055 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
7056 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
7057 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
7058 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
7059 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
7060 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
7062 size
= sizeof(struct ssd_labelv3
);
7065 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
7067 memset(&dev
->labelv3
, 0, size
);
7071 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7072 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7073 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
7074 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
7075 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7076 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7077 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
7078 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
7079 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
7083 /* skip error if not in standard mode */
7084 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7090 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
7092 struct ssd_device
*dev
;
7094 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
7098 dev
= bdev
->bd_disk
->private_data
;
7100 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7101 memset(label
, 0, sizeof(struct ssd_label
));
7102 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7103 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7104 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7105 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7107 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
7113 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
7115 uint16_t bm_ver
= 0;
7118 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7119 ret
= ssd_bm_get_version(dev
, &bm_ver
);
7125 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
7126 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
7127 ver
->bm_ver
= bm_ver
;
7128 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
7129 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
/*
 * ssd_get_version - exported API: fetch version info for a block device.
 * Validates the caller-supplied pointers, resolves the ssd_device from
 * the gendisk private data, and delegates to __ssd_get_version() under
 * fw_mutex (serializes against firmware operations).
 */
7136 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
7138 struct ssd_device
*dev
;
/* guard against NULL bdev/ver or a disk-less bdev from external callers */
7141 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
7145 dev
= bdev
->bd_disk
->private_data
;
/* version readout races with firmware update paths — hold fw_mutex */
7147 mutex_lock(&dev
->fw_mutex
);
7148 ret
= __ssd_get_version(dev
, ver
);
7149 mutex_unlock(&dev
->fw_mutex
);
7154 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
7162 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7168 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
7169 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
7170 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
7171 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
7172 *temp
= (int)dev
->db_info
.data
.log
.extra
;
7177 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7178 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
7180 val
= ssd_reg_read(dev
->ctrlp
+ off
);
7181 if (val
== 0xffffffffffffffffull
) {
7185 cur
= (int)CUR_TEMP(val
);
/*
 * ssd_get_temperature - exported API: read the device temperature.
 * Same pattern as ssd_get_version(): validate arguments, resolve the
 * ssd_device from the gendisk, delegate to __ssd_get_temperature()
 * under fw_mutex.
 */
7196 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
7198 struct ssd_device
*dev
;
/* reject NULL bdev/temp or a bdev without an attached disk */
7201 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
7205 dev
= bdev
->bd_disk
->private_data
;
/* serialized with firmware operations via fw_mutex */
7208 mutex_lock(&dev
->fw_mutex
);
7209 ret
= __ssd_get_temperature(dev
, temp
);
7210 mutex_unlock(&dev
->fw_mutex
);
/*
 * ssd_set_otprotect - exported API: enable/disable over-temperature
 * protection for a block device.
 * @otprotect is normalized to 0/1 (!!) before being handed to
 * ssd_set_ot_protect().
 */
7215 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
7217 struct ssd_device
*dev
;
/* reject NULL bdev or a bdev without an attached disk */
7219 if (!bdev
|| !(bdev
->bd_disk
)) {
7223 dev
= bdev
->bd_disk
->private_data
;
/* !! collapses any non-zero request to 1 */
7224 ssd_set_ot_protect(dev
, !!otprotect
);
7229 int ssd_bm_status(struct block_device
*bdev
, int *status
)
7231 struct ssd_device
*dev
;
7234 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7238 dev
= bdev
->bd_disk
->private_data
;
7240 mutex_lock(&dev
->fw_mutex
);
7241 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7242 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7243 *status
= SSD_BMSTATUS_WARNING
;
7245 *status
= SSD_BMSTATUS_OK
;
7247 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7248 ret
= __ssd_bm_status(dev
, status
);
7250 *status
= SSD_BMSTATUS_OK
;
7252 mutex_unlock(&dev
->fw_mutex
);
/*
 * ssd_get_pciaddr - exported API: report the PCI address of the SSD
 * backing a block device.
 * Fills @paddr with domain/bus/slot/function taken from dev->pdev.
 */
7257 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7259 struct ssd_device
*dev
;
/* reject NULL bdev/paddr or a bdev without an attached disk */
7261 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7265 dev
= bdev
->bd_disk
->private_data
;
/* PCI segment/domain number */
7267 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
/* bus number within the domain */
7268 paddr
->bus
= dev
->pdev
->bus
->number
;
/* slot and function decoded from devfn */
7269 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7270 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7276 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7281 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7285 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7286 if (0xffffffffull
== acc
->threshold_l1
) {
7289 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7290 if (0xffffffffull
== acc
->threshold_l2
) {
7295 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7296 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7297 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7298 if (0xffffffffull
== acc
->val
) {
7301 if (val
> acc
->val
) {
7310 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7315 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7319 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7320 if (0xffffffffull
== acc
->threshold_l1
) {
7323 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7324 if (0xffffffffull
== acc
->threshold_l2
) {
7329 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7330 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7331 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7332 if (0xffffffffull
== acc
->val
) {
7336 if (val
> acc
->val
) {
7347 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7349 struct ssd_ram_op_msg
*msg
;
7351 size_t len
= length
;
7355 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7356 || !length
|| length
> dev
->hw_info
.ram_max_len
7357 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7361 len
/= dev
->hw_info
.ram_align
;
7362 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7364 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7365 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7366 ret
= dma_mapping_error(buf_dma
);
7368 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7371 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7372 goto out_dma_mapping
;
7375 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7377 msg
->fun
= SSD_FUNC_RAM_READ
;
7378 msg
->ctrl_idx
= ctrl_idx
;
7379 msg
->start
= (uint32_t)ofs_w
;
7383 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7386 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7392 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7394 struct ssd_ram_op_msg
*msg
;
7396 size_t len
= length
;
7400 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7401 || !length
|| length
> dev
->hw_info
.ram_max_len
7402 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7406 len
/= dev
->hw_info
.ram_align
;
7407 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7409 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7410 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7411 ret
= dma_mapping_error(buf_dma
);
7413 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7416 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7417 goto out_dma_mapping
;
7420 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7422 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7423 msg
->ctrl_idx
= ctrl_idx
;
7424 msg
->start
= (uint32_t)ofs_w
;
7428 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7431 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7438 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7445 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7446 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7451 len
= dev
->hw_info
.ram_max_len
;
7452 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7456 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7469 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7476 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7477 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7482 len
= dev
->hw_info
.ram_max_len
;
7483 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7487 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7502 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7504 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7505 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7507 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7511 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7515 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7521 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7523 struct ssd_nand_op_msg
*msg
;
7530 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7531 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7532 ret
= dma_mapping_error(buf_dma
);
7534 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7537 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7538 goto out_dma_mapping
;
7541 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7542 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7546 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7548 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7549 msg
->chip_no
= flash
;
7550 msg
->chip_ce
= chip
;
7551 msg
->ctrl_idx
= ctrl_idx
;
7554 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7557 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7564 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7565 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7567 struct ssd_nand_op_msg
*msg
;
7576 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7580 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7585 length
= page_count
* dev
->hw_info
.page_size
;
7587 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7588 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7589 ret
= dma_mapping_error(buf_dma
);
7591 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7594 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7595 goto out_dma_mapping
;
7598 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7599 flash
= (flash
<< 1) | chip
;
7603 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7605 msg
->fun
= SSD_FUNC_NAND_READ
;
7606 msg
->ctrl_idx
= ctrl_idx
;
7607 msg
->chip_no
= flash
;
7608 msg
->chip_ce
= chip
;
7609 msg
->page_no
= page
;
7610 msg
->page_count
= page_count
;
7613 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7616 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7623 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7624 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7626 struct ssd_nand_op_msg
*msg
;
7635 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7639 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7644 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7646 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7647 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7648 ret
= dma_mapping_error(buf_dma
);
7650 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7653 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7654 goto out_dma_mapping
;
7657 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7658 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7662 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7664 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7665 msg
->ctrl_idx
= ctrl_idx
;
7666 msg
->chip_no
= flash
;
7667 msg
->chip_ce
= chip
;
7668 msg
->page_no
= page
;
7669 msg
->page_count
= count
;
7672 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7675 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7682 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7683 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7685 struct ssd_nand_op_msg
*msg
;
7690 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7702 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7707 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7709 /* write data to ram */
7710 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7715 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7716 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7717 ret
= dma_mapping_error(buf_dma
);
7719 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7722 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7723 goto out_dma_mapping
;
7726 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7727 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7731 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7733 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7734 msg
->ctrl_idx
= ctrl_idx
;
7735 msg
->chip_no
= flash
;
7736 msg
->chip_ce
= chip
;
7738 msg
->page_no
= page
;
7739 msg
->page_count
= count
;
7742 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7745 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7751 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7753 struct ssd_nand_op_msg
*msg
;
7756 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7761 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7762 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7766 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7768 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7769 msg
->ctrl_idx
= ctrl_idx
;
7770 msg
->chip_no
= flash
;
7771 msg
->chip_ce
= chip
;
7772 msg
->page_no
= page
;
7774 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7780 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7782 struct ssd_nand_op_msg
*msg
;
7783 struct ssd_flush_msg
*fmsg
;
7786 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7791 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7793 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7794 fmsg
= (struct ssd_flush_msg
*)msg
;
7796 fmsg
->fun
= SSD_FUNC_FLUSH
;
7798 fmsg
->flash
= flash
;
7799 fmsg
->ctrl_idx
= ctrl_idx
;
7801 msg
->fun
= SSD_FUNC_FLUSH
;
7803 msg
->chip_no
= flash
;
7804 msg
->ctrl_idx
= ctrl_idx
;
7807 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7813 /* flash controller init state */
7814 static int __ssd_check_init_state(struct ssd_device
*dev
)
7816 uint32_t *init_state
= NULL
;
7817 int reg_base
, reg_sz
;
7818 int max_wait
= SSD_INIT_MAX_WAIT
;
7824 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7825 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7826 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7827 if (read_data == ~test_data) {
7828 //dev->hw_info.nr_ctrl++;
7829 dev->hw_info.nr_ctrl_map |= 1<<i;
7835 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7837 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7838 if (((read_data>>i) & 0x1) == 0) {
7843 if (dev->hw_info.nr_ctrl != j) {
7844 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
7850 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7851 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7852 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7853 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7859 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7860 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7861 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7862 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7867 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7868 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7869 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7870 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
7876 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7877 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7880 reg_base
= dev
->protocol_info
.init_state_reg
;
7881 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
7883 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7888 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7890 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7891 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7894 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7895 /* just check the last bit, no need to check all channel */
7896 ch_start
= dev
->hw_info
.max_ch
- 1;
7901 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7902 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7903 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7908 if (init_wait
<= max_wait
) {
7909 msleep(SSD_INIT_WAIT
);
7912 if (k
< dev
->hw_info
.nr_ch
) {
7913 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7914 dev
->name
, i
, j
, k
);
7916 hio_warn("%s: controller %d chip %d init failed\n",
7927 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
7933 static int ssd_check_init_state(struct ssd_device
*dev
)
7935 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7939 return __ssd_check_init_state(dev
);
7942 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7944 /* reset flash controller etc */
7945 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7948 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7952 mutex_lock(&dev
->fw_mutex
);
7954 if (type
== SSD_RST_NOINIT
) { //no init
7955 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7956 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7957 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7958 } else { // full reset
7959 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7960 mutex_unlock(&dev
->fw_mutex
);
7964 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7967 ssd_reset_resp_ptr(dev
);
7970 #ifdef SSD_OT_PROTECT
7977 ssd_set_flush_timeout(dev
, dev
->wmode
);
7979 mutex_unlock(&dev
->fw_mutex
);
7980 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7981 do_gettimeofday(&tv
);
7982 dev
->reset_time
= tv
.tv_sec
;
7984 return __ssd_check_init_state(dev
);
7987 static int ssd_save_md(struct ssd_device
*dev
)
7989 struct ssd_nand_op_msg
*msg
;
7992 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7995 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7999 if (!dev
->save_md
) {
8003 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8005 msg
->fun
= SSD_FUNC_FLUSH
;
8010 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8016 static int ssd_barrier_save_md(struct ssd_device
*dev
)
8018 struct ssd_nand_op_msg
*msg
;
8021 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8024 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8028 if (!dev
->save_md
) {
8032 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8034 msg
->fun
= SSD_FUNC_FLUSH
;
8039 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8045 static int ssd_flush(struct ssd_device
*dev
)
8047 struct ssd_nand_op_msg
*msg
;
8048 struct ssd_flush_msg
*fmsg
;
8051 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8054 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8056 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8057 fmsg
= (struct ssd_flush_msg
*)msg
;
8059 fmsg
->fun
= SSD_FUNC_FLUSH
;
8064 msg
->fun
= SSD_FUNC_FLUSH
;
8070 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8076 static int ssd_barrier_flush(struct ssd_device
*dev
)
8078 struct ssd_nand_op_msg
*msg
;
8079 struct ssd_flush_msg
*fmsg
;
8082 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8085 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8087 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8088 fmsg
= (struct ssd_flush_msg
*)msg
;
8090 fmsg
->fun
= SSD_FUNC_FLUSH
;
8095 msg
->fun
= SSD_FUNC_FLUSH
;
8101 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8107 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
8108 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
8109 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
8110 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
8115 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8120 case SSD_WMODE_BUFFER
:
8121 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8123 case SSD_WMODE_BUFFER_EX
:
8124 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
8125 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
8127 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8131 to
= SSD_WMODE_FUA_TIMEOUT
;
8137 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
8139 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
8142 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
8146 ret
= ssd_barrier_start(dev
);
8151 ret
= ssd_barrier_flush(dev
);
8153 goto out_barrier_end
;
8156 /* set contoller flush timeout */
8157 ssd_set_flush_timeout(dev
, m
);
8163 ssd_barrier_end(dev
);
8168 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
8174 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8178 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8179 default_wmode
= SSD_WMODE_BUFFER
;
8181 default_wmode
= SSD_WMODE_BUFFER_EX
;
8184 if (SSD_WMODE_AUTO
== m
) {
8185 /* battery fault ? */
8186 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8187 next_wmode
= SSD_WMODE_FUA
;
8189 next_wmode
= default_wmode
;
8191 } else if (SSD_WMODE_DEFAULT
== m
) {
8192 next_wmode
= default_wmode
;
8197 if (next_wmode
!= dev
->wmode
) {
8198 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8199 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
8201 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8208 static int ssd_init_wmode(struct ssd_device
*dev
)
8213 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8214 default_wmode
= SSD_WMODE_BUFFER
;
8216 default_wmode
= SSD_WMODE_BUFFER_EX
;
8220 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
8221 /* battery fault ? */
8222 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8223 dev
->wmode
= SSD_WMODE_FUA
;
8225 dev
->wmode
= default_wmode
;
8227 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
8228 dev
->wmode
= default_wmode
;
8230 dev
->wmode
= dev
->user_wmode
;
8232 ssd_set_flush_timeout(dev
, dev
->wmode
);
8237 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8241 /* not support old fw*/
8242 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8247 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8252 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8254 dev
->user_wmode
= m
;
8256 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
8265 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8267 struct ssd_device
*dev
;
8269 if (!bdev
|| !(bdev
->bd_disk
)) {
8273 dev
= bdev
->bd_disk
->private_data
;
8275 return __ssd_set_wmode(dev
, m
);
8278 static int ssd_do_reset(struct ssd_device
*dev
)
8282 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8286 ssd_stop_workq(dev
);
8288 ret
= ssd_barrier_start(dev
);
8293 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8295 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8298 //ret = __ssd_reset(dev, SSD_RST_FULL);
8299 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8302 goto out_barrier_end
;
8306 ssd_barrier_end(dev
);
8308 ssd_start_workq(dev
);
8309 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8313 static int ssd_full_reset(struct ssd_device
*dev
)
8317 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8321 ssd_stop_workq(dev
);
8323 ret
= ssd_barrier_start(dev
);
8328 ret
= ssd_barrier_flush(dev
);
8330 goto out_barrier_end
;
8333 ret
= ssd_barrier_save_md(dev
);
8335 goto out_barrier_end
;
8338 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8340 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8343 //ret = __ssd_reset(dev, SSD_RST_FULL);
8344 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8347 goto out_barrier_end
;
8351 ssd_barrier_end(dev
);
8353 ssd_start_workq(dev
);
8354 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8358 int ssd_reset(struct block_device
*bdev
)
8361 struct ssd_device
*dev
;
8363 if (!bdev
|| !(bdev
->bd_disk
)) {
8367 dev
= bdev
->bd_disk
->private_data
;
8369 ret
= ssd_full_reset(dev
);
8371 if (!dev
->has_non_0x98_reg_access
) {
8372 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, 0);
8379 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
8380 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8381 sector_t
*error_sector
)
8383 struct ssd_device
*dev
= q
->queuedata
;
8385 return ssd_flush(dev
);
8389 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8391 struct ssd_device
*dev
= q
->queuedata
;
8392 #ifdef SSD_QUEUE_PBIO
8396 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8397 ssd_bio_endio(bio
, -ENODEV
);
8401 #ifdef SSD_DEBUG_ERR
8402 if (atomic_read(&dev
->tocnt
)) {
8403 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8404 ssd_bio_endio(bio
, -EIO
);
8409 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8410 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8414 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8415 ssd_bio_endio(bio
, -EROFS
);
8419 #ifdef SSD_QUEUE_PBIO
8420 if (0 == atomic_read(&dev
->in_sendq
)) {
8421 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8425 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8426 ssd_queue_bio(dev
, bio
);
8429 __ssd_submit_pbio(dev
, bio
, 1);
8436 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8437 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8438 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8439 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8441 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8444 struct ssd_device
*dev
= q
->queuedata
;
8447 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8448 ssd_bio_endio(bio
, -ENODEV
);
8452 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
8453 blk_queue_split(q
, &bio
, q
->bio_split
);
8456 #ifdef SSD_DEBUG_ERR
8457 if (atomic_read(&dev
->tocnt
)) {
8458 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8459 ssd_bio_endio(bio
, -EIO
);
8464 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8465 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8469 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8470 if (unlikely(ssd_bio_has_flush(bio
) && !bio_sectors(bio
))) {
8471 ssd_bio_endio(bio
, 0);
8475 if (0 == atomic_read(&dev
->in_sendq
)) {
8476 ret
= ssd_submit_bio(dev
, bio
, 0);
8480 ssd_queue_bio(dev
, bio
);
8484 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8485 return BLK_QC_T_NONE
;
8486 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8493 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
8494 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8496 struct ssd_device
*dev
;
8502 dev
= bdev
->bd_disk
->private_data
;
8509 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8514 static int ssd_init_queue(struct ssd_device
*dev
);
8515 static void ssd_cleanup_queue(struct ssd_device
*dev
);
8516 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8517 static int ssd_init_blkdev(struct ssd_device
*dev
);
8518 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8520 void __user
*argp
= (void __user
*)arg
;
8521 void __user
*buf
= NULL
;
8526 case SSD_CMD_GET_PROTOCOL_INFO
:
8527 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8528 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8534 case SSD_CMD_GET_HW_INFO
:
8535 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8536 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8542 case SSD_CMD_GET_ROM_INFO
:
8543 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8544 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8550 case SSD_CMD_GET_SMART
: {
8551 struct ssd_smart smart
;
8554 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8556 mutex_lock(&dev
->gd_mutex
);
8557 ssd_update_smart(dev
, &smart
);
8558 mutex_unlock(&dev
->gd_mutex
);
8560 /* combine the volatile log info */
8561 if (dev
->log_info
.nr_log
) {
8562 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8563 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8567 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8568 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8576 case SSD_CMD_GET_IDX
:
8577 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8578 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8584 case SSD_CMD_GET_AMOUNT
: {
8585 int nr_ssd
= atomic_read(&ssd_nr
);
8586 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8587 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8594 case SSD_CMD_GET_TO_INFO
: {
8595 int tocnt
= atomic_read(&dev
->tocnt
);
8597 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8598 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8605 case SSD_CMD_GET_DRV_VER
: {
8606 char ver
[] = DRIVER_VERSION
;
8607 int len
= sizeof(ver
);
8609 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8610 len
= (DRIVER_VERSION_LEN
- 1);
8612 if (copy_to_user(argp
, ver
, len
)) {
8613 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8620 case SSD_CMD_GET_BBACC_INFO
: {
8621 struct ssd_acc_info acc
;
8623 mutex_lock(&dev
->fw_mutex
);
8624 ret
= ssd_bb_acc(dev
, &acc
);
8625 mutex_unlock(&dev
->fw_mutex
);
8630 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8631 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8638 case SSD_CMD_GET_ECACC_INFO
: {
8639 struct ssd_acc_info acc
;
8641 mutex_lock(&dev
->fw_mutex
);
8642 ret
= ssd_ec_acc(dev
, &acc
);
8643 mutex_unlock(&dev
->fw_mutex
);
8648 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8649 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8656 case SSD_CMD_GET_HW_INFO_EXT
:
8657 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8658 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8664 case SSD_CMD_REG_READ
: {
8665 struct ssd_reg_op_info reg_info
;
8667 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8668 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8673 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8678 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8679 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8680 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8688 case SSD_CMD_REG_WRITE
: {
8689 struct ssd_reg_op_info reg_info
;
8691 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8692 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8697 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8702 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8707 case SSD_CMD_SPI_READ
: {
8708 struct ssd_spi_op_info spi_info
;
8711 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8712 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8718 size
= spi_info
.len
;
8721 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8726 kbuf
= kmalloc(size
, GFP_KERNEL
);
8732 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8738 if (copy_to_user(buf
, kbuf
, size
)) {
8739 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8750 case SSD_CMD_SPI_WRITE
: {
8751 struct ssd_spi_op_info spi_info
;
8754 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8755 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8761 size
= spi_info
.len
;
8764 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8769 kbuf
= kmalloc(size
, GFP_KERNEL
);
8775 if (copy_from_user(kbuf
, buf
, size
)) {
8776 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8782 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8793 case SSD_CMD_SPI_ERASE
: {
8794 struct ssd_spi_op_info spi_info
;
8797 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8798 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8805 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8810 ret
= ssd_spi_block_erase(dev
, off
);
8818 case SSD_CMD_I2C_READ
: {
8819 struct ssd_i2c_op_info i2c_info
;
8823 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8824 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8829 saddr
= i2c_info
.saddr
;
8830 rsize
= i2c_info
.rsize
;
8831 buf
= i2c_info
.rbuf
;
8833 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8838 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8844 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8850 if (copy_to_user(buf
, kbuf
, rsize
)) {
8851 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8862 case SSD_CMD_I2C_WRITE
: {
8863 struct ssd_i2c_op_info i2c_info
;
8867 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8868 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8873 saddr
= i2c_info
.saddr
;
8874 wsize
= i2c_info
.wsize
;
8875 buf
= i2c_info
.wbuf
;
8877 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8882 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8888 if (copy_from_user(kbuf
, buf
, wsize
)) {
8889 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8895 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8906 case SSD_CMD_I2C_WRITE_READ
: {
8907 struct ssd_i2c_op_info i2c_info
;
8913 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8914 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8919 saddr
= i2c_info
.saddr
;
8920 wsize
= i2c_info
.wsize
;
8921 rsize
= i2c_info
.rsize
;
8922 buf
= i2c_info
.wbuf
;
8924 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8929 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8934 size
= wsize
+ rsize
;
8936 kbuf
= kmalloc(size
, GFP_KERNEL
);
8942 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8943 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8949 buf
= i2c_info
.rbuf
;
8951 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8957 if (copy_to_user(buf
, kbuf
, rsize
)) {
8958 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8969 case SSD_CMD_SMBUS_SEND_BYTE
: {
8970 struct ssd_smbus_op_info smbus_info
;
8971 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8975 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8976 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8981 saddr
= smbus_info
.saddr
;
8982 buf
= smbus_info
.buf
;
8985 if (copy_from_user(smb_data
, buf
, size
)) {
8986 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8991 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
8999 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
9000 struct ssd_smbus_op_info smbus_info
;
9001 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9005 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9006 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9011 saddr
= smbus_info
.saddr
;
9012 buf
= smbus_info
.buf
;
9015 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
9020 if (copy_to_user(buf
, smb_data
, size
)) {
9021 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9029 case SSD_CMD_SMBUS_WRITE_BYTE
: {
9030 struct ssd_smbus_op_info smbus_info
;
9031 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9036 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9037 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9042 saddr
= smbus_info
.saddr
;
9043 command
= smbus_info
.cmd
;
9044 buf
= smbus_info
.buf
;
9047 if (copy_from_user(smb_data
, buf
, size
)) {
9048 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9053 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
9061 case SSD_CMD_SMBUS_READ_BYTE
: {
9062 struct ssd_smbus_op_info smbus_info
;
9063 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9068 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9069 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9074 saddr
= smbus_info
.saddr
;
9075 command
= smbus_info
.cmd
;
9076 buf
= smbus_info
.buf
;
9079 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
9084 if (copy_to_user(buf
, smb_data
, size
)) {
9085 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9093 case SSD_CMD_SMBUS_WRITE_WORD
: {
9094 struct ssd_smbus_op_info smbus_info
;
9095 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9100 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9101 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9106 saddr
= smbus_info
.saddr
;
9107 command
= smbus_info
.cmd
;
9108 buf
= smbus_info
.buf
;
9111 if (copy_from_user(smb_data
, buf
, size
)) {
9112 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9117 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
9125 case SSD_CMD_SMBUS_READ_WORD
: {
9126 struct ssd_smbus_op_info smbus_info
;
9127 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9132 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9133 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9138 saddr
= smbus_info
.saddr
;
9139 command
= smbus_info
.cmd
;
9140 buf
= smbus_info
.buf
;
9143 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
9148 if (copy_to_user(buf
, smb_data
, size
)) {
9149 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9157 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
9158 struct ssd_smbus_op_info smbus_info
;
9159 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9164 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9165 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9170 saddr
= smbus_info
.saddr
;
9171 command
= smbus_info
.cmd
;
9172 buf
= smbus_info
.buf
;
9173 size
= smbus_info
.size
;
9175 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9180 if (copy_from_user(smb_data
, buf
, size
)) {
9181 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9186 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9194 case SSD_CMD_SMBUS_READ_BLOCK
: {
9195 struct ssd_smbus_op_info smbus_info
;
9196 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9201 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9202 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9207 saddr
= smbus_info
.saddr
;
9208 command
= smbus_info
.cmd
;
9209 buf
= smbus_info
.buf
;
9210 size
= smbus_info
.size
;
9212 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9217 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9222 if (copy_to_user(buf
, smb_data
, size
)) {
9223 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9231 case SSD_CMD_BM_GET_VER
: {
9234 ret
= ssd_bm_get_version(dev
, &ver
);
9239 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9240 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9248 case SSD_CMD_BM_GET_NR_CAP
: {
9251 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9256 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9257 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9265 case SSD_CMD_BM_CAP_LEARNING
: {
9266 ret
= ssd_bm_enter_cap_learning(dev
);
9275 case SSD_CMD_CAP_LEARN
: {
9278 ret
= ssd_cap_learn(dev
, &cap
);
9283 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9284 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9292 case SSD_CMD_GET_CAP_STATUS
: {
9295 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9299 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9300 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9308 case SSD_CMD_RAM_READ
: {
9309 struct ssd_ram_op_info ram_info
;
9312 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9315 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9316 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9321 ofs
= ram_info
.start
;
9322 length
= ram_info
.length
;
9324 ctrl_idx
= ram_info
.ctrl_idx
;
9326 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9331 kbuf
= kmalloc(len
, GFP_KERNEL
);
9337 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9338 if ((length
- rlen
) < len
) {
9339 len
= length
- rlen
;
9342 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9347 if (copy_to_user(buf
, kbuf
, len
)) {
9358 case SSD_CMD_RAM_WRITE
: {
9359 struct ssd_ram_op_info ram_info
;
9362 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9365 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9366 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9370 ofs
= ram_info
.start
;
9371 length
= ram_info
.length
;
9373 ctrl_idx
= ram_info
.ctrl_idx
;
9375 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9380 kbuf
= kmalloc(len
, GFP_KERNEL
);
9386 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9387 if ((length
- wlen
) < len
) {
9388 len
= length
- wlen
;
9391 if (copy_from_user(kbuf
, buf
, len
)) {
9396 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9407 case SSD_CMD_NAND_READ_ID
: {
9408 struct ssd_flash_op_info flash_info
;
9409 int chip_no
, chip_ce
, length
, ctrl_idx
;
9411 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9412 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9417 chip_no
= flash_info
.flash
;
9418 chip_ce
= flash_info
.chip
;
9419 ctrl_idx
= flash_info
.ctrl_idx
;
9420 buf
= flash_info
.buf
;
9421 length
= dev
->hw_info
.id_size
;
9423 //kbuf = kmalloc(length, GFP_KERNEL);
9424 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9429 memset(kbuf
, 0, length
);
9431 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9437 if (copy_to_user(buf
, kbuf
, length
)) {
9448 case SSD_CMD_NAND_READ
: { //with oob
9449 struct ssd_flash_op_info flash_info
;
9451 int flash
, chip
, page
, ctrl_idx
;
9454 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9455 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9460 flash
= flash_info
.flash
;
9461 chip
= flash_info
.chip
;
9462 page
= flash_info
.page
;
9463 buf
= flash_info
.buf
;
9464 ctrl_idx
= flash_info
.ctrl_idx
;
9466 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9468 kbuf
= kmalloc(length
, GFP_KERNEL
);
9474 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9475 if (ret
&& (-EIO
!= ret
)) {
9480 if (copy_to_user(buf
, kbuf
, length
)) {
9492 case SSD_CMD_NAND_WRITE
: {
9493 struct ssd_flash_op_info flash_info
;
9494 int flash
, chip
, page
, ctrl_idx
;
9497 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9498 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9503 flash
= flash_info
.flash
;
9504 chip
= flash_info
.chip
;
9505 page
= flash_info
.page
;
9506 buf
= flash_info
.buf
;
9507 ctrl_idx
= flash_info
.ctrl_idx
;
9509 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9511 kbuf
= kmalloc(length
, GFP_KERNEL
);
9517 if (copy_from_user(kbuf
, buf
, length
)) {
9523 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9533 case SSD_CMD_NAND_ERASE
: {
9534 struct ssd_flash_op_info flash_info
;
9535 int flash
, chip
, page
, ctrl_idx
;
9537 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9538 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9543 flash
= flash_info
.flash
;
9544 chip
= flash_info
.chip
;
9545 page
= flash_info
.page
;
9546 ctrl_idx
= flash_info
.ctrl_idx
;
9548 if ((page
% dev
->hw_info
.page_count
) != 0) {
9553 //hio_warn("erase fs = %llx\n", ofs);
9554 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9562 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9563 struct ssd_flash_op_info flash_info
;
9565 int flash
, chip
, page
, ctrl_idx
;
9567 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9568 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9573 flash
= flash_info
.flash
;
9574 chip
= flash_info
.chip
;
9575 page
= flash_info
.page
;
9576 buf
= flash_info
.buf
;
9577 ctrl_idx
= flash_info
.ctrl_idx
;
9579 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9581 kbuf
= kmalloc(length
, GFP_KERNEL
);
9587 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9588 if (-EIO
== ret
) { //ingore EIO
9596 if (copy_to_user(buf
, kbuf
, length
)) {
9606 case SSD_CMD_UPDATE_BBT
: {
9607 struct ssd_flash_op_info flash_info
;
9608 int ctrl_idx
, flash
;
9610 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9611 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9616 ctrl_idx
= flash_info
.ctrl_idx
;
9617 flash
= flash_info
.flash
;
9618 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9626 case SSD_CMD_CLEAR_ALARM
:
9627 ssd_clear_alarm(dev
);
9630 case SSD_CMD_SET_ALARM
:
9635 ret
= ssd_do_reset(dev
);
9638 case SSD_CMD_RELOAD_FW
:
9640 dev
->has_non_0x98_reg_access
= 1;
9641 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9642 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9643 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9644 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9649 case SSD_CMD_UNLOAD_DEV
: {
9650 if (atomic_read(&dev
->refcnt
)) {
9656 ssd_save_smart(dev
);
9658 ret
= ssd_flush(dev
);
9663 /* cleanup the block device */
9664 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9665 mutex_lock(&dev
->gd_mutex
);
9666 ssd_cleanup_blkdev(dev
);
9667 ssd_cleanup_queue(dev
);
9668 mutex_unlock(&dev
->gd_mutex
);
9674 case SSD_CMD_LOAD_DEV
: {
9676 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9681 ret
= ssd_init_smart(dev
);
9683 hio_warn("%s: init info: failed\n", dev
->name
);
9687 ret
= ssd_init_queue(dev
);
9689 hio_warn("%s: init queue failed\n", dev
->name
);
9692 ret
= ssd_init_blkdev(dev
);
9694 hio_warn("%s: register block device: failed\n", dev
->name
);
9697 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9702 case SSD_CMD_UPDATE_VP
: {
9704 uint32_t new_vp
, new_vp1
= 0;
9706 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9711 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9712 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9717 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9722 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9723 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9725 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9726 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9727 new_vp1
= val
& 0x3FF;
9729 new_vp1
= val
& 0x7FFF;
9732 if (new_vp1
== new_vp
) {
9737 /*if (new_vp == dev->hw_info.valid_pages) {
9742 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9744 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9749 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9750 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9751 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9757 dev
->hw_info
.valid_pages
= new_vp
;
9758 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9759 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9760 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9765 case SSD_CMD_FULL_RESET
: {
9766 ret
= ssd_full_reset(dev
);
9770 case SSD_CMD_GET_NR_LOG
: {
9771 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9778 case SSD_CMD_GET_LOG
: {
9779 uint32_t length
= dev
->rom_info
.log_sz
;
9783 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9791 case SSD_CMD_LOG_LEVEL
: {
9793 if (copy_from_user(&level
, argp
, sizeof(int))) {
9794 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9799 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9800 level
= SSD_LOG_LEVEL_ERR
;
9803 //just for showing log, no need to protect
9808 case SSD_CMD_OT_PROTECT
: {
9811 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9812 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9817 ssd_set_ot_protect(dev
, !!protect
);
9821 case SSD_CMD_GET_OT_STATUS
: {
9822 int status
= ssd_get_ot_status(dev
, &status
);
9824 if (copy_to_user(argp
, &status
, sizeof(int))) {
9825 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9832 case SSD_CMD_CLEAR_LOG
: {
9833 ret
= ssd_clear_log(dev
);
9837 case SSD_CMD_CLEAR_SMART
: {
9838 ret
= ssd_clear_smart(dev
);
9842 case SSD_CMD_CLEAR_WARNING
: {
9843 ret
= ssd_clear_warning(dev
);
9847 case SSD_CMD_SW_LOG
: {
9848 struct ssd_sw_log_info sw_log
;
9850 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9851 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9856 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9860 case SSD_CMD_GET_LABEL
: {
9862 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9867 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9868 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9875 case SSD_CMD_GET_VERSION
: {
9876 struct ssd_version_info ver
;
9878 mutex_lock(&dev
->fw_mutex
);
9879 ret
= __ssd_get_version(dev
, &ver
);
9880 mutex_unlock(&dev
->fw_mutex
);
9885 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9886 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9893 case SSD_CMD_GET_TEMPERATURE
: {
9896 mutex_lock(&dev
->fw_mutex
);
9897 ret
= __ssd_get_temperature(dev
, &temp
);
9898 mutex_unlock(&dev
->fw_mutex
);
9903 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9904 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9911 case SSD_CMD_GET_BMSTATUS
: {
9914 mutex_lock(&dev
->fw_mutex
);
9915 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9916 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9917 status
= SSD_BMSTATUS_WARNING
;
9919 status
= SSD_BMSTATUS_OK
;
9921 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9922 ret
= __ssd_bm_status(dev
, &status
);
9924 status
= SSD_BMSTATUS_OK
;
9926 mutex_unlock(&dev
->fw_mutex
);
9931 if (copy_to_user(argp
, &status
, sizeof(int))) {
9932 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9939 case SSD_CMD_GET_LABEL2
: {
9943 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9944 label
= &dev
->label
;
9945 length
= sizeof(struct ssd_label
);
9947 label
= &dev
->labelv3
;
9948 length
= sizeof(struct ssd_labelv3
);
9951 if (copy_to_user(argp
, label
, length
)) {
9959 ret
= ssd_flush(dev
);
9961 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9967 case SSD_CMD_SAVE_MD
: {
9970 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9971 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9976 dev
->save_md
= !!save_md
;
9980 case SSD_CMD_SET_WMODE
: {
9983 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9984 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9989 ret
= __ssd_set_wmode(dev
, new_wmode
);
9997 case SSD_CMD_GET_WMODE
: {
9998 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
9999 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10007 case SSD_CMD_GET_USER_WMODE
: {
10008 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
10009 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10017 case SSD_CMD_DEBUG
: {
10018 struct ssd_debug_info db_info
;
10025 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
10026 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10031 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
10037 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
10038 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
10043 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
10045 #ifdef SSD_OT_PROTECT
10047 if (db_info
.type
== SSD_DEBUG_NONE
) {
10048 ssd_check_temperature(dev
, SSD_OT_TEMP
);
10049 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
10050 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
10051 dev
->ot_delay
= SSD_OT_DELAY
;
10052 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
10059 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
10060 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
10061 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
10062 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
10066 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
10067 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
10073 case SSD_CMD_DRV_PARAM_INFO
: {
10074 struct ssd_drv_param_info drv_param
;
10076 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
10078 drv_param
.mode
= mode
;
10079 drv_param
.status_mask
= status_mask
;
10080 drv_param
.int_mode
= int_mode
;
10081 drv_param
.threaded_irq
= threaded_irq
;
10082 drv_param
.log_level
= log_level
;
10083 drv_param
.wmode
= wmode
;
10084 drv_param
.ot_protect
= ot_protect
;
10085 drv_param
.finject
= finject
;
10087 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
10088 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10104 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10105 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
10106 unsigned int cmd
, unsigned long arg
)
10108 struct ssd_device
*dev
;
10109 void __user
*argp
= (void __user
*)arg
;
10115 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10120 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
10121 unsigned int cmd
, unsigned long arg
)
10123 struct ssd_device
*dev
;
10124 void __user
*argp
= (void __user
*)arg
;
10131 dev
= bdev
->bd_disk
->private_data
;
10138 case HDIO_GETGEO
: {
10139 struct hd_geometry geo
;
10140 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
10143 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10144 geo
.start
= get_start_sect(inode
->i_bdev
);
10146 geo
.start
= get_start_sect(bdev
);
10148 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
10157 ret
= ssd_flush(dev
);
10159 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
10167 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * ssd_free_dev(): kref release callback.  Recovers the ssd_device from
 * the embedded kref and returns the device index to the allocator.
 * ssd_put()/ssd_get() are thin reference-count wrappers around it.
 */
10178 static void ssd_free_dev(struct kref
*kref
)
10180 struct ssd_device
*dev
;
10186 dev
= container_of(kref
, struct ssd_device
, kref
);
10190 ssd_put_index(dev
->slave
, dev
->idx
);
/* Drop one reference; ssd_free_dev runs when the count reaches zero. */
10195 static void ssd_put(struct ssd_device
*dev
)
10197 kref_put(&dev
->kref
, ssd_free_dev
);
/* Take one reference.  Return value not visible here -- presumably 0. */
10200 static int ssd_get(struct ssd_device
*dev
)
10202 kref_get(&dev
->kref
);
/*
 * ssd_block_open(): block device open.  Version-guarded prototypes;
 * both resolve the ssd_device from gendisk private_data and bump the
 * open counter.  The module-refcount variant is left commented out.
 */
10207 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10208 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10210 struct ssd_device
*dev
;
10216 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10221 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10223 struct ssd_device
*dev
;
10229 dev
= bdev
->bd_disk
->private_data
;
10235 /*if (!try_module_get(dev->owner))
/* Track concurrent openers; checked e.g. before SSD_CMD_UNLOAD_DEV. */
10241 atomic_inc(&dev
->refcnt
);
/*
 * ssd_block_release(): block device close.  Three prototypes for the
 * <=2.6.27 (inode/file), <=3.9 (int return) and newer (void return)
 * kernel APIs; all drop the open counter taken in ssd_block_open().
 */
10246 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10247 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10249 struct ssd_device
*dev
;
10255 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10259 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10260 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10262 struct ssd_device
*dev
;
10268 dev
= disk
->private_data
;
10273 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10275 struct ssd_device
*dev
;
10281 dev
= disk
->private_data
;
10287 atomic_dec(&dev
->refcnt
);
10291 //module_put(dev->owner);
10292 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
/*
 * Block device operations table; .getgeo only exists on >= 2.6.16
 * kernels, so it is guarded separately.
 */
10297 static struct block_device_operations ssd_fops
= {
10298 .owner
= THIS_MODULE
,
10299 .open
= ssd_block_open
,
10300 .release
= ssd_block_release
,
10301 .ioctl
= ssd_block_ioctl
,
10302 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10303 .getgeo
= ssd_block_getgeo
,
/*
 * ssd_init_trim(): enable discard (TRIM) support on the request queue.
 * Compiled in only when SSD_TRIM is defined and the kernel is >= 2.6.32;
 * protocol versions <= V3 take the early-out branch (body not visible).
 * Discard limits use 4 KiB alignment/granularity; the max discard size
 * depends on the protocol version.
 */
10307 static void ssd_init_trim(ssd_device_t
*dev
)
10309 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10310 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10313 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10315 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10316 dev
->rq
->limits
.discard_zeroes_data
= 1;
10317 dev
->rq
->limits
.discard_alignment
= 4096;
10318 dev
->rq
->limits
.discard_granularity
= 4096;
/* Older protocols: one scatter entry per discard; newer: whole command. */
10320 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10321 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10323 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
/*
 * ssd_cleanup_queue(): tear down the request queue created by
 * ssd_init_queue().  (Intervening lines not visible in this extract.)
 */
10328 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10332 blk_cleanup_queue(dev
->rq
);
/*
 * ssd_init_queue(): allocate and configure the bio-based request queue.
 * Sets segment/sector limits from hw_info, 512-byte logical blocks,
 * high-memory bounce limit, marks the queue non-rotational and enables
 * TRIM via ssd_init_trim().  Version guards select the pre-/post-2.6.34
 * limit APIs and the pre-2.6.31 hardsect call.
 */
10336 static int ssd_init_queue(struct ssd_device
*dev
)
10338 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10339 if (dev
->rq
== NULL
) {
10340 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10341 goto out_init_queue
;
10344 /* must be first */
10345 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10347 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10348 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10349 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10350 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10352 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10353 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10356 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10357 blk_queue_hardsect_size(dev
->rq
, 512);
10359 blk_queue_logical_block_size(dev
->rq
, 512);
10361 /* not work for make_request based drivers(bio) */
10362 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10364 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10366 dev
->rq
->queuedata
= dev
;
10368 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10369 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
/* SSDs are non-rotational -- disable elevator-style assumptions. */
10372 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10373 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10376 ssd_init_trim(dev
);
/*
 * ssd_cleanup_blkdev(): unregister the gendisk created by
 * ssd_init_blkdev().  (Further teardown lines not visible here.)
 */
10384 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10386 del_gendisk(dev
->gd
);
/*
 * ssd_init_blkdev(): allocate and publish the gendisk.  Minor numbers
 * are partitioned per device index (idx * ssd_minors); capacity is the
 * byte size converted to 512-byte sectors.  On >= 4.8 kernels the disk
 * is registered with device_add_disk(); older kernels set driverfs_dev
 * and (presumably) call add_disk() -- that line is not visible here.
 */
10389 static int ssd_init_blkdev(struct ssd_device
*dev
)
10395 dev
->gd
= alloc_disk(ssd_minors
);
10397 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10400 dev
->gd
->major
= dev
->major
;
10401 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10402 dev
->gd
->fops
= &ssd_fops
;
10403 dev
->gd
->queue
= dev
->rq
;
10404 dev
->gd
->private_data
= dev
;
10406 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10408 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10410 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
10411 device_add_disk(&dev
->pdev
->dev
, dev
->gd
);
10413 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
/*
 * ssd_ioctl(): char-device ioctl entry.  Old kernels (<= 2.6.10) use
 * the inode/file prototype; newer ones the unlocked_ioctl (long)
 * prototype.  Both delegate to ssd_ioctl_common().
 */
10423 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10424 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10425 unsigned int cmd
, unsigned long arg
)
10427 static long ssd_ioctl(struct file
*file
,
10428 unsigned int cmd
, unsigned long arg
)
10431 struct ssd_device
*dev
;
10437 dev
= file
->private_data
;
10442 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * ssd_open(): char-device open.  Maps the char device minor back to an
 * ssd_device by scanning the global ssd_list, then caches the match in
 * file->private_data for later ioctl/release calls.
 */
10445 static int ssd_open(struct inode
*inode
, struct file
*file
)
10447 struct ssd_device
*dev
= NULL
;
10448 struct ssd_device
*n
= NULL
;
10452 if (!inode
|| !file
) {
10456 idx
= iminor(inode
);
/* Linear search of the registered device list for a matching index. */
10458 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10459 if (dev
->idx
== idx
) {
10469 file
->private_data
= dev
;
/*
 * ssd_release(): char-device close -- clears the cached device pointer
 * set by ssd_open().
 */
10476 static int ssd_release(struct inode
*inode
, struct file
*file
)
10478 struct ssd_device
*dev
;
10484 dev
= file
->private_data
;
10491 file
->private_data
= NULL
;
/*
 * ssd_reload_ssd_ptr(): re-program the hardware's DMA base registers
 * after a reset -- response pointer state, the message base (protocol
 * >= V3 only), and the response FIFO/pointer bases.
 */
10496 static int ssd_reload_ssd_ptr(struct ssd_device
*dev
)
10498 ssd_reset_resp_ptr(dev
);
10500 //update base reg address
10501 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10503 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
10506 //update response base reg address
10507 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
10508 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
/*
 * Char-device file operations; .ioctl vs .unlocked_ioctl is selected by
 * kernel version (unlocked_ioctl appeared after 2.6.10).
 */
10513 static struct file_operations ssd_cfops
= {
10514 .owner
= THIS_MODULE
,
10516 .release
= ssd_release
,
10517 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10518 .ioctl
= ssd_ioctl
,
10520 .unlocked_ioctl
= ssd_ioctl
,
/*
 * ssd_cleanup_chardev(): remove the "c<name>" char device node created
 * by ssd_init_chardev().  Each version ladder rung matches the
 * class/devfs API that existed in that kernel range.
 */
10524 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10530 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10531 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10532 devfs_remove("c%s", dev
->name
);
10533 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10534 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10535 devfs_remove("c%s", dev
->name
);
10536 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10537 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10538 devfs_remove("c%s", dev
->name
);
10539 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10540 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10542 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
/*
 * ssd_init_chardev(): create the "c<name>" char device node.  A long
 * kernel-version ladder selects between devfs_mk_cdev +
 * class_simple_device_add / class_device_create (pre-2.6.18), plain
 * class_device_create, device_create (two signature generations) and
 * device_create_drvdata (2.6.27 only).  Device number is always
 * MKDEV(cmajor, idx).
 */
10546 static int ssd_init_chardev(struct ssd_device
*dev
)
10554 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10555 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10559 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10561 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10562 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10566 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10568 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10569 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10573 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10575 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10576 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10577 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10578 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10579 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10580 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10582 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
/*
 * ssd_check_hw(): bridge sanity check.  Writes a test pattern to the
 * bridge test register and expects the hardware to return its bitwise
 * complement; a mismatch indicates the bridge is absent or broken.
 */
10588 static int ssd_check_hw(struct ssd_device
*dev
)
10590 uint32_t test_data
= 0x55AA5AA5;
10591 uint32_t read_data
;
10593 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10594 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
/* Hardware echoes the complement of what was written. */
10595 if (read_data
!= ~(test_data
)) {
10596 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
/*
 * ssd_check_fw(): poll the hardware status register until both the
 * controller firmware-loaded bit (bit 0) and the controller-state bit
 * (bit 8) are set, sleeping SSD_INIT_WAIT between attempts (up to
 * SSD_CONTROLLER_WAIT tries).  Also samples the reload-FW register and
 * records that a firmware reload is pending.
 * Only applies to protocol >= V3_1_3; older versions take the early-out.
 */
10603 static int ssd_check_fw(struct ssd_device
*dev
)
10608 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10612 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10613 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10614 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10618 msleep(SSD_INIT_WAIT
);
10621 if (!(val
& 0x1)) {
10622 /* controller fw status */
10623 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10625 } else if (!((val
>> 8) & 0x1)) {
10626 /* controller state */
10627 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10631 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
/* Condition on val not visible here; flag a pending firmware reload. */
10633 dev
->reload_fw
= 1;
/*
 * ssd_init_fw_info(): read and validate the bridge firmware version
 * (low 12 bits of the bridge version register; must be >= SSD_FW_MIN),
 * then run ssd_check_fw().  Check failures are tolerated when the
 * driver is not in standard mode.
 */
10639 static int ssd_init_fw_info(struct ssd_device
*dev
)
10644 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10645 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10646 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10647 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10650 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10652 ret
= ssd_check_fw(dev
);
10658 /* skip error if not in standard mode */
10659 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_clock(): hardware-monitor check of the board clocks via the
 * HW status register (protocol >= V3_1_3 only).  Bit 4 = 166 MHz
 * present; on protocol >= V3_2 additionally bit 5 = 166 MHz skew,
 * bit 6 = 156.25 MHz present, bit 7 = 156.25 MHz skew.  Each fault is
 * latched in dev->hwmon via test_and_set_bit so it is warned and
 * logged (ssd_gen_swlog) only once.
 */
10665 static int ssd_check_clock(struct ssd_device
*dev
)
10670 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10674 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10677 if (!((val
>> 4 ) & 0x1)) {
10678 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10679 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10680 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10685 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10686 if (!((val
>> 5 ) & 0x1)) {
10687 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10688 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10689 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10693 if (!((val
>> 6 ) & 0x1)) {
10694 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10695 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10696 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10700 if (!((val
>> 7 ) & 0x1)) {
10701 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10702 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10703 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
/*
 * ssd_check_volt(): per-controller FPGA supply-rail check (protocol
 * >= V3_2 only).  For each controller, reads the 1.0V and 1.8V ADC
 * registers and validates both the sampled max and min values against
 * the rail's ADC window.  A rail out of range is latched once in
 * dev->hwmon, warned in millivolts, and recorded via ssd_gen_swlog
 * with an encoded (rail, controller, adc) payload.
 */
10712 static int ssd_check_volt(struct ssd_device
*dev
)
10719 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10723 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
/* 1.0V rail: skip controllers whose fault bit is already latched. */
10725 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10726 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10727 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10728 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10729 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10730 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10731 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10735 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10736 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10737 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10738 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10739 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
/* 1.8V rail: same latch-once pattern as the 1.0V rail above. */
10745 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10746 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10747 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10748 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10749 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10750 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10751 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10755 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10756 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10757 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10758 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10759 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
/*
 * ssd_check_reset_sync(): verify the controller came out of reset in a
 * sane state (protocol >= V3_1_3).  Bit 8 of the HW status register is
 * the controller-state flag; on protocol >= V3_2, bit 9 set means the
 * controllers reset asynchronously, which is warned and logged.
 */
10768 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10772 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10776 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10777 if (!((val
>> 8) & 0x1)) {
10778 /* controller state */
10779 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10783 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10787 if (((val
>> 9 ) & 0x1)) {
10788 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10789 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
/*
 * ssd_check_hw_bh(): second-phase hardware check -- runs the clock
 * check on protocol >= V3_1_3 devices; failures are tolerated when the
 * driver is not in standard mode.
 */
10796 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10800 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10805 ret
= ssd_check_clock(dev
);
10811 /* skip error if not in standard mode */
10812 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_controller(): first-phase controller check -- verifies
 * reset synchronization on protocol >= V3_1_3 devices; failures are
 * tolerated when the driver is not in standard mode.
 */
10818 static int ssd_check_controller(struct ssd_device
*dev
)
10822 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10827 ret
= ssd_check_reset_sync(dev
);
10833 /* skip error if not in standard mode */
10834 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_controller_bh(): second-phase controller check (standard
 * mode only).  Steps: (1) check the ready register for controller 0;
 * (2) write the test pattern to each controller's test register and
 * expect its complement back; (3) run the voltage check; (4) on
 * protocol > V3, poll each controller's RAM status register (bit 1 =
 * init done), retrying with msleep up to SSD_RAM_INIT_MAX_WAIT;
 * (5) poll the channel-info register until bit 31 clears.
 */
10840 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10842 uint32_t test_data
= 0x55AA5AA5;
10844 int reg_base
, reg_sz
;
10849 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10854 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10856 hio_warn("%s: controller 0 not ready\n", dev
->name
);
/* Per-controller echo test: hardware returns the pattern's complement. */
10860 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10861 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10862 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10863 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10864 if (val
!= ~(test_data
)) {
10865 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10871 ret
= ssd_check_volt(dev
);
10877 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10878 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10879 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10881 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10883 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10885 if (!((val
>> 1) & 0x1)) {
10887 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10888 msleep(SSD_INIT_WAIT
);
10889 goto check_ram_status
;
10891 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10892 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10897 reg_base
+= reg_sz
;
/* Channel info: bit 31 clears when the hardware has populated it. */
10902 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10903 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10904 if (!((val
>> 31) & 0x1)) {
10908 msleep(SSD_INIT_WAIT
);
10910 if ((val
>> 31) & 0x1) {
10911 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
/*
 * ssd_init_protocol_info(): read the protocol version register (all-1s
 * means the read failed, e.g. device fell off the bus) and select the
 * per-protocol register layout: pre-V3 devices use the legacy
 * init-state/chip-info register banks, V3+ use the PV3 banks.
 */
10918 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10922 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10923 if (val
== (uint32_t)-1) {
10924 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10927 dev
->protocol_info
.ver
= val
;
10929 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10930 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10931 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10933 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10934 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10936 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10937 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10939 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10940 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10946 static int ssd_init_hw_info(struct ssd_device
*dev
)
10954 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10955 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10956 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10958 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10959 hio_warn("%s: response info error\n", dev
->name
);
10964 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10965 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10966 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10967 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10968 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10970 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10971 hio_warn("%s: cmd info error\n", dev
->name
);
10977 if (ssd_check_hw_bh(dev
)) {
10978 hio_warn("%s: check hardware status failed\n", dev
->name
);
10983 if (ssd_check_controller(dev
)) {
10984 hio_warn("%s: check controller state failed\n", dev
->name
);
10989 /* nr controller : read again*/
10990 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10991 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
10993 /* nr ctrl configured */
10994 nr_ctrl
= (val
>> 20) & 0xF;
10995 if (0 == dev
->hw_info
.nr_ctrl
) {
10996 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
10999 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
11000 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
11001 if (mode
<= SSD_DRV_MODE_STANDARD
) {
11007 if (ssd_check_controller_bh(dev
)) {
11008 hio_warn("%s: check controller failed\n", dev
->name
);
11013 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11014 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
11015 if ((val
& 0xF) != 0xF) {
11016 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
11019 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
11020 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
11026 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11027 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11028 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
11029 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
11030 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
11032 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11033 dev
->hw_info
.max_ch
= 1;
11034 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
11036 /* set max channel 32 */
11037 dev
->hw_info
.max_ch
= 32;
11040 if (0 == dev
->hw_info
.nr_chip
) {
11042 dev
->hw_info
.nr_chip
= 1;
11046 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
11047 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
11049 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
11050 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
11057 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11058 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
11059 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
11060 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
11061 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
11062 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11063 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
11065 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
11070 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
11072 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
11073 hio_warn("%s: ram info error\n", dev
->name
);
11078 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11079 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
11081 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
11082 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
11084 if (0 == dev
->hw_info
.log_sz
) {
11085 hio_warn("%s: log size error\n", dev
->name
);
11090 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
11091 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
11092 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11093 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11094 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
11095 hio_warn("%s: bbt info error\n", dev
->name
);
11101 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
11102 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
11103 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11104 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11106 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
11108 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
11109 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
11110 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
11111 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
11112 hio_warn("%s: md info error\n", dev
->name
);
11118 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11119 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
11121 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
11122 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
11127 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11128 if (dev
->hw_info
.nr_ctrl
> 1) {
11129 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
11130 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
11131 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
11134 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
11135 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
11136 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
11138 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
11139 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
11140 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
11142 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
11143 dev
->hw_info
.bbf_pages
= val
& 0xFF;
11144 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
11146 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
11147 hio_warn("%s: flash info error\n", dev
->name
);
11153 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
11155 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
11156 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11157 dev
->hw_info
.valid_pages
= val
& 0x3FF;
11158 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
11160 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
11161 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
11163 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
11164 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
11165 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
11170 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
11171 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
11172 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
11173 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11174 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
11176 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
11177 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
11184 if (mode
< SSD_DRV_MODE_DEBUG
) {
11185 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
11186 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
11187 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
11190 /* extend hardware info */
11191 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11192 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
11194 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11195 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11196 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11199 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11200 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11201 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11204 /* power loss protect */
11205 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11206 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11207 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11209 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11213 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11214 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11217 /* skip error if not in standard mode */
11218 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11224 static void ssd_cleanup_response(struct ssd_device
*dev
)
11226 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11227 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11229 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11230 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11233 static int ssd_init_response(struct ssd_device
*dev
)
11235 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11236 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11238 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11239 if (!dev
->resp_msg_base
) {
11240 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11241 goto out_alloc_resp_msg
;
11243 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11245 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11246 if (!dev
->resp_ptr_base
){
11247 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11248 goto out_alloc_resp_ptr
;
11250 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11251 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11253 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11254 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11258 out_alloc_resp_ptr
:
11259 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11260 out_alloc_resp_msg
:
11264 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11266 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11269 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11270 kfree(dev
->cmd
[i
].sgl
);
11273 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11277 static int ssd_init_cmd(struct ssd_device
*dev
)
11279 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11280 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11281 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11284 spin_lock_init(&dev
->cmd_lock
);
11286 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11287 if (!dev
->msg_base
) {
11288 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11289 goto out_alloc_msg
;
11292 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11294 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11295 goto out_alloc_cmd
;
11297 memset(dev
->cmd
, 0, cmd_sz
);
11299 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11300 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11301 if (!dev
->cmd
[i
].sgl
) {
11302 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11303 goto out_alloc_sgl
;
11306 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11307 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11309 dev
->cmd
[i
].dev
= dev
;
11310 dev
->cmd
[i
].tag
= i
;
11311 dev
->cmd
[i
].flag
= 0;
11313 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11316 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11317 dev
->scmd
= ssd_dispatch_cmd
;
11319 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11321 dev
->scmd
= ssd_send_cmd_db
;
11323 dev
->scmd
= ssd_send_cmd
;
11330 for (i
--; i
>=0; i
--) {
11331 kfree(dev
->cmd
[i
].sgl
);
11335 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11340 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11341 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11343 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11345 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11349 return IRQ_WAKE_THREAD
;
11352 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11354 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11355 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11356 struct ssd_cmd
*cmd
;
11357 union ssd_response_msq __msg
;
11358 union ssd_response_msq
*msg
= &__msg
;
11360 uint32_t resp_idx
= queue
->resp_idx
;
11361 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11362 uint32_t end_resp_idx
;
11364 if (unlikely(resp_idx
== new_resp_idx
)) {
11368 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11371 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11374 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11375 msg
->u64_msg
= *u64_msg
;
11377 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11378 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11381 /* clear the resp msg */
11382 *u64_msg
= (uint64_t)(-1);
11384 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11385 /*if (unlikely(!cmd->bio)) {
11386 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11387 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11391 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11392 cmd
->errors
= -EIO
;
11396 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11400 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11401 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11402 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11403 queue_work(dev
->workq
, &dev
->log_work
);
11407 if (unlikely(msg
->resp_msg
.status
)) {
11408 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11409 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11410 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11413 ssd_set_alarm(dev
);
11414 queue
->io_stat
.nr_rwerr
++;
11415 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11417 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11418 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11420 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11422 queue
->io_stat
.nr_ioerr
++;
11425 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11426 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11427 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11429 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11431 }while (resp_idx
!= end_resp_idx
);
11433 queue
->resp_idx
= new_resp_idx
;
11435 return IRQ_HANDLED
;
11439 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11440 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11442 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11445 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11446 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11447 struct ssd_cmd
*cmd
;
11448 union ssd_response_msq __msg
;
11449 union ssd_response_msq
*msg
= &__msg
;
11451 uint32_t resp_idx
= queue
->resp_idx
;
11452 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11453 uint32_t end_resp_idx
;
11455 if (unlikely(resp_idx
== new_resp_idx
)) {
11459 #if (defined SSD_ESCAPE_IRQ)
11460 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11461 dev
->irq_cpu
= smp_processor_id();
11465 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11468 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11471 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11472 msg
->u64_msg
= *u64_msg
;
11474 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11475 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11478 /* clear the resp msg */
11479 *u64_msg
= (uint64_t)(-1);
11481 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11482 /*if (unlikely(!cmd->bio)) {
11483 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11484 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11488 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11489 cmd
->errors
= -EIO
;
11493 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11497 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11498 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11499 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11500 queue_work(dev
->workq
, &dev
->log_work
);
11504 if (unlikely(msg
->resp_msg
.status
)) {
11505 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11506 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11507 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11510 ssd_set_alarm(dev
);
11511 queue
->io_stat
.nr_rwerr
++;
11512 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11514 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11515 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11517 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11519 queue
->io_stat
.nr_ioerr
++;
11522 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11523 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11524 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11526 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11528 }while (resp_idx
!= end_resp_idx
);
11530 queue
->resp_idx
= new_resp_idx
;
11532 return IRQ_HANDLED
;
11535 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11536 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11538 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11542 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11543 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11545 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11546 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11548 ret
= ssd_interrupt(irq
, dev_id
);
11552 if (IRQ_HANDLED
== ret
) {
11553 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11559 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11563 for (i
=0; i
<dev
->nr_queue
; i
++) {
11564 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11568 static void ssd_free_irq(struct ssd_device
*dev
)
11572 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11573 if (SSD_INT_MSIX
== dev
->int_mode
) {
11574 for (i
=0; i
<dev
->nr_queue
; i
++) {
11575 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11580 for (i
=0; i
<dev
->nr_queue
; i
++) {
11581 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11584 if (SSD_INT_MSIX
== dev
->int_mode
) {
11585 pci_disable_msix(dev
->pdev
);
11586 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11587 pci_disable_msi(dev
->pdev
);
11592 static int ssd_init_irq(struct ssd_device
*dev
)
11594 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11595 const struct cpumask
*cpu_mask
= NULL
;
11596 static int cpu_affinity
= 0;
11598 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11599 const struct cpumask
*mask
= NULL
;
11600 static int cpu
= 0;
11604 unsigned long flags
= 0;
11607 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11609 #ifdef SSD_ESCAPE_IRQ
11613 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11614 dev
->nr_queue
= SSD_MSIX_VEC
;
11615 for (i
=0; i
<dev
->nr_queue
; i
++) {
11616 dev
->entry
[i
].entry
= i
;
11619 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11622 } else if (ret
> 0) {
11623 dev
->nr_queue
= ret
;
11625 hio_warn("%s: can not enable msix\n", dev
->name
);
11627 ssd_set_alarm(dev
);
11632 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11633 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11634 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11635 cpu
= cpumask_first(mask
);
11637 for (i
=0; i
<dev
->nr_queue
; i
++) {
11638 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11639 cpu
= cpumask_next(cpu
, mask
);
11640 if (cpu
>= nr_cpu_ids
) {
11641 cpu
= cpumask_first(mask
);
11646 dev
->int_mode
= SSD_INT_MSIX
;
11647 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11648 ret
= pci_enable_msi(dev
->pdev
);
11650 hio_warn("%s: can not enable msi\n", dev
->name
);
11652 ssd_set_alarm(dev
);
11657 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11659 dev
->int_mode
= SSD_INT_MSI
;
11662 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11664 dev
->int_mode
= SSD_INT_LEGACY
;
11667 for (i
=0; i
<dev
->nr_queue
; i
++) {
11668 if (dev
->nr_queue
> 1) {
11669 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11671 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11674 dev
->queue
[i
].dev
= dev
;
11675 dev
->queue
[i
].idx
= i
;
11677 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11678 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11680 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11681 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11682 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11683 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11685 dev
->queue
[i
].cmd
= dev
->cmd
;
11688 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11689 flags
= IRQF_SHARED
;
11694 for (i
=0; i
<dev
->nr_queue
; i
++) {
11695 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11696 if (threaded_irq
) {
11697 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11698 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11699 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11701 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11704 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11705 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11707 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11711 hio_warn("%s: request irq failed\n", dev
->name
);
11713 ssd_set_alarm(dev
);
11714 goto out_request_irq
;
11717 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11718 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11719 if (SSD_INT_MSIX
== dev
->int_mode
) {
11720 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11721 cpu_affinity
= cpumask_first(cpu_mask
);
11724 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11725 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11726 if (cpu_affinity
>= nr_cpu_ids
) {
11727 cpu_affinity
= cpumask_first(cpu_mask
);
11736 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11737 if (SSD_INT_MSIX
== dev
->int_mode
) {
11738 for (j
=0; j
<dev
->nr_queue
; j
++) {
11739 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11744 for (i
--; i
>=0; i
--) {
11745 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11748 if (SSD_INT_MSIX
== dev
->int_mode
) {
11749 pci_disable_msix(dev
->pdev
);
11750 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11751 pci_disable_msi(dev
->pdev
);
11758 static void ssd_initial_log(struct ssd_device
*dev
)
11761 uint32_t speed
, width
;
11763 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11767 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11769 // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart'
11770 //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
11773 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11775 width
= (val
>> 4)& 0x3F;
11776 if (0x1 == speed
) {
11777 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11778 } else if (0x2 == speed
) {
11779 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11781 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11783 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11788 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11789 static void ssd_hwmon_worker(void *data
)
11791 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11793 static void ssd_hwmon_worker(struct work_struct
*work
)
11795 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11798 if (ssd_check_hw(dev
)) {
11799 //hio_err("%s: check hardware failed\n", dev->name);
11803 ssd_check_clock(dev
);
11804 ssd_check_volt(dev
);
11806 ssd_mon_boardvolt(dev
);
11809 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11810 static void ssd_tempmon_worker(void *data
)
11812 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11814 static void ssd_tempmon_worker(struct work_struct
*work
)
11816 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11819 if (ssd_check_hw(dev
)) {
11820 //hio_err("%s: check hardware failed\n", dev->name);
11828 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11829 static void ssd_capmon_worker(void *data
)
11831 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11833 static void ssd_capmon_worker(struct work_struct
*work
)
11835 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11838 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11841 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11845 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11849 /* fault before? */
11850 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11851 ret
= ssd_check_pl_cap_fast(dev
);
11858 ret
= ssd_do_cap_learn(dev
, &cap
);
11860 hio_err("%s: cap learn failed\n", dev
->name
);
11861 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11865 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11867 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11868 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11871 //use the fw event id?
11872 if (cap
< cap_threshold
) {
11873 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11874 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11876 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11877 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11878 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11883 static void ssd_routine_start(void *data
)
11885 struct ssd_device
*dev
;
11892 dev
->routine_tick
++;
11894 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11895 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11896 queue_work(dev
->workq
, &dev
->log_work
);
11899 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11900 queue_work(dev
->workq
, &dev
->hwmon_work
);
11903 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11904 queue_work(dev
->workq
, &dev
->capmon_work
);
11907 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11908 /* CAP fault? check again */
11909 queue_work(dev
->workq
, &dev
->capmon_work
);
11912 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11913 queue_work(dev
->workq
, &dev
->tempmon_work
);
11916 /* schedule routine */
11917 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11920 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11922 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11925 (void)ssd_del_timer(&dev
->routine_timer
);
11927 (void)ssd_del_timer(&dev
->bm_timer
);
11930 static int ssd_init_routine(struct ssd_device
*dev
)
11932 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11935 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11936 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
11937 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
11938 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
11939 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
11941 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
11942 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
11943 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
11944 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
11948 ssd_initial_log(dev
);
11950 /* schedule bm routine */
11951 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
11953 /* schedule routine */
11954 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
/*
 * PCI remove callback: take the device offline, flush and persist state,
 * and release every resource acquired in ssd_init_one, in reverse order.
 *
 * NOTE(review): this chunk is an extraction-mangled paste — braces, guard
 * conditions (e.g. the NULL check after pci_get_drvdata) and the function
 * tail after pci_set_drvdata were dropped by the extraction. Code tokens
 * below are kept byte-identical; only comments were added.
 */
11960 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
/* pre-2.6.38 builds annotate this with __devexit (wrapper lines dropped) */
11963 ssd_remove_one (struct pci_dev
*pdev
)
11965 struct ssd_device
*dev
;
11971 dev
= pci_get_drvdata(pdev
);
/* NOTE(review): a !dev early-return presumably sat here — lines dropped */
11976 list_del_init(&dev
->list
);
11978 ssd_unregister_sysfs(dev
);
11980 /* offline firstly */
11981 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
11983 /* clean work queue first */
11985 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
11986 ssd_cleanup_workq(dev
);
/* persist dirty data and metadata before tearing anything down */
11990 (void)ssd_flush(dev
);
11991 (void)ssd_save_md(dev
);
11995 ssd_save_smart(dev
);
11998 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
11999 ssd_cleanup_blkdev(dev
);
12003 ssd_cleanup_chardev(dev
);
12006 /* clean routine */
12008 ssd_cleanup_routine(dev
);
12011 ssd_cleanup_queue(dev
);
12013 ssd_cleanup_tag(dev
);
12014 ssd_cleanup_thread(dev
);
12018 ssd_cleanup_dcmd(dev
);
12019 ssd_cleanup_cmd(dev
);
12020 ssd_cleanup_response(dev
);
12023 ssd_cleanup_log(dev
);
/* optionally ask the controller to reload its firmware on removal */
12026 if (dev
->reload_fw
) { //reload fw
12027 dev
->has_non_0x98_reg_access
= 1;
12028 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12031 /* unmap physical adress */
12032 #ifdef LINUX_SUSE_OS
12033 iounmap(dev
->ctrlp
);
/* non-SUSE path (the #else line was dropped by the extraction) */
12035 pci_iounmap(pdev
, dev
->ctrlp
);
12038 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12040 pci_disable_device(pdev
);
12042 pci_set_drvdata(pdev
, NULL
);
/* NOTE(review): the final release of @dev (kref_put/kfree) was dropped
 * by the extraction — confirm against the full source. */
/*
 * PCI probe callback: allocate and zero a ssd_device, name and index it,
 * enable the PCI device, map BAR0, then bring the card up through a long
 * ordered init sequence (protocol/fw/rom/label info, workqueue, log,
 * smart, hw info, sensors, power-loss cap, response/cmd/dcmd rings, irq,
 * thread, tags, queue, over-temp protect, write mode, routine, char and
 * block devices, sysfs), finally adding the device to the global ssd_list.
 * Errors unwind in reverse via the out_* labels at the bottom.
 *
 * NOTE(review): extraction-mangled paste. Every `if (ret) {` guard line,
 * most closing braces, several unwind labels and the local declarations
 * (ret, tv) were dropped by the extraction. Code tokens below are kept
 * byte-identical; only comments were added.
 */
12048 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
/* pre-2.6.38 builds annotate the probe with __devinit (line dropped) */
12051 ssd_init_one(struct pci_dev
*pdev
,
12052 const struct pci_device_id
*ent
)
12054 struct ssd_device
*dev
;
/* reject a bogus probe invocation */
12058 if (!pdev
|| !ent
) {
/* --- allocate and zero the per-device state --- */
12063 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
12066 goto out_alloc_dev
;
12068 memset(dev
, 0, sizeof(struct ssd_device
));
12070 dev
->owner
= THIS_MODULE
;
/* slave-port devices get a distinct name/major below */
12072 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
12076 dev
->idx
= ssd_get_index(dev
->slave
);
12077 if (dev
->idx
< 0) {
12079 goto out_get_index
;
/* master-port naming: "hio" + index, standard majors */
12083 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
12084 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
12086 dev
->major
= ssd_major
;
12087 dev
->cmajor
= ssd_cmajor
;
/* slave-port naming branch (the `} else {` line was dropped) */
12089 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
12090 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
12091 dev
->major
= ssd_major_sl
;
/* record probe time as the initial reset timestamp */
12095 do_gettimeofday(&tv
);
12096 dev
->reset_time
= tv
.tv_sec
;
12098 atomic_set(&(dev
->refcnt
), 0);
12099 atomic_set(&(dev
->tocnt
), 0);
12101 mutex_init(&dev
->fw_mutex
);
12104 mutex_init(&dev
->gd_mutex
);
12105 dev
->has_non_0x98_reg_access
= 0;
12107 //init in_flight lock
12108 spin_lock_init(&dev
->in_flight_lock
);
12111 pci_set_drvdata(pdev
, dev
);
12113 kref_init(&dev
->kref
);
/* --- PCI bring-up --- */
12115 ret
= pci_enable_device(pdev
);
12117 hio_warn("%s: can not enable device\n", dev
->name
);
12118 goto out_enable_device
;
12121 pci_set_master(pdev
);
/* 64-bit streaming DMA mask (legacy constant before 2.6.31) */
12123 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12124 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12126 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12129 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12130 goto out_set_dma_mask
;
/* 64-bit coherent DMA mask */
12133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12134 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12136 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12139 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12140 goto out_set_dma_mask
;
/* --- map BAR0 control registers --- */
12143 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12144 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12146 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12147 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12149 goto out_request_mem_region
;
12152 /* 2.6.9 kernel bug */
12153 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12155 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12157 goto out_pci_iomap
;
/* --- ordered hardware/software init sequence; each step unwinds via
 * its own out_* label on failure (guard lines dropped by extraction) --- */
12160 ret
= ssd_check_hw(dev
);
12162 hio_err("%s: check hardware failed\n", dev
->name
);
12166 ret
= ssd_init_protocol_info(dev
);
12168 hio_err("%s: init protocol info failed\n", dev
->name
);
12169 goto out_init_protocol_info
;
12173 ssd_clear_alarm(dev
);
12175 ret
= ssd_init_fw_info(dev
);
12177 hio_err("%s: init firmware info failed\n", dev
->name
);
12179 ssd_set_alarm(dev
);
12180 goto out_init_fw_info
;
12188 ret
= ssd_init_rom_info(dev
);
12190 hio_err("%s: init rom info failed\n", dev
->name
);
12192 ssd_set_alarm(dev
);
12193 goto out_init_rom_info
;
12196 ret
= ssd_init_label(dev
);
12198 hio_err("%s: init label failed\n", dev
->name
);
12200 ssd_set_alarm(dev
);
12201 goto out_init_label
;
12204 ret
= ssd_init_workq(dev
);
12206 hio_warn("%s: init workq failed\n", dev
->name
);
12207 goto out_init_workq
;
12209 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12211 ret
= ssd_init_log(dev
);
12213 hio_err("%s: init log failed\n", dev
->name
);
12215 ssd_set_alarm(dev
);
12219 ret
= ssd_init_smart(dev
);
12221 hio_err("%s: init info failed\n", dev
->name
);
12223 ssd_set_alarm(dev
);
12224 goto out_init_smart
;
12228 ret
= ssd_init_hw_info(dev
);
12230 hio_err("%s: init hardware info failed\n", dev
->name
);
12232 ssd_set_alarm(dev
);
12233 goto out_init_hw_info
;
12241 ret
= ssd_init_sensor(dev
);
12243 hio_err("%s: init sensor failed\n", dev
->name
);
12245 ssd_set_alarm(dev
);
12246 goto out_init_sensor
;
12249 ret
= ssd_init_pl_cap(dev
);
12251 hio_err("%s: int pl_cap failed\n", dev
->name
);
12253 ssd_set_alarm(dev
);
12254 goto out_init_pl_cap
;
12258 ret
= ssd_check_init_state(dev
);
12260 hio_err("%s: check init state failed\n", dev
->name
);
12262 ssd_set_alarm(dev
);
12263 goto out_check_init_state
;
/* --- command/response infrastructure --- */
12266 ret
= ssd_init_response(dev
);
12268 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12269 goto out_init_response
;
12272 ret
= ssd_init_cmd(dev
);
12274 hio_warn("%s: init msg failed\n", dev
->name
);
12278 ret
= ssd_init_dcmd(dev
);
12280 hio_warn("%s: init cmd failed\n", dev
->name
);
12281 goto out_init_dcmd
;
12284 ret
= ssd_init_irq(dev
);
12286 hio_warn("%s: init irq failed\n", dev
->name
);
12290 ret
= ssd_init_thread(dev
);
12292 hio_warn("%s: init thread failed\n", dev
->name
);
12293 goto out_init_thread
;
12296 ret
= ssd_init_tag(dev
);
12298 hio_warn("%s: init tags failed\n", dev
->name
);
12299 goto out_init_tags
;
/* device is functional from here on */
12303 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12305 ret
= ssd_init_queue(dev
);
12307 hio_warn("%s: init queue failed\n", dev
->name
);
12308 goto out_init_queue
;
12316 ret
= ssd_init_ot_protect(dev
);
12318 hio_err("%s: int ot_protect failed\n", dev
->name
);
12320 ssd_set_alarm(dev
);
12321 goto out_int_ot_protect
;
12324 ret
= ssd_init_wmode(dev
);
12326 hio_warn("%s: init write mode\n", dev
->name
);
12327 goto out_init_wmode
;
12330 /* init routine after hw is ready */
12331 ret
= ssd_init_routine(dev
);
12333 hio_warn("%s: init routine\n", dev
->name
);
12334 goto out_init_routine
;
/* --- user-visible device nodes --- */
12337 ret
= ssd_init_chardev(dev
);
12339 hio_warn("%s: register char device failed\n", dev
->name
);
12340 goto out_init_chardev
;
12344 ret
= ssd_init_blkdev(dev
);
12346 hio_warn("%s: register block device failed\n", dev
->name
);
12347 goto out_init_blkdev
;
12349 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12351 ret
= ssd_register_sysfs(dev
);
12353 hio_warn("%s: register sysfs failed\n", dev
->name
);
12354 goto out_register_sysfs
;
12359 list_add_tail(&dev
->list
, &ssd_list
);
/* --- error unwind, reverse order of acquisition; several intermediate
 * labels were dropped by the extraction --- */
12363 out_register_sysfs
:
12364 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12365 ssd_cleanup_blkdev(dev
);
12369 ssd_cleanup_chardev(dev
);
12374 ssd_cleanup_routine(dev
);
12378 out_int_ot_protect
:
12379 ssd_cleanup_queue(dev
);
12381 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12382 ssd_cleanup_tag(dev
);
12384 ssd_cleanup_thread(dev
);
12388 ssd_cleanup_dcmd(dev
);
12390 ssd_cleanup_cmd(dev
);
12392 ssd_cleanup_response(dev
);
12394 out_check_init_state
:
12401 ssd_cleanup_log(dev
);
12406 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12407 ssd_cleanup_workq(dev
);
12413 out_init_protocol_info
:
12415 #ifdef LINUX_SUSE_OS
12416 iounmap(dev
->ctrlp
);
/* non-SUSE path (the #else line was dropped) */
12418 pci_iounmap(pdev
, dev
->ctrlp
);
12421 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12422 out_request_mem_region
:
12424 pci_disable_device(pdev
);
12426 pci_set_drvdata(pdev
, NULL
);
/* NOTE(review): the trailing labels (out_get_index/out_alloc_dev), the
 * kfree of @dev and the final `return ret;` were dropped — confirm. */
12434 static void ssd_cleanup_tasklet(void)
12437 for_each_online_cpu(i
) {
12438 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
/*
 * Initialize the per-CPU completion machinery: a done-queue list head and
 * a tasklet on every online CPU. Two tasklet handlers appear below
 * (__ssd_done_db vs __ssd_done); the condition selecting between them was
 * dropped by the extraction — presumably a debug/fault-inject switch,
 * TODO confirm against the full source. Code tokens kept byte-identical.
 */
12442 static int ssd_init_tasklet(void)
12446 for_each_online_cpu(i
) {
12447 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12450 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12452 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
/*
 * PCI IDs this driver binds to (vendor 0x10ee = Xilinx for the g3 board,
 * 0x19e5 = Huawei for v1/v2 generations). Referenced by ssd_driver and
 * MODULE_DEVICE_TABLE below.
 * NOTE(review): the terminating zero entry was dropped by the extraction.
 */
12459 static struct pci_device_id ssd_pci_tbl
[] = {
12460 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12461 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12462 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12463 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12464 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
/*
 * Power-management suspend: mirror of the remove path — take the device
 * offline, flush/persist data, stop background work, tear down the thread
 * and log, optionally trigger a firmware reload, then unmap and disable
 * the PCI device. Two entry signatures: legacy pci_driver .suspend
 * (pdev + pm_message_t) before 2.6.32, dev_pm_ops (struct device) after.
 *
 * NOTE(review): extraction-mangled paste — guard conditions, braces and
 * the final `return 0;` were dropped. Code tokens kept byte-identical.
 */
12468 /*driver power management handler for pm_ops*/
12469 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12470 static int ssd_hio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
/* dev_pm_ops variant (the #else line was dropped) */
12473 static int ssd_hio_suspend(struct device
*ddev
)
12475 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12477 struct ssd_device
*dev
;
12484 dev
= pci_get_drvdata(pdev
);
12489 hio_warn("%s: suspend disk start.\n", dev
->name
);
12490 ssd_unregister_sysfs(dev
);
12492 /* offline firstly */
12493 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12495 /* clean work queue first */
12497 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12498 ssd_cleanup_workq(dev
);
/* persist dirty data, metadata and SMART before powering down */
12502 (void)ssd_flush(dev
);
12503 (void)ssd_save_md(dev
);
12507 ssd_save_smart(dev
);
12510 /* clean routine */
12512 ssd_cleanup_routine(dev
);
12515 ssd_cleanup_thread(dev
);
12520 ssd_cleanup_log(dev
);
12523 if (dev
->reload_fw
) { //reload fw
12524 dev
->has_non_0x98_reg_access
= 1;
12525 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12528 /* unmap physical adress */
12530 #ifdef LINUX_SUSE_OS
12531 iounmap(dev
->ctrlp
);
/* non-SUSE path (the #else line was dropped) */
12533 pci_iounmap(pdev
, dev
->ctrlp
);
/* mmio_base is zeroed so resume can tell whether the region is held */
12538 if (dev
->mmio_base
) {
12539 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12540 dev
->mmio_base
= 0;
12543 pci_disable_device(pdev
);
12545 hio_warn("%s: suspend disk finish.\n", dev
->name
);
/*
 * Power-management resume: re-enable the PCI device, remap BAR0 and replay
 * a subset of the probe-time init sequence (hw check, fw/rom/label info,
 * workqueue, log, smart, hw info, sensors, pl-cap, init-state check, base
 * pointer reload, irq, thread, ot-protect, wmode, routine), then mark the
 * device online again. Errors unwind through out_* labels and end with a
 * "resume disk fail" warning.
 *
 * NOTE(review): extraction-mangled paste — `if (ret)` guards, braces,
 * several labels and returns were dropped. Code tokens kept byte-identical.
 */
12551 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12552 static int ssd_hio_resume(struct pci_dev
*pdev
)
/* dev_pm_ops variant (the #else line was dropped) */
12555 static int ssd_hio_resume(struct device
*ddev
)
12557 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12559 struct ssd_device
*dev
= NULL
;
12567 dev
= pci_get_drvdata(pdev
);
12570 goto out_alloc_dev
;
12573 hio_warn("%s: resume disk start.\n", dev
->name
);
/* --- PCI bring-up, same as probe --- */
12574 ret
= pci_enable_device(pdev
);
12576 hio_warn("%s: can not enable device\n", dev
->name
);
12577 goto out_enable_device
;
12580 pci_set_master(pdev
);
12582 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12583 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12585 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12588 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12589 goto out_set_dma_mask
;
12592 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12593 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12595 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12598 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12599 goto out_set_dma_mask
;
/* --- remap BAR0 (released in suspend) --- */
12602 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12603 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12605 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12606 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12608 goto out_request_mem_region
;
12611 /* 2.6.9 kernel bug */
12612 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12614 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12616 goto out_pci_iomap
;
/* --- replay the hardware init sequence --- */
12619 ret
= ssd_check_hw(dev
);
12621 hio_err("%s: check hardware failed\n", dev
->name
);
12626 ssd_clear_alarm(dev
);
12628 ret
= ssd_init_fw_info(dev
);
12630 hio_err("%s: init firmware info failed\n", dev
->name
);
12632 ssd_set_alarm(dev
);
12633 goto out_init_fw_info
;
12641 ret
= ssd_init_rom_info(dev
);
12643 hio_err("%s: init rom info failed\n", dev
->name
);
12645 ssd_set_alarm(dev
);
12646 goto out_init_rom_info
;
12649 ret
= ssd_init_label(dev
);
12651 hio_err("%s: init label failed\n", dev
->name
);
12653 ssd_set_alarm(dev
);
12654 goto out_init_label
;
12657 ret
= ssd_init_workq(dev
);
12659 hio_warn("%s: init workq failed\n", dev
->name
);
12660 goto out_init_workq
;
12662 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12664 ret
= ssd_init_log(dev
);
12666 hio_err("%s: init log failed\n", dev
->name
);
12668 ssd_set_alarm(dev
);
12672 ret
= ssd_init_smart(dev
);
12674 hio_err("%s: init info failed\n", dev
->name
);
12676 ssd_set_alarm(dev
);
12677 goto out_init_smart
;
12681 ret
= ssd_init_hw_info(dev
);
12683 hio_err("%s: init hardware info failed\n", dev
->name
);
12685 ssd_set_alarm(dev
);
12686 goto out_init_hw_info
;
12694 ret
= ssd_init_sensor(dev
);
12696 hio_err("%s: init sensor failed\n", dev
->name
);
12698 ssd_set_alarm(dev
);
12699 goto out_init_sensor
;
12702 ret
= ssd_init_pl_cap(dev
);
12704 hio_err("%s: int pl_cap failed\n", dev
->name
);
12706 ssd_set_alarm(dev
);
12707 goto out_init_pl_cap
;
12711 ret
= ssd_check_init_state(dev
);
12713 hio_err("%s: check init state failed\n", dev
->name
);
12715 ssd_set_alarm(dev
);
12716 goto out_check_init_state
;
12719 //flush all base pointer to ssd
12720 (void)ssd_reload_ssd_ptr(dev
);
12722 ret
= ssd_init_irq(dev
);
12724 hio_warn("%s: init irq failed\n", dev
->name
);
12728 ret
= ssd_init_thread(dev
);
12730 hio_warn("%s: init thread failed\n", dev
->name
);
12731 goto out_init_thread
;
12735 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12742 ret
= ssd_init_ot_protect(dev
);
12744 hio_err("%s: int ot_protect failed\n", dev
->name
);
12746 ssd_set_alarm(dev
);
12747 goto out_int_ot_protect
;
12750 ret
= ssd_init_wmode(dev
);
12752 hio_warn("%s: init write mode\n", dev
->name
);
12753 goto out_init_wmode
;
12756 /* init routine after hw is ready */
12757 ret
= ssd_init_routine(dev
);
12759 hio_warn("%s: init routine\n", dev
->name
);
12760 goto out_init_routine
;
12764 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12768 hio_warn("%s: resume disk finish.\n", dev
->name
);
/* --- error unwind (several labels dropped by extraction) --- */
12774 out_int_ot_protect
:
12775 ssd_cleanup_thread(dev
);
12779 out_check_init_state
:
12786 ssd_cleanup_log(dev
);
12791 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12792 ssd_cleanup_workq(dev
);
12799 #ifdef LINUX_SUSE_OS
12800 iounmap(dev
->ctrlp
);
/* non-SUSE path (the #else line was dropped) */
12802 pci_iounmap(pdev
, dev
->ctrlp
);
12805 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12806 out_request_mem_region
:
12808 pci_disable_device(pdev
);
12813 hio_warn("%s: resume disk fail.\n", dev
->name
);
/*
 * Driver registration glue: the PCI device table export, the dev_pm_ops
 * wrapper (kernels >= 2.6.32), and the pci_driver with version-dependent
 * .remove annotation and legacy .suspend/.resume hooks.
 * NOTE(review): #else/#endif lines and the pm-ops hookup line were
 * dropped by the extraction. Code tokens kept byte-identical.
 */
12818 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
/* pre-2.6.32 kernels use the legacy suspend/resume members below instead */
12820 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12822 SIMPLE_DEV_PM_OPS(hio_pm_ops
, ssd_hio_suspend
, ssd_hio_resume
);
12825 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12826 struct pci_driver ssd_driver
= {
12827 .name
= MODULE_NAME
,
12828 .id_table
= ssd_pci_tbl
,
12829 .probe
= ssd_init_one
,
12830 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12831 .remove
= __devexit_p(ssd_remove_one
),
/* post-2.6.38: __devexit_p wrapper removed from the kernel */
12833 .remove
= ssd_remove_one
,
12836 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12837 .suspend
= ssd_hio_suspend
,
12838 .resume
= ssd_hio_resume
,
/*
 * Reboot-notifier callback: on shutdown/halt/reboot, walk every registered
 * device and persist its state (power-off log event, flush, metadata,
 * SMART), stop its workqueue, and optionally trigger a firmware reload.
 * NOTE(review): extraction-mangled — braces and the NOTIFY_* return were
 * dropped. Code tokens kept byte-identical.
 */
12846 /* notifier block to get a notify on system shutdown/halt/reboot */
12847 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12849 struct ssd_device
*dev
= NULL
;
12850 struct ssd_device
*n
= NULL
;
/* _safe variant: ssd_list entries survive the per-device teardown */
12852 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12853 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12855 (void)ssd_flush(dev
);
12856 (void)ssd_save_md(dev
);
12860 ssd_save_smart(dev
);
12862 ssd_stop_workq(dev
);
12864 if (dev
->reload_fw
) {
12865 dev
->has_non_0x98_reg_access
= 1;
12866 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
/* Reboot notifier block (positional init: callback, next = NULL,
 * priority = 0); registered in ssd_init_module.
 * NOTE(review): the closing `};` line was dropped by the extraction. */
12874 static struct notifier_block ssd_notifier
= {
12875 ssd_notify_reboot
, NULL
, 0
/*
 * Module init: in order — device-index allocator, /proc entries, sysfs,
 * per-CPU tasklets, device class, char/block major registration (three
 * majors: control chardev, master blkdev, slave blkdev; 0 requests a
 * dynamic major), module-parameter sanitation, PCI driver registration,
 * and finally the reboot notifier. Failures unwind through the out_*
 * labels at the bottom in reverse order.
 * NOTE(review): extraction-mangled — `if (ret)` guards, braces, #else
 * lines and several labels/returns were dropped. Code tokens kept
 * byte-identical.
 */
12878 static int __init
ssd_init_module(void)
12882 hio_info("driver version: %s\n", DRIVER_VERSION
);
12884 ret
= ssd_init_index();
12886 hio_warn("init index failed\n");
12887 goto out_init_index
;
12890 ret
= ssd_init_proc();
12892 hio_warn("init proc failed\n");
12893 goto out_init_proc
;
12896 ret
= ssd_init_sysfs();
12898 hio_warn("init sysfs failed\n");
12899 goto out_init_sysfs
;
12902 ret
= ssd_init_tasklet();
12904 hio_warn("init tasklet failed\n");
12905 goto out_init_tasklet
;
/* pre-2.6.13 kernels only have the class_simple_* API */
12908 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12909 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12911 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12913 if (IS_ERR(ssd_class
)) {
12914 ret
= PTR_ERR(ssd_class
);
12915 goto out_class_create
;
/* chardev major: fixed if the param is set, otherwise dynamic */
12918 if (ssd_cmajor
> 0) {
12919 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12921 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12924 hio_warn("unable to register chardev major number\n");
12925 goto out_register_chardev
;
12928 if (ssd_major
> 0) {
12929 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12931 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12934 hio_warn("unable to register major number\n");
12935 goto out_register_blkdev
;
12938 if (ssd_major_sl
> 0) {
12939 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12941 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12944 hio_warn("unable to register slave major number\n");
12945 goto out_register_blkdev_sl
;
/* --- clamp module parameters to their valid ranges --- */
12948 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
12949 mode
= SSD_DRV_MODE_STANDARD
;
12953 if (mode
!= SSD_DRV_MODE_STANDARD
) {
12957 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
12958 int_mode
= SSD_INT_MODE_DEFAULT
;
/* threaded irq handling forces MSI interrupt mode */
12961 if (threaded_irq
) {
12962 int_mode
= SSD_INT_MSI
;
12965 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
12966 log_level
= SSD_LOG_LEVEL_ERR
;
12969 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
12970 wmode
= SSD_WMODE_DEFAULT
;
/* pre-2.6.20 kernels spell this pci_module_init */
12973 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12974 ret
= pci_module_init(&ssd_driver
);
12976 ret
= pci_register_driver(&ssd_driver
);
12979 hio_warn("pci init failed\n");
12983 ret
= register_reboot_notifier(&ssd_notifier
);
12985 hio_warn("register reboot notifier failed\n");
12986 goto out_register_reboot_notifier
;
/* --- error unwind, reverse order of the steps above --- */
12991 out_register_reboot_notifier
:
12993 pci_unregister_driver(&ssd_driver
);
12994 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12995 out_register_blkdev_sl
:
12996 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12997 out_register_blkdev
:
12998 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12999 out_register_chardev
:
13000 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13001 class_simple_destroy(ssd_class
);
13003 class_destroy(ssd_class
);
13006 ssd_cleanup_tasklet();
13008 ssd_cleanup_sysfs();
13010 ssd_cleanup_proc();
13012 ssd_cleanup_index();
/*
 * Module exit: undo ssd_init_module in reverse — reboot notifier, PCI
 * driver (which removes each device), block/char majors, device class,
 * tasklets, sysfs, /proc, and the index allocator.
 * NOTE(review): extraction-mangled (braces / #else lines dropped);
 * code tokens kept byte-identical.
 */
13018 static void __exit
ssd_cleanup_module(void)
13021 hio_info("unload driver: %s\n", DRIVER_VERSION
);
13025 unregister_reboot_notifier(&ssd_notifier
);
13027 pci_unregister_driver(&ssd_driver
);
13029 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13030 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13031 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13032 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13033 class_simple_destroy(ssd_class
);
/* post-2.6.12 class API (the #else line was dropped) */
13035 class_destroy(ssd_class
);
13038 ssd_cleanup_tasklet();
13039 ssd_cleanup_sysfs();
13040 ssd_cleanup_proc();
13041 ssd_cleanup_index();
/*
 * Exported API: attach @event_call to the ssd_device behind @bdev, then
 * replay relevant events from the internal log — skipping stale SEU
 * events from before the last reset, and deferring temperature events so
 * only the most recent one fires (and only if the current temperature is
 * still at/above the hysteresis threshold).
 * NOTE(review): extraction-mangled — the log-iteration loop header, the
 * early returns and several braces were dropped. Code tokens kept
 * byte-identical.
 */
13044 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
13046 struct ssd_device
*dev
;
13048 struct ssd_log
*le
, *temp_le
= NULL
;
/* reject NULL bdev/callback or a bdev without a gendisk */
13053 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
13057 dev
= bdev
->bd_disk
->private_data
;
13058 dev
->event_call
= event_call
;
/* capture "now" to ignore log entries with future timestamps */
13060 do_gettimeofday(&tv
);
13063 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
13064 log_nr
= dev
->internal_log
.nr_log
;
/* only replay entries from this uptime window */
13067 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
13068 if ((le
->le
.event
== SSD_LOG_SEU_FAULT1
) && (le
->time
< dev
->reset_time
)) {
/* temperature events are coalesced: remember only the newest */
13072 if (le
->le
.event
== SSD_LOG_OVER_TEMP
|| le
->le
.event
== SSD_LOG_NORMAL_TEMP
|| le
->le
.event
== SSD_LOG_WARN_TEMP
) {
13073 if (!temp_le
|| le
->time
>= temp_le
->time
) {
/* non-temperature events are delivered immediately */
13079 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
/* deliver the newest temperature event only if still relevant */
13084 ssd_get_temperature(bdev
, &temp
);
13085 if (temp_le
&& (temp
>= SSD_OT_TEMP_HYST
)) {
13086 (void)dev
->event_call(dev
->gd
, temp_le
->le
.event
, ssd_parse_log(dev
, temp_le
, 0));
13092 int ssd_unregister_event_notifier(struct block_device
*bdev
)
13094 struct ssd_device
*dev
;
13096 if (!bdev
|| !(bdev
->bd_disk
)) {
13100 dev
= bdev
->bd_disk
->private_data
;
13101 dev
->event_call
= NULL
;
/* Public in-kernel API surface exported to other modules, followed by the
 * standard module entry/exit hookup and metadata. */
13106 EXPORT_SYMBOL(ssd_get_label
);
13107 EXPORT_SYMBOL(ssd_get_version
);
13108 EXPORT_SYMBOL(ssd_set_otprotect
);
13109 EXPORT_SYMBOL(ssd_bm_status
);
13110 EXPORT_SYMBOL(ssd_submit_pbio
);
13111 EXPORT_SYMBOL(ssd_get_pciaddr
);
13112 EXPORT_SYMBOL(ssd_get_temperature
);
13113 EXPORT_SYMBOL(ssd_register_event_notifier
);
13114 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
13115 EXPORT_SYMBOL(ssd_reset
);
13116 EXPORT_SYMBOL(ssd_set_wmode
);
13120 module_init(ssd_init_module
);
13121 module_exit(ssd_cleanup_module
);
13122 MODULE_VERSION(DRIVER_VERSION
);
13123 MODULE_LICENSE("GPL");
13124 MODULE_AUTHOR("Huawei SSD DEV Team");
13125 MODULE_DESCRIPTION("Huawei SSD driver");