2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
65 #define MODULE_NAME "hio"
66 #define DRIVER_VERSION "2.1.0.40"
67 #define DRIVER_VERSION_LEN 16
69 #define SSD_FW_MIN 0x1
71 #define SSD_DEV_NAME MODULE_NAME
72 #define SSD_DEV_NAME_LEN 16
73 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
74 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
79 #define SSD_MAJOR_SL 0
82 #define SSD_MAX_DEV 702
83 #define SSD_ALPHABET_NUM 26
85 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
86 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
87 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
88 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
91 #define SSD_SLAVE_PORT_DEVID 0x000a
95 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
97 #define SSD_ESCAPE_IRQ
103 #define SSD_MSIX_VEC 8
106 #undef SSD_ESCAPE_IRQ
107 #define SSD_MSIX_AFFINITY_FORCE
112 /* Over temperature protect */
113 #define SSD_OT_PROTECT
115 #ifdef SSD_QUEUE_PBIO
116 #define BIO_SSD_PBIO 20
120 //#define SSD_DEBUG_ERR
123 #define SSD_CMD_TIMEOUT (60*HZ)
126 #define SSD_SPI_TIMEOUT (5*HZ)
127 #define SSD_I2C_TIMEOUT (5*HZ)
129 #define SSD_I2C_MAX_DATA (127)
130 #define SSD_SMBUS_BLOCK_MAX (32)
131 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
134 #define SSD_INIT_WAIT (1000) //1s
135 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
136 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
137 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
138 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
139 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
141 /* blkdev busy wait */
142 #define SSD_DEV_BUSY_WAIT 1000 //ms
143 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
146 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
147 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
149 #define SSD_BM_RETRY_MAX 7
151 /* bm routine interval */
152 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
154 /* routine interval */
155 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
156 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
157 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
158 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
161 #define SSD_DMA_ALIGN (16)
163 /* some hw defalut */
164 #define SSD_LOG_MAX_SZ 4096
166 #define SSD_NAND_OOB_SZ 1024
167 #define SSD_NAND_ID_SZ 8
168 #define SSD_NAND_ID_BUFF_SZ 1024
169 #define SSD_NAND_MAX_CE 2
171 #define SSD_BBT_RESERVED 8
173 #define SSD_ECC_MAX_FLIP (64+1)
175 #define SSD_RAM_ALIGN 16
178 #define SSD_RELOAD_FLAG 0x3333CCCC
179 #define SSD_RELOAD_FW 0xAA5555AA
180 #define SSD_RESET_NOINIT 0xAA5555AA
181 #define SSD_RESET 0x55AAAA55
182 #define SSD_RESET_FULL 0x5A
183 //#define SSD_RESET_WAIT 1000 //1s
184 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
188 #define SSD_PROTOCOL_V1 0x0
190 #define SSD_ROM_SIZE (16*1024*1024)
191 #define SSD_ROM_BLK_SIZE (256*1024)
192 #define SSD_ROM_PAGE_SIZE (256)
193 #define SSD_ROM_NR_BRIDGE_FW 2
194 #define SSD_ROM_NR_CTRL_FW 2
195 #define SSD_ROM_BRIDGE_FW_BASE 0
196 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
197 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
198 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
199 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
200 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
203 #define SSD_PROTOCOL_V3 0x3000000
204 #define SSD_PROTOCOL_V3_1_1 0x3010001
205 #define SSD_PROTOCOL_V3_1_3 0x3010003
206 #define SSD_PROTOCOL_V3_2 0x3020000
207 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
208 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
209 #define SSD_PROTOCOL_V3_2_4 0x3020004
212 #define SSD_PV3_ROM_NR_BM_FW 1
213 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
215 #define SSD_ROM_LOG_SZ (64*1024*4)
217 #define SSD_ROM_NR_SMART_MAX 2
218 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
219 #define SSD_PV3_ROM_SMART_SZ (64*1024)
222 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
223 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
227 #define SSD_REQ_FIFO_REG 0x0000
228 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
229 #define SSD_RESP_PTR_REG 0x0010 //0x0018
230 #define SSD_INTR_INTERVAL_REG 0x0018
231 #define SSD_READY_REG 0x001C
232 #define SSD_BRIDGE_TEST_REG 0x0020
233 #define SSD_STRIPE_SIZE_REG 0x0028
234 #define SSD_CTRL_VER_REG 0x0030 //controller
235 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
236 #define SSD_PCB_VER_REG 0x0038
237 #define SSD_BURN_FLAG_REG 0x0040
238 #define SSD_BRIDGE_INFO_REG 0x0044
240 #define SSD_WL_VAL_REG 0x0048 //32-bit
242 #define SSD_BB_INFO_REG 0x004C
244 #define SSD_ECC_TEST_REG 0x0050 //test only
245 #define SSD_ERASE_TEST_REG 0x0058 //test only
246 #define SSD_WRITE_TEST_REG 0x0060 //test only
248 #define SSD_RESET_REG 0x0068
249 #define SSD_RELOAD_FW_REG 0x0070
251 #define SSD_RESERVED_BLKS_REG 0x0074
252 #define SSD_VALID_PAGES_REG 0x0078
253 #define SSD_CH_INFO_REG 0x007C
255 #define SSD_CTRL_TEST_REG_SZ 0x8
256 #define SSD_CTRL_TEST_REG0 0x0080
257 #define SSD_CTRL_TEST_REG1 0x0088
258 #define SSD_CTRL_TEST_REG2 0x0090
259 #define SSD_CTRL_TEST_REG3 0x0098
260 #define SSD_CTRL_TEST_REG4 0x00A0
261 #define SSD_CTRL_TEST_REG5 0x00A8
262 #define SSD_CTRL_TEST_REG6 0x00B0
263 #define SSD_CTRL_TEST_REG7 0x00B8
265 #define SSD_FLASH_INFO_REG0 0x00C0
266 #define SSD_FLASH_INFO_REG1 0x00C8
267 #define SSD_FLASH_INFO_REG2 0x00D0
268 #define SSD_FLASH_INFO_REG3 0x00D8
269 #define SSD_FLASH_INFO_REG4 0x00E0
270 #define SSD_FLASH_INFO_REG5 0x00E8
271 #define SSD_FLASH_INFO_REG6 0x00F0
272 #define SSD_FLASH_INFO_REG7 0x00F8
274 #define SSD_RESP_INFO_REG 0x01B8
275 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
277 #define SSD_CHIP_INFO_REG_SZ 0x10
278 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
279 #define SSD_CHIP_INFO_REG1 0x0110
280 #define SSD_CHIP_INFO_REG2 0x0120
281 #define SSD_CHIP_INFO_REG3 0x0130
282 #define SSD_CHIP_INFO_REG4 0x0140
283 #define SSD_CHIP_INFO_REG5 0x0150
284 #define SSD_CHIP_INFO_REG6 0x0160
285 #define SSD_CHIP_INFO_REG7 0x0170
287 #define SSD_RAM_INFO_REG 0x01C4
289 #define SSD_BBT_BASE_REG 0x01C8
290 #define SSD_ECT_BASE_REG 0x01CC
292 #define SSD_CLEAR_INTR_REG 0x01F0
294 #define SSD_INIT_STATE_REG_SZ 0x8
295 #define SSD_INIT_STATE_REG0 0x0200
296 #define SSD_INIT_STATE_REG1 0x0208
297 #define SSD_INIT_STATE_REG2 0x0210
298 #define SSD_INIT_STATE_REG3 0x0218
299 #define SSD_INIT_STATE_REG4 0x0220
300 #define SSD_INIT_STATE_REG5 0x0228
301 #define SSD_INIT_STATE_REG6 0x0230
302 #define SSD_INIT_STATE_REG7 0x0238
304 #define SSD_ROM_INFO_REG 0x0600
305 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
306 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
307 #define SSD_ROM_VP_INFO_REG 0x060C
309 #define SSD_LOG_INFO_REG 0x0610
310 #define SSD_LED_REG 0x0614
311 #define SSD_MSG_BASE_REG 0x06F8
314 #define SSD_SPI_REG_CMD 0x0180
315 #define SSD_SPI_REG_CMD_HI 0x0184
316 #define SSD_SPI_REG_WDATA 0x0188
317 #define SSD_SPI_REG_ID 0x0190
318 #define SSD_SPI_REG_STATUS 0x0198
319 #define SSD_SPI_REG_RDATA 0x01A0
320 #define SSD_SPI_REG_READY 0x01A8
323 #define SSD_I2C_CTRL_REG 0x06F0
324 #define SSD_I2C_RDATA_REG 0x06F4
326 /* temperature reg */
327 #define SSD_BRIGE_TEMP_REG 0x0618
329 #define SSD_CTRL_TEMP_REG0 0x0700
330 #define SSD_CTRL_TEMP_REG1 0x0708
331 #define SSD_CTRL_TEMP_REG2 0x0710
332 #define SSD_CTRL_TEMP_REG3 0x0718
333 #define SSD_CTRL_TEMP_REG4 0x0720
334 #define SSD_CTRL_TEMP_REG5 0x0728
335 #define SSD_CTRL_TEMP_REG6 0x0730
336 #define SSD_CTRL_TEMP_REG7 0x0738
338 /* reversion 3 reg */
339 #define SSD_PROTOCOL_VER_REG 0x01B4
341 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
342 #define SSD_BM_FAULT_REG 0x0660
344 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
345 #define SSD_PV3_RAM_STATUS_REG0 0x0260
346 #define SSD_PV3_RAM_STATUS_REG1 0x0264
347 #define SSD_PV3_RAM_STATUS_REG2 0x0268
348 #define SSD_PV3_RAM_STATUS_REG3 0x026C
349 #define SSD_PV3_RAM_STATUS_REG4 0x0270
350 #define SSD_PV3_RAM_STATUS_REG5 0x0274
351 #define SSD_PV3_RAM_STATUS_REG6 0x0278
352 #define SSD_PV3_RAM_STATUS_REG7 0x027C
354 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
355 #define SSD_PV3_CHIP_INFO_REG0 0x0300
356 #define SSD_PV3_CHIP_INFO_REG1 0x0340
357 #define SSD_PV3_CHIP_INFO_REG2 0x0380
358 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
359 #define SSD_PV3_CHIP_INFO_REG4 0x0400
360 #define SSD_PV3_CHIP_INFO_REG5 0x0440
361 #define SSD_PV3_CHIP_INFO_REG6 0x0480
362 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
364 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
365 #define SSD_PV3_INIT_STATE_REG0 0x0500
366 #define SSD_PV3_INIT_STATE_REG1 0x0520
367 #define SSD_PV3_INIT_STATE_REG2 0x0540
368 #define SSD_PV3_INIT_STATE_REG3 0x0560
369 #define SSD_PV3_INIT_STATE_REG4 0x0580
370 #define SSD_PV3_INIT_STATE_REG5 0x05A0
371 #define SSD_PV3_INIT_STATE_REG6 0x05C0
372 #define SSD_PV3_INIT_STATE_REG7 0x05E0
374 /* reversion 3.1.1 reg */
375 #define SSD_FULL_RESET_REG 0x01B0
377 #define SSD_CTRL_REG_ZONE_SZ 0x800
379 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
380 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
382 #define SSD_BB_ACC_REG_SZ 0x4
383 #define SSD_BB_ACC_REG0 0x21C0
384 #define SSD_BB_ACC_REG1 0x29C0
385 #define SSD_BB_ACC_REG2 0x31C0
387 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
388 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
390 #define SSD_EC_ACC_REG_SZ 0x4
391 #define SSD_EC_ACC_REG0 0x21E0
392 #define SSD_EC_ACC_REG1 0x29E0
393 #define SSD_EC_ACC_REG2 0x31E0
395 /* reversion 3.1.2 & 3.1.3 reg */
396 #define SSD_HW_STATUS_REG 0x02AC
398 #define SSD_PLP_INFO_REG 0x0664
400 /*reversion 3.2 reg*/
401 #define SSD_POWER_ON_REG 0x01EC
402 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
403 #define SSD_PL_CAP_LEARN_REG 0x01FC
405 #define SSD_FPGA_1V0_REG0 0x2070
406 #define SSD_FPGA_1V8_REG0 0x2078
407 #define SSD_FPGA_1V0_REG1 0x2870
408 #define SSD_FPGA_1V8_REG1 0x2878
410 /*reversion 3.2 reg*/
411 #define SSD_READ_OT_REG0 0x2260
412 #define SSD_WRITE_OT_REG0 0x2264
413 #define SSD_READ_OT_REG1 0x2A60
414 #define SSD_WRITE_OT_REG1 0x2A64
418 #define SSD_FUNC_READ 0x01
419 #define SSD_FUNC_WRITE 0x02
420 #define SSD_FUNC_NAND_READ_WOOB 0x03
421 #define SSD_FUNC_NAND_READ 0x04
422 #define SSD_FUNC_NAND_WRITE 0x05
423 #define SSD_FUNC_NAND_ERASE 0x06
424 #define SSD_FUNC_NAND_READ_ID 0x07
425 #define SSD_FUNC_READ_LOG 0x08
426 #define SSD_FUNC_TRIM 0x09
427 #define SSD_FUNC_RAM_READ 0x10
428 #define SSD_FUNC_RAM_WRITE 0x11
429 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
432 #define SSD_SPI_CMD_PROGRAM 0x02
433 #define SSD_SPI_CMD_READ 0x03
434 #define SSD_SPI_CMD_W_DISABLE 0x04
435 #define SSD_SPI_CMD_READ_STATUS 0x05
436 #define SSD_SPI_CMD_W_ENABLE 0x06
437 #define SSD_SPI_CMD_ERASE 0xd8
438 #define SSD_SPI_CMD_CLSR 0x30
439 #define SSD_SPI_CMD_READ_ID 0x9f
442 #define SSD_I2C_CTRL_READ 0x00
443 #define SSD_I2C_CTRL_WRITE 0x01
445 /* i2c internal register */
446 #define SSD_I2C_CFG_REG 0x00
447 #define SSD_I2C_DATA_REG 0x01
448 #define SSD_I2C_CMD_REG 0x02
449 #define SSD_I2C_STATUS_REG 0x03
450 #define SSD_I2C_SADDR_REG 0x04
451 #define SSD_I2C_LEN_REG 0x05
452 #define SSD_I2C_RLEN_REG 0x06
453 #define SSD_I2C_WLEN_REG 0x07
454 #define SSD_I2C_RESET_REG 0x08 //write for reset
455 #define SSD_I2C_PRER_REG 0x09
459 /* FPGA volt = ADC_value / 4096 * 3v */
460 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
461 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
462 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
463 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
/*
 * FPGA voltage helpers. Per the comment above (orig. line 459), the ADC
 * reading converts as volt = ADC_value / 4096 * 3V; SSD_FPGA_VOLT does this
 * in integer millivolts ((val * 3000) >> 12). The 64-bit status word packs
 * three 16-bit fields (max / min / current); each field's top 12 bits hold
 * the ADC code, hence the ">> 4".
 *
 * NOTE(review): `val` is expanded unparenthesized inside SSD_FPGA_VOLT_MIN,
 * SSD_FPGA_VOLT_CUR and SSD_FPGA_VOLT (e.g. `val >> 16`, `val * 3000`), and
 * `idx`/`ctrl`/`volt` likewise in SSD_VOLT_LOG_DATA. Safe only when callers
 * pass plain variables, not expressions — confirm call sites before reuse.
 */
466 #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
467 #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4)
468 #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4)
469 #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12)
471 #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt))
482 SSD_CLOCK_166M_LOST
= 0,
490 #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
491 #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)
493 #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8))
495 #define SSD_INLET_OT_TEMP (55) //55 DegC
496 #define SSD_INLET_OT_HYST (50) //50 DegC
497 #define SSD_FLASH_OT_TEMP (70) //70 DegC
498 #define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
518 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
519 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
520 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
522 #define SSD_LM80_REG_FAN1 0x28
523 #define SSD_LM80_REG_FAN2 0x29
524 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
526 #define SSD_LM80_REG_TEMP 0x27
527 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
528 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
529 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
530 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
532 #define SSD_LM80_REG_CONFIG 0x00
533 #define SSD_LM80_REG_ALARM1 0x01
534 #define SSD_LM80_REG_ALARM2 0x02
535 #define SSD_LM80_REG_MASK1 0x03
536 #define SSD_LM80_REG_MASK2 0x04
537 #define SSD_LM80_REG_FANDIV 0x05
538 #define SSD_LM80_REG_RES 0x06
/*
 * LM80 raw-to-millivolt conversion ((val * 10) >> 8) and the divider-ratio
 * correction for the 3.3V rail (*33/19). SSD_LM80_CONV_INTERVAL is in ms.
 *
 * NOTE(review): `val` is unparenthesized in SSD_LM80_CONVERT_VOLT
 * (`val * 10`) — passing an expression such as `a + b` would mis-expand;
 * verify call sites pass plain variables.
 */
540 #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8)
542 #define SSD_LM80_3V3_VOLT(val) ((val)*33/19)
544 #define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
585 #ifdef SSD_OT_PROTECT
586 #define SSD_OT_DELAY (60) //ms
588 #define SSD_OT_TEMP (90) //90 DegC
590 #define SSD_OT_TEMP_HYST (85) //85 DegC
593 /* fpga temperature */
594 //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
/*
 * FPGA die-temperature conversion: integer approximation of the float
 * formula kept in the comment above (orig. line 594),
 * temp = raw * 503.975 / 4096 - 273.15, truncated to whole degrees C.
 * The 64-bit register packs max/min/current 16-bit fields whose top 12 bits
 * are the ADC code (hence the ">> 4"), mirroring the SSD_FPGA_VOLT_* layout.
 *
 * NOTE(review): `val` is unparenthesized inside MAX/MIN/CUR_TEMP
 * (`val & 0xffff`, `val>>16`, `val>>32`) — only safe with plain variables.
 */
595 #define CONVERT_TEMP(val) ((val)*504/4096-273)
597 #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4))
598 #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4))
599 #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4))
/*
 * Power-loss capacitor (PL cap) monitoring constants. U1/U2 map two LM80
 * voltage inputs; SSD_PL_CAP_LEARN computes a capacitance-learning value
 * from the two voltages and a time `t` (constant 162 presumably encodes the
 * discharge load — TODO confirm against full source). Wait/charge macros
 * are ms-based polling budgets; the _VOLT_/_THRESHOLD constants are in mV.
 *
 * NOTE(review): SSD_PL_CAP_LEARN performs an integer division by
 * (2*162*(u1-u2)) — if u1 == u2 this is a divide-by-zero; callers must
 * guarantee u1 > u2 before invoking. All three arguments, and `val` in
 * SSD_PL_CAP_VOLT, are expanded unparenthesized — plain variables only.
 */
603 #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
604 #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
605 #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2)))
606 #define SSD_PL_CAP_LEARN_WAIT (20) //20ms
607 #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s
609 #define SSD_PL_CAP_CHARGE_WAIT (1000)
610 #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s
612 #define SSD_PL_CAP_VOLT(val) (val*7)
614 #define SSD_PL_CAP_VOLT_FULL (13700)
615 #define SSD_PL_CAP_VOLT_READY (12880)
617 #define SSD_PL_CAP_THRESHOLD (8900)
618 #define SSD_PL_CAP_CP_THRESHOLD (5800)
619 #define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
635 #define SSD_HWMON_OFFS_TEMP (0)
636 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
637 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
638 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
639 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
640 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
642 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
643 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
644 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
645 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
646 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
647 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
/*
 * sfifo_alloc — initialize a spinlock-protected ring FIFO.
 * FRAGMENT: interior lines of the original (orig. 664-666, 668-670, 672-676,
 * 678-683, 685-686, 688-691) are missing from this extract; only the visible
 * statements are documented below.
 * Visible behavior: rejects NULL fifo, size > INT_MAX, or esize == 0;
 * rounds __size up to the next power of two; backs storage with
 * vmalloc(esize * __size); stores mask = __size - 1 for cheap power-of-two
 * index wrapping; initializes fifo->lock.
 * NOTE(review): esize * __size is an unchecked 32-bit multiply — presumably
 * bounded by callers, but overflow handling is not visible here; confirm in
 * the full source. Return-value convention also not visible in this extract.
 */
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
/*
 * __sfifo_put — lockless inner enqueue; caller is expected to hold
 * fifo->lock (the locked wrappers sfifo_put below take it).
 * FRAGMENT: interior lines (orig. 708, 710-712, 715-718) are missing.
 * Visible behavior: FIFO is full when ((in + 1) & mask) == out (one slot is
 * sacrificed to distinguish full from empty); otherwise copies esize bytes
 * from val into slot data + in * esize and advances in with the
 * power-of-two mask wrap.
 */
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
/*
 * sfifo_put — locked enqueue wrapper around __sfifo_put.
 * FRAGMENT: interior lines (orig. 720-726, 731, 735-739) are missing.
 * Visible behavior: in process context (!in_interrupt()) takes the lock
 * with spin_lock_irq (disables local IRQs, so interrupt-context callers
 * cannot deadlock against this path); in interrupt context the plain
 * spin_lock suffices since IRQs are already off locally.
 * NOTE(review): spin_lock_irq (rather than _irqsave) asserts IRQs are
 * enabled on entry in the process-context path — confirm no caller holds
 * them disabled.
 */
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
/*
 * __sfifo_get — lockless inner dequeue; caller is expected to hold
 * fifo->lock (see the locked wrapper sfifo_get below).
 * FRAGMENT: interior lines (orig. 741, 743-745, 748-751) are missing.
 * Visible behavior: FIFO is empty when out == in; otherwise copies esize
 * bytes from slot data + out * esize into val and advances out with the
 * power-of-two mask wrap. Mirror image of __sfifo_put above.
 */
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
/*
 * sfifo_get — locked dequeue wrapper around __sfifo_get; same locking
 * discipline as sfifo_put above (spin_lock_irq in process context, plain
 * spin_lock when in_interrupt()).
 * FRAGMENT: interior lines (orig. 753-759, 764, 768-773) are missing;
 * return-value plumbing beyond the visible `ret` assignments is not shown.
 */
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
/*
 * ssd_blist_get — pre-2.6.30 compat helper (this whole section is compiled
 * only under LINUX_VERSION_CODE < 2.6.30; newer kernels alias these to the
 * stock bio_list API, see the #defines below orig. line 810).
 * FRAGMENT: only the signature and the initial load of ssd_bl->prev are
 * visible; the detach/return logic (orig. 787, 789-795) is missing.
 * Presumably detaches and returns the whole chain, like bio_list_get —
 * TODO confirm against full source.
 */
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
/*
 * ssd_blist_add — pre-2.6.30 compat append of a bio to the driver's
 * singly-linked bio list (newer kernels use bio_list_add instead).
 * FRAGMENT: only the tail-link statement (ssd_bl->next->bi_next = bio) is
 * visible; the empty-list branch and tail-pointer update (orig. 797-800,
 * 802-809) are missing from this extract.
 */
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
810 #define ssd_blist bio_list
811 #define ssd_blist_init bio_list_init
812 #define ssd_blist_get bio_list_get
813 #define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
823 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
824 #define mutex_lock down
825 #define mutex_unlock up
826 #define mutex semaphore
827 #define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1401 uint64_t reset_time
;
1402 int has_non_0x98_reg_access
;
1403 spinlock_t in_flight_lock
;
1405 uint64_t last_poweron_id
;
1411 typedef struct ssd_acc_info
{
1412 uint32_t threshold_l1
;
1413 uint32_t threshold_l2
;
1417 typedef struct ssd_reg_op_info
1421 } ssd_reg_op_info_t
;
1423 typedef struct ssd_spi_op_info
1428 } ssd_spi_op_info_t
;
1430 typedef struct ssd_i2c_op_info
1437 } ssd_i2c_op_info_t
;
1439 typedef struct ssd_smbus_op_info
1445 } ssd_smbus_op_info_t
;
1447 typedef struct ssd_ram_op_info
{
1451 uint8_t __user
*buf
;
1452 } ssd_ram_op_info_t
;
1454 typedef struct ssd_flash_op_info
{
1459 uint8_t __user
*buf
;
1460 } ssd_flash_op_info_t
;
1462 typedef struct ssd_sw_log_info
{
1466 } ssd_sw_log_info_t
;
1468 typedef struct ssd_version_info
1470 uint32_t bridge_ver
; /* bridge fw version */
1471 uint32_t ctrl_ver
; /* controller fw version */
1472 uint32_t bm_ver
; /* battery manager fw version */
1473 uint8_t pcb_ver
; /* main pcb version */
1474 uint8_t upper_pcb_ver
;
1477 } ssd_version_info_t
;
1479 typedef struct pci_addr
1487 typedef struct ssd_drv_param_info
{
1497 } ssd_drv_param_info_t
;
1501 enum ssd_form_factor
1503 SSD_FORM_FACTOR_HHHL
= 0,
1504 SSD_FORM_FACTOR_FHHL
1508 /* ssd power loss protect */
1517 #define SSD_BM_SLAVE_ADDRESS 0x16
1518 #define SSD_BM_CAP 5
1521 #define SSD_BM_SAFETYSTATUS 0x51
1522 #define SSD_BM_OPERATIONSTATUS 0x54
1524 /* ManufacturerAccess */
1525 #define SSD_BM_MANUFACTURERACCESS 0x00
1526 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1528 /* Data flash access */
1529 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1530 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1531 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1532 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1534 /* min cap voltage */
1535 #define SSD_BM_CAP_VOLT_MIN 500
1540 SSD_BM_CAP_VINA = 1,
1546 SSD_BMSTATUS_OK
= 0,
1547 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1548 SSD_BMSTATUS_WARNING
1553 SBS_UNIT_TEMPERATURE
,
1558 SBS_UNIT_CAPACITANCE
1586 uint16_t cap_volt
[SSD_BM_CAP
];
1593 struct ssd_bm_manufacturer_data
1595 uint16_t pack_lot_code
;
1596 uint16_t pcb_lot_code
;
1597 uint16_t firmware_ver
;
1598 uint16_t hardware_ver
;
1601 struct ssd_bm_configuration_registers
1614 uint16_t fet_action
;
1619 #define SBS_VALUE_MASK 0xffff
1621 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1622 #define bm_var(start, offset) ((void *) start + (offset))
1624 static struct sbs_cmd ssd_bm_sbs
[] = {
1625 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1626 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1627 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1628 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1629 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1630 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1631 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1632 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1633 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1634 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1635 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1636 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1637 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1638 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1639 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1640 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1641 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1642 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1643 {0, 0, 0, 0, 0, NULL
},
/*
 * ioctl command set for the hio character device ("cHIO").
 * All commands share the 'H' magic; the number ranges group them:
 *   100+ info queries, 120+ register access, 125+ SPI flash, 128+ I2C,
 *   131+ SMBus, 140+ backup-module (capacitor), 150+ RAM, 160+ NAND,
 *   180+ BBT, 190+ alarm LED, 200+ reset/load, 220+ event log,
 *   230+ clear ops, 235+ label/version, 240+ flush/write-mode,
 *   250+ debug, 260+ warning.
 */
#define SSD_CMD_GET_PROTOCOL_INFO	_IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO		_IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO		_IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART		_IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX			_IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT		_IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO		_IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER		_IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO		_IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO		_IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT		_IOR('H', 111, struct ssd_hw_info_extend)

#define SSD_CMD_REG_READ		_IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE		_IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ		_IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE		_IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE		_IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ		_IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE		_IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ		_IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE		_IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE	_IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE	_IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE		_IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD	_IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD		_IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK	_IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK	_IOWR('H', 139, struct ssd_smbus_op_info)

#define SSD_CMD_BM_GET_VER		_IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP		_IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING		_IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN		_IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS		_IOR('H', 144, int)

#define SSD_CMD_RAM_READ		_IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE		_IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID		_IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ		_IOWR('H', 161, struct ssd_flash_op_info) /* with oob */
#define SSD_CMD_NAND_WRITE		_IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE		_IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT		_IOWR('H', 164, struct ssd_flash_op_info) /* ignore EIO */

#define SSD_CMD_UPDATE_BBT		_IOW('H', 180, struct ssd_flash_op_info)

#define SSD_CMD_CLEAR_ALARM		_IOW('H', 190, int)
#define SSD_CMD_SET_ALARM		_IOW('H', 191, int)

#define SSD_CMD_RESET			_IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW		_IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV		_IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV		_IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP		_IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET		_IOW('H', 206, int)

#define SSD_CMD_GET_NR_LOG		_IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG			_IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL		_IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT		_IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS		_IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG		_IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART		_IOW('H', 231, int)

#define SSD_CMD_SW_LOG			_IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL		_IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION		_IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE		_IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS		_IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2		_IOR('H', 239, void *)

#define SSD_CMD_FLUSH			_IOW('H', 240, int)
#define SSD_CMD_SAVE_MD			_IOW('H', 241, int)

#define SSD_CMD_SET_WMODE		_IOW('H', 242, int)
#define SSD_CMD_GET_WMODE		_IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE		_IOR('H', 244, int)

#define SSD_CMD_DEBUG			_IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO		_IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING		_IOW('H', 260, int)

/* event log rendering */
#define SSD_LOG_MAX_SZ		4096
#define SSD_LOG_LEVEL		SSD_LOG_LEVEL_NOTICE
#define SSD_DIF_WITH_OLD_LOG	0x3f
/*
 * Log payload kinds and on-wire log record layouts.
 * NOTE(review): this copy of the file is missing interior lines of these
 * enum/struct definitions (the remaining enum members and most struct
 * fields are not visible here); do not infer the full layouts from this
 * fragment — consult the complete driver source.
 */
1747 SSD_LOG_DATA_NONE
= 0,
/* packed on-wire log entry record — fields not visible in this copy */
1752 typedef struct ssd_log_entry
1770 }__attribute__((packed
))ssd_log_entry_t
;
/* packed log record; ctrl_idx is an 8-bit controller-index bitfield */
1772 typedef struct ssd_log
1775 uint64_t ctrl_idx
:8;
1777 } __attribute__((packed
)) ssd_log_t
;
/* descriptor mapping an event id to display attributes (see table below) */
1779 typedef struct ssd_log_desc
1787 } __attribute__((packed
)) ssd_log_desc_t
;
/* log index used for software (driver-generated) events */
1789 #define SSD_LOG_SW_IDX 0xF
/* sentinel event id terminating the descriptor table below */
1790 #define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1791 static struct ssd_log_desc ssd_log_desc
[] = {
1792 /* event, level, show flash, show block, show page, desc */
1793 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1794 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1795 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1796 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1797 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1798 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1799 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1800 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1801 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1802 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1803 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1804 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1805 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1806 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1807 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1808 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1809 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1810 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1811 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1812 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1813 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1814 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1815 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1816 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1817 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1818 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1819 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1820 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1821 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1822 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1823 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1824 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Data inspection failure"},
1825 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1828 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1829 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1830 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1831 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1832 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1833 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1834 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1835 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1836 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1837 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1838 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1839 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1840 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1841 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1842 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1843 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1844 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1845 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1846 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1847 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1848 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1849 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1850 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1851 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1852 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1854 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1855 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1856 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1857 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1859 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1861 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1862 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1863 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1864 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1865 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1866 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1867 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1868 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1869 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1870 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1871 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1872 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1873 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1874 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1875 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1876 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1878 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1879 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1880 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1881 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1882 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1884 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1885 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1886 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1887 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1888 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1889 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1891 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Over temperature"}, //90
1892 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature is OK"}, //80
1893 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1894 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1895 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1896 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1897 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1898 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1899 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1900 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1901 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature close to limit"}, //85
1903 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1904 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1905 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1906 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1907 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1909 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1910 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1911 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1912 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1913 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1914 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1915 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1916 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1917 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1918 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1919 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1920 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1921 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1922 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 95 degrees C"},
1923 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 100 degrees C"},
1925 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1926 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1927 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1928 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1929 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1930 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1931 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1932 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1933 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1934 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1935 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1936 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1937 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1938 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1939 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1940 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1941 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1942 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1943 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1944 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1945 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1946 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1947 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55
1948 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50
1949 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash over temperature"}, //70
1950 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash temperature is OK"}, //65
1951 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1952 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1953 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1954 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1955 {0x320, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Temperature sensor event"},
1957 {0x350, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear smart"},
1958 {0x351, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear warning"},
1960 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
/* Firmware/driver event ids referenced by name elsewhere in the driver. */

/* temperature / battery / board events (0x9x) */
#define SSD_LOG_OVER_TEMP		0x90
#define SSD_LOG_NORMAL_TEMP		0x91
#define SSD_LOG_WARN_TEMP		0x9a
#define SSD_LOG_SEU_FAULT		0x93
#define SSD_LOG_SEU_FAULT1		0x98
#define SSD_LOG_BATTERY_FAULT		0x92
#define SSD_LOG_BATTERY_OK		0x99
#define SSD_LOG_BOARD_VOLT_FAULT	0x9f

/* software (driver-generated) events (0x3xx) */
#define SSD_LOG_TIMEOUT			0x300
#define SSD_LOG_POWER_ON		0x301
#define SSD_LOG_POWER_OFF		0x302
#define SSD_LOG_CLEAR_LOG		0x303
#define SSD_LOG_SET_CAPACITY		0x304
#define SSD_LOG_CLEAR_DATA		0x305
#define SSD_LOG_BM_SFSTATUS		0x306
#define SSD_LOG_EIO			0x307
#define SSD_LOG_ECMD			0x308
#define SSD_LOG_SET_WMODE		0x309
#define SSD_LOG_DDR_INIT_ERR		0x30a
#define SSD_LOG_PCIE_LINK_STATUS	0x30b
#define SSD_LOG_CTRL_RST_SYNC		0x30c
#define SSD_LOG_CLK_FAULT		0x30d
#define SSD_LOG_VOLT_FAULT		0x30e
#define SSD_LOG_SET_CAPACITY_END	0x30F
#define SSD_LOG_CLEAR_DATA_END		0x310
#define SSD_LOG_RESET			0x311
#define SSD_LOG_CAP_VOLT_FAULT		0x312
#define SSD_LOG_CAP_LEARN_FAULT		0x313
#define SSD_LOG_CAP_STATUS		0x314
#define SSD_LOG_VOLT_STATUS		0x315
#define SSD_LOG_INLET_OVER_TEMP		0x316
#define SSD_LOG_INLET_NORMAL_TEMP	0x317
#define SSD_LOG_FLASH_OVER_TEMP		0x318
#define SSD_LOG_FLASH_NORMAL_TEMP	0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT	0x31a
#define SSD_LOG_SENSOR_FAULT		0x31b
#define SSD_LOG_ERASE_ALL		0x31c
#define SSD_LOG_ERASE_ALL_END		0x31d
#define SSD_LOG_TEMP_SENSOR_EVENT	0x320
#define SSD_LOG_CLEAR_SMART		0x350
#define SSD_LOG_CLEAR_WARNING		0x351
2008 /* sw log fifo depth */
2009 #define SSD_LOG_FIFO_SZ 1024
2013 static DEFINE_PER_CPU(struct list_head
, ssd_doneq
);
2014 static DEFINE_PER_CPU(struct tasklet_struct
, ssd_tasklet
);
2017 /* unloading driver */
2018 static volatile int ssd_exiting
= 0;
2020 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
2021 static struct class_simple
*ssd_class
;
2023 static struct class *ssd_class
;
2026 static int ssd_cmajor
= SSD_CMAJOR
;
2028 /* ssd block device major, minors */
2029 static int ssd_major
= SSD_MAJOR
;
2030 static int ssd_major_sl
= SSD_MAJOR_SL
;
2031 static int ssd_minors
= SSD_MINORS
;
2033 /* ssd device list */
2034 static struct list_head ssd_list
;
2035 static unsigned long ssd_index_bits
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2036 static unsigned long ssd_index_bits_sl
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2037 static atomic_t ssd_nr
;
/*
 * Driver operating modes (values consumed by the "mode" module param).
 * NOTE(review): the enclosing enum's opening and closing lines are not
 * visible in this copy; only the enumerators survive here.
 */
2042 SSD_DRV_MODE_STANDARD
= 0, /* full */
2043 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2044 SSD_DRV_MODE_BASE
/* base only */
2054 #if (defined SSD_MSIX)
2055 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2056 #elif (defined SSD_MSI)
2057 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2059 /* auto select the defaut int mode according to the kernel version*/
2060 /* suse 11 sp1 irqbalance bug: use msi instead*/
2061 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
2062 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2064 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2068 static int mode
= SSD_DRV_MODE_STANDARD
;
2069 static int status_mask
= 0xFF;
2070 static int int_mode
= SSD_INT_MODE_DEFAULT
;
2071 static int threaded_irq
= 0;
2072 static int log_level
= SSD_LOG_LEVEL_WARNING
;
2073 static int ot_protect
= 1;
2074 static int wmode
= SSD_WMODE_DEFAULT
;
2075 static int finject
= 0;
2077 module_param(mode
, int, 0);
2078 module_param(status_mask
, int, 0);
2079 module_param(int_mode
, int, 0);
2080 module_param(threaded_irq
, int, 0);
2081 module_param(log_level
, int, 0);
2082 module_param(ot_protect
, int, 0);
2083 module_param(wmode
, int, 0);
2084 module_param(finject
, int, 0);
2087 MODULE_PARM_DESC(mode
, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
2088 MODULE_PARM_DESC(status_mask
, "command status mask, 0 - without command error, 0xff - with command error");
2089 MODULE_PARM_DESC(int_mode
, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
2090 MODULE_PARM_DESC(threaded_irq
, "threaded irq, 0 - normal irq, 1 - threaded irq");
2091 MODULE_PARM_DESC(log_level
, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
2092 MODULE_PARM_DESC(ot_protect
, "over temperature protect, 0 - disable, 1 - enable");
2093 MODULE_PARM_DESC(wmode
, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
2094 MODULE_PARM_DESC(finject
, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2096 // API adaption layer
2097 static inline void ssd_bio_endio(struct bio
*bio
, int error
)
2099 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
2100 bio
->bi_error
= error
;
2102 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
2103 bio_endio(bio
, error
);
2105 bio_endio(bio
, bio
->bi_size
, error
);
/*
 * Whether @bio is a discard (TRIM) request, across kernel API versions.
 * NOTE(review): the opening brace and the FIRST preprocessor branch of
 * this function are not visible in this copy (lines dropped), as is the
 * trailing #else/#endif; do not assume which kernel range the missing
 * branch covers — consult the complete driver source.
 */
2109 static inline int ssd_bio_has_discard(struct bio
*bio
)
2113 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2114 return bio_op(bio
) == REQ_OP_DISCARD
;
2115 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2116 return bio
->bi_rw
& REQ_DISCARD
;
2117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2118 return bio_rw_flagged(bio
, BIO_RW_DISCARD
);
/*
 * Whether @bio carries a flush request, across kernel API versions.
 * NOTE(review): the opening brace and the trailing #else/#endif of this
 * function are not visible in this copy (lines dropped); the pre-2.6.37
 * behavior cannot be determined from this fragment.
 */
2124 static inline int ssd_bio_has_flush(struct bio
*bio
)
2126 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2127 return bio_op(bio
) == REQ_OP_FLUSH
;
2128 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2129 return bio
->bi_rw
& REQ_FLUSH
;
2135 static inline int ssd_bio_has_barrier_or_fua(struct bio
* bio
)
2137 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2138 return bio
->bi_opf
& REQ_FUA
;
2139 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2140 return bio
->bi_rw
& REQ_FUA
;
2141 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2142 return bio
->bi_rw
& REQ_HARDBARRIER
;
2143 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2144 return bio_rw_flagged(bio
, BIO_RW_BARRIER
);
2146 return bio_barrier(bio
);
2151 static int __init
ssd_drv_mode(char *str
)
2153 mode
= (int)simple_strtoul(str
, NULL
, 0);
2158 static int __init
ssd_status_mask(char *str
)
2160 status_mask
= (int)simple_strtoul(str
, NULL
, 16);
2165 static int __init
ssd_int_mode(char *str
)
2167 int_mode
= (int)simple_strtoul(str
, NULL
, 0);
2172 static int __init
ssd_threaded_irq(char *str
)
2174 threaded_irq
= (int)simple_strtoul(str
, NULL
, 0);
2179 static int __init
ssd_log_level(char *str
)
2181 log_level
= (int)simple_strtoul(str
, NULL
, 0);
2186 static int __init
ssd_ot_protect(char *str
)
2188 ot_protect
= (int)simple_strtoul(str
, NULL
, 0);
2193 static int __init
ssd_wmode(char *str
)
2195 wmode
= (int)simple_strtoul(str
, NULL
, 0);
2200 static int __init
ssd_finject(char *str
)
2202 finject
= (int)simple_strtoul(str
, NULL
, 0);
2207 __setup(MODULE_NAME
"_mode=", ssd_drv_mode
);
2208 __setup(MODULE_NAME
"_status_mask=", ssd_status_mask
);
2209 __setup(MODULE_NAME
"_int_mode=", ssd_int_mode
);
2210 __setup(MODULE_NAME
"_threaded_irq=", ssd_threaded_irq
);
2211 __setup(MODULE_NAME
"_log_level=", ssd_log_level
);
2212 __setup(MODULE_NAME
"_ot_protect=", ssd_ot_protect
);
2213 __setup(MODULE_NAME
"_wmode=", ssd_wmode
);
2214 __setup(MODULE_NAME
"_finject=", ssd_finject
);
2218 #ifdef CONFIG_PROC_FS
2219 #include <linux/proc_fs.h>
2220 #include <asm/uaccess.h>
2222 #define SSD_PROC_DIR MODULE_NAME
2223 #define SSD_PROC_INFO "info"
2225 static struct proc_dir_entry
*ssd_proc_dir
= NULL
;
2226 static struct proc_dir_entry
*ssd_proc_info
= NULL
;
/*
 * Legacy (pre-3.2) /proc read_proc handler: prints the driver version and
 * one summary section per registered device into @page.
 * NOTE(review): this copy is missing interior lines of the function
 * (opening brace, the declarations of len/idx/size, closing braces and
 * the return) — the fragment below is not compilable as-is; consult the
 * complete driver source before modifying.
 */
2228 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2229 static int ssd_proc_read(char *page
, char **start
,
2230 off_t off
, int count
, int *eof
, void *data
)
2232 struct ssd_device
*dev
= NULL
;
2233 struct ssd_device
*n
= NULL
;
/* bail out while unloading, and only support a single read at off 0 */
2239 if (ssd_exiting
|| off
!= 0) {
2243 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
/* _safe variant: devices may be removed while we walk the list */
2245 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2247 size
= dev
->hw_info
.size
;
/* bytes -> GB (decimal); do_div required for 64-bit math on 32-bit */
2248 do_div(size
, 1000000000);
2250 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2252 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2254 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2255 if (dev
->hw_info
.ctrl_ver
!= 0) {
2256 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2259 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2261 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2262 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2265 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
/*
 * seq_file show handler (kernels >= 3.2): same report as ssd_proc_read
 * but via seq_printf.
 * NOTE(review): this copy is missing interior lines (opening brace, the
 * declarations of idx/size, closing braces and return 0) — the fragment
 * below is not compilable as-is.
 */
2274 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2276 struct ssd_device
*dev
= NULL
;
2277 struct ssd_device
*n
= NULL
;
2285 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
/* _safe variant: devices may be removed while we walk the list */
2287 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2289 size
= dev
->hw_info
.size
;
/* bytes -> GB (decimal); do_div required for 64-bit math on 32-bit */
2290 do_div(size
, 1000000000);
2292 seq_printf(m
, "\n");
2294 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2296 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2297 if (dev
->hw_info
.ctrl_ver
!= 0) {
2298 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2301 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2303 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2304 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2307 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
2313 static int ssd_proc_open(struct inode
*inode
, struct file
*file
)
2315 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
2316 return single_open(file
, ssd_proc_show
, PDE(inode
)->data
);
2318 return single_open(file
, ssd_proc_show
, PDE_DATA(inode
));
2322 static const struct file_operations ssd_proc_fops
= {
2323 .open
= ssd_proc_open
,
2325 .llseek
= seq_lseek
,
2326 .release
= single_release
,
2331 static void ssd_cleanup_proc(void)
2333 if (ssd_proc_info
) {
2334 remove_proc_entry(SSD_PROC_INFO
, ssd_proc_dir
);
2335 ssd_proc_info
= NULL
;
2338 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2339 ssd_proc_dir
= NULL
;
/*
 * Create /proc/hio and /proc/hio/info; on pre-3.2 kernels a read_proc
 * entry is used, otherwise proc_create() with ssd_proc_fops.
 * NOTE(review): this copy is missing interior lines (opening brace, the
 * NULL checks that precede each goto, #else/#endif lines, return values
 * and the out_proc_mkdir label) — the fragment below is not compilable
 * as-is; consult the complete driver source before modifying.
 */
2342 static int ssd_init_proc(void)
2344 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2346 goto out_proc_mkdir
;
2348 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2349 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2351 goto out_create_proc_entry
;
2353 ssd_proc_info
->read_proc
= ssd_proc_read
;
/* pre-2.6.30 proc entries carried an owner module pointer */
2356 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2357 ssd_proc_info
->owner
= THIS_MODULE
;
2360 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2362 goto out_create_proc_entry
;
/* error unwinding: drop the directory created above */
2367 out_create_proc_entry
:
2368 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2374 static void ssd_cleanup_proc(void)
2378 static int ssd_init_proc(void)
2382 #endif /* CONFIG_PROC_FS */
/* sysfs integration is not implemented: no-op placeholders.
 * NOTE(review): the bodies were dropped in this copy; restored as
 * trivial stubs, consistent with their 4-line footprint here. */
static void ssd_unregister_sysfs(struct ssd_device *dev)
{
}

static int ssd_register_sysfs(struct ssd_device *dev)
{
	return 0;
}

static void ssd_cleanup_sysfs(void)
{
}

static int ssd_init_sysfs(void)
{
	return 0;
}
2405 static inline void ssd_put_index(int slave
, int index
)
2407 unsigned long *index_bits
= ssd_index_bits
;
2410 index_bits
= ssd_index_bits_sl
;
2413 if (test_and_clear_bit(index
, index_bits
)) {
2414 atomic_dec(&ssd_nr
);
2418 static inline int ssd_get_index(int slave
)
2420 unsigned long *index_bits
= ssd_index_bits
;
2424 index_bits
= ssd_index_bits_sl
;
2428 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
2432 if (test_and_set_bit(index
, index_bits
)) {
2436 atomic_inc(&ssd_nr
);
2441 static void ssd_cleanup_index(void)
2446 static int ssd_init_index(void)
2448 INIT_LIST_HEAD(&ssd_list
);
2449 atomic_set(&ssd_nr
, 0);
2450 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2451 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2456 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2458 if(idx
< SSD_ALPHABET_NUM
) {
2459 snprintf(name
, size
, "%c", 'a'+idx
);
2461 idx
-= SSD_ALPHABET_NUM
;
2462 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
2466 /* pci register r&w */
2467 static inline void ssd_reg_write(void *addr
, uint64_t val
)
2469 iowrite32((uint32_t)val
, addr
);
2470 iowrite32((uint32_t)(val
>> 32), addr
+ 4);
2474 static inline uint64_t ssd_reg_read(void *addr
)
2477 uint32_t val_lo
, val_hi
;
2479 val_lo
= ioread32(addr
);
2480 val_hi
= ioread32(addr
+ 4);
2483 val
= val_lo
| ((uint64_t)val_hi
<< 32);
/* 32-bit MMIO register accessors */
#define ssd_reg32_write(addr, val)	writel(val, addr)
#define ssd_reg32_read(addr)		readl(addr)
/*
 * Clear / set the alarm LED via the SSD_LED_REG control register. On
 * protocol <= V3 devices the register semantics differ (see the ver
 * check below).
 * NOTE(review): this copy is missing interior lines of both functions
 * (opening braces, the declaration of val, the bit set/clear operations
 * between read and write, and closing braces) — the fragments below are
 * not compilable as-is; consult the complete driver source.
 */
2493 static void ssd_clear_alarm(struct ssd_device
*dev
)
2497 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2501 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2503 /* firmware control */
2506 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
2509 static void ssd_set_alarm(struct ssd_device
*dev
)
2513 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2517 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2521 /* software control */
2524 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/*
 * Byte-order swap helpers (endianness conversion). The opening
 * "((uintN_t)( \" lines of both macros were dropped in this copy,
 * leaving unbalanced parentheses; restored here. Each argument use is
 * fully parenthesized; note @x is evaluated multiple times, so do not
 * pass expressions with side effects.
 */
#define u32_swap(x) ((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define u16_swap(x) ((uint16_t)( \
	(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
	(((uint16_t)(x) & (uint16_t)0xff00) >> 8)))
2541 /* No lock, for init only*/
2542 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2552 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2554 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2555 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2556 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2557 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2561 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2562 if (val
== 0x1000000) {
2566 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2573 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2582 static int ssd_init_spi(struct ssd_device
*dev
)
2588 mutex_lock(&dev
->spi_mutex
);
2591 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2594 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2596 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2601 } while (val
!= 0x1000000);
2603 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2608 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2616 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2618 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2621 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2622 mutex_unlock(&dev
->spi_mutex
);
2629 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2640 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2641 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2645 mutex_lock(&dev
->spi_mutex
);
2646 while (rlen
< size
) {
2647 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2649 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2651 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2652 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2653 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2654 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2658 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2659 if (val
== 0x1000000) {
2663 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2670 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2671 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2673 rlen
+= sizeof(uint32_t);
2677 mutex_unlock(&dev
->spi_mutex
);
2681 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2693 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2694 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2695 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2699 mutex_lock(&dev
->spi_mutex
);
2701 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2703 wlen
= size
/ sizeof(uint32_t);
2704 for (i
=0; i
<(int)wlen
; i
++) {
2705 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2711 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2717 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2719 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2721 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2726 } while (val
!= 0x1000000);
2728 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2733 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2740 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2741 if ((val
>> 6) & 0x1) {
2748 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2750 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2753 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2755 mutex_unlock(&dev
->spi_mutex
);
2760 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2770 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2774 mutex_lock(&dev
->spi_mutex
);
2776 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2777 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2780 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2782 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2786 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2789 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2791 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2796 } while (val
!= 0x1000000);
2798 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2803 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2810 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2811 if ((val
>> 5) & 0x1) {
2818 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2820 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2823 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2825 mutex_unlock(&dev
->spi_mutex
);
2830 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2841 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2842 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2846 while (len
< size
) {
2847 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2848 rsize
= dev
->rom_info
.page_size
- roff
;
2849 if ((size
- len
) < rsize
) {
2850 rsize
= (size
- len
);
2854 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2868 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2879 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2880 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2884 while (len
< size
) {
2885 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2886 wsize
= dev
->rom_info
.page_size
- woff
;
2887 if ((size
- len
) < wsize
) {
2888 wsize
= (size
- len
);
2892 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2906 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2916 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2917 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2921 while (len
< size
) {
2924 ret
= ssd_spi_block_erase(dev
, eoff
);
2929 len
+= dev
->rom_info
.block_size
;
2939 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2941 return ssd_reg32_read(addr
);
2944 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2946 ssd_reg32_write(addr
, val
);
2947 ssd_reg32_read(addr
);
2950 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2952 ssd_i2c_ctrl_t ctrl
;
2953 ssd_i2c_data_t data
;
2960 ctrl
.bits
.wdata
= 0;
2961 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2962 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2963 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2967 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2968 if (data
.bits
.valid
== 0) {
2973 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2979 status
= data
.bits
.rdata
;
2981 if (!(status
& 0x4)) {
2982 /* clear read fifo data */
2983 ctrl
.bits
.wdata
= 0;
2984 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2985 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2986 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2990 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2991 if (data
.bits
.valid
== 0) {
2996 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3004 if (nr_data
<= SSD_I2C_MAX_DATA
) {
3013 ctrl
.bits
.wdata
= 0x04;
3014 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3015 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3016 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3019 if (!(status
& 0x8)) {
3021 /* reset i2c controller */
3022 ctrl
.bits
.wdata
= 0x0;
3023 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
3024 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3025 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3032 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3034 ssd_i2c_ctrl_t ctrl
;
3035 ssd_i2c_data_t data
;
3041 mutex_lock(&dev
->i2c_mutex
);
3046 ctrl
.bits
.wdata
= saddr
;
3047 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3048 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3049 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3052 while (off
< size
) {
3053 ctrl
.bits
.wdata
= buf
[off
];
3054 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3055 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3056 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3062 ctrl
.bits
.wdata
= 0x01;
3063 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3064 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3065 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3070 ctrl
.bits
.wdata
= 0;
3071 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3072 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3073 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3076 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3077 if (data
.bits
.valid
== 0) {
3082 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3089 status
= data
.bits
.rdata
;
3094 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3101 if (!(status
& 0x1)) {
3107 if (status
& 0x20) {
3113 if (status
& 0x10) {
3120 if (__ssd_i2c_clear(dev
, saddr
)) {
3124 mutex_unlock(&dev
->i2c_mutex
);
3129 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3131 ssd_i2c_ctrl_t ctrl
;
3132 ssd_i2c_data_t data
;
3138 mutex_lock(&dev
->i2c_mutex
);
3143 ctrl
.bits
.wdata
= saddr
;
3144 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3145 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3146 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3149 ctrl
.bits
.wdata
= size
;
3150 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3151 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3152 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3155 ctrl
.bits
.wdata
= 0x02;
3156 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3157 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3158 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3163 ctrl
.bits
.wdata
= 0;
3164 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3165 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3166 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3169 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3170 if (data
.bits
.valid
== 0) {
3175 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3182 status
= data
.bits
.rdata
;
3187 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3194 if (!(status
& 0x2)) {
3200 if (status
& 0x20) {
3206 if (status
& 0x10) {
3212 while (off
< size
) {
3213 ctrl
.bits
.wdata
= 0;
3214 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3215 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3216 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3220 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3221 if (data
.bits
.valid
== 0) {
3226 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3233 buf
[off
] = data
.bits
.rdata
;
3240 if (__ssd_i2c_clear(dev
, saddr
)) {
3244 mutex_unlock(&dev
->i2c_mutex
);
3249 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3251 ssd_i2c_ctrl_t ctrl
;
3252 ssd_i2c_data_t data
;
3258 mutex_lock(&dev
->i2c_mutex
);
3263 ctrl
.bits
.wdata
= saddr
;
3264 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3265 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3266 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3270 while (off
< wsize
) {
3271 ctrl
.bits
.wdata
= wbuf
[off
];
3272 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3273 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3274 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3280 ctrl
.bits
.wdata
= rsize
;
3281 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3282 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3283 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3286 ctrl
.bits
.wdata
= 0x03;
3287 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3288 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3289 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3294 ctrl
.bits
.wdata
= 0;
3295 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3296 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3297 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3300 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3301 if (data
.bits
.valid
== 0) {
3306 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3313 status
= data
.bits
.rdata
;
3318 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3325 if (!(status
& 0x2)) {
3331 if (status
& 0x20) {
3337 if (status
& 0x10) {
3344 while (off
< rsize
) {
3345 ctrl
.bits
.wdata
= 0;
3346 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3347 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3348 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3352 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3353 if (data
.bits
.valid
== 0) {
3358 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3365 rbuf
[off
] = data
.bits
.rdata
;
3372 if (__ssd_i2c_clear(dev
, saddr
)) {
3375 mutex_unlock(&dev
->i2c_mutex
);
/* SMBus "send byte": write one byte from @buf to slave @saddr via the i2c
 * engine, retrying transient failures. NOTE(review): the retry-loop header
 * and returns are elided here; visible logic shows the attempt exits on
 * success or timeout, gives up after SSD_SMBUS_RETRY_MAX tries, and sleeps
 * SSD_SMBUS_RETRY_INTERVAL between tries. */
3380 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
/* one-byte i2c write attempt */
3386 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
/* stop retrying on success or on a hard timeout */
3387 if (!ret
|| -ETIMEDOUT
== ret
) {
/* retries exhausted */
3392 if (i
>= SSD_SMBUS_RETRY_MAX
) {
/* back off before the next attempt */
3395 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3401 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3407 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3408 if (!ret
|| -ETIMEDOUT
== ret
) {
3413 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3416 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3422 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3424 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3429 memcpy((smb_data
+ 1), buf
, 1);
3432 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3433 if (!ret
|| -ETIMEDOUT
== ret
) {
3438 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3441 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3447 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3449 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3456 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3457 if (!ret
|| -ETIMEDOUT
== ret
) {
3462 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3465 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3471 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3473 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3478 memcpy((smb_data
+ 1), buf
, 2);
3481 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3482 if (!ret
|| -ETIMEDOUT
== ret
) {
3487 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3490 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3496 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3498 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3505 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3506 if (!ret
|| -ETIMEDOUT
== ret
) {
3511 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3514 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3520 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3522 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3528 memcpy((smb_data
+ 2), buf
, size
);
3531 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3532 if (!ret
|| -ETIMEDOUT
== ret
) {
3537 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3540 msleep(SSD_SMBUS_RETRY_INTERVAL
);
/* SMBus block read: send the command byte to slave @saddr, then read back
 * a length-prefixed block (length in smb_data[1], payload from smb_data+2)
 * into @buf. Retries transient failures like the other SMBus helpers.
 * NOTE(review): the body of the `rsize > size` branch is elided here — it
 * must clamp or fail, otherwise the final memcpy of rsize bytes could
 * overrun @buf; confirm against the full source. */
3546 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
/* scratch buffer: [0]=cmd, [1]=returned length, [2..]=payload */
3548 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
/* write the command byte, read length + max payload in one transaction */
3556 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
/* stop retrying on success or on a hard timeout */
3557 if (!ret
|| -ETIMEDOUT
== ret
) {
/* retries exhausted */
3562 if (i
>= SSD_SMBUS_RETRY_MAX
) {
/* back off before the next attempt */
3565 msleep(SSD_SMBUS_RETRY_INTERVAL
);
/* device-reported payload length */
3571 rsize
= smb_data
[1];
/* guard: reported length exceeds caller's buffer (handling elided) */
3573 if (rsize
> size
) {
/* copy payload out to the caller */
3577 memcpy(buf
, (smb_data
+ 2), rsize
);
3583 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3586 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3591 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3596 conf
&= (uint8_t)(~1u);
3598 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
/* Read the LM75 temperature register over SMBus and return it in host
 * byte order via @data (register is big-endian on the wire, hence the
 * u16_swap). Error-path lines are elided from this view. */
3607 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
/* 16-bit raw temperature register */
3612 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
/* convert big-endian wire value to host order */
3617 *data
= u16_swap(val
);
3622 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3631 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3638 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3643 /* set volt limit */
3644 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3645 high
= ssd_lm80_limit
[i
].high
;
3646 low
= ssd_lm80_limit
[i
].low
;
3648 if (SSD_LM80_IN_CAP
== i
) {
3652 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3658 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3664 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3670 /* set interrupt mask: allow volt in interrupt except cap in*/
3672 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3677 /* set interrupt mask: disable others */
3679 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3686 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3695 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3700 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3704 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3709 val
&= ~(1UL << (uint32_t)idx
);
3711 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3720 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3725 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3729 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3734 val
|= (1UL << (uint32_t)idx
);
3736 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3745 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3750 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3755 *data
= u16_swap(val
);
/* Pack a sensor-fault record (return code + sensor address) into a 32-bit
 * payload and emit it to the software event log. NOTE(review): @addr is
 * packed into BOTH bits 8-15 and bits 0-7 — possibly one lane was meant to
 * carry something else (e.g. the event); confirm the intended layout. */
3759 static int ssd_generate_sensor_fault_log(struct ssd_device
*dev
, uint16_t event
, uint8_t addr
,uint32_t ret
)
/* payload: [31:16]=low 16 bits of ret, [15:8]=addr, [7:0]=addr */
3762 data
= ((ret
& 0xffff) << 16) | (addr
<< 8) | addr
;
3763 ssd_gen_swlog(dev
,event
,data
);
3766 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3769 uint16_t val
= 0, status
;
3770 uint8_t alarm1
= 0, alarm2
= 0;
3775 /* read interrupt status to clear interrupt */
3776 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3781 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3786 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3788 /* parse inetrrupt status */
3789 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3790 if (!((status
>> (uint32_t)i
) & 0x1)) {
3791 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3792 /* enable INx irq */
3793 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3802 /* disable INx irq */
3803 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3808 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3812 high
= (uint32_t)ssd_lm80_limit
[i
].high
* (uint32_t)10;
3813 low
= (uint32_t)ssd_lm80_limit
[i
].low
* (uint32_t)10;
3815 for (j
=0; j
<3; j
++) {
3816 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3820 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3821 if ((volt
>high
) || (volt
<=low
)) {
3823 msleep(SSD_LM80_CONV_INTERVAL
);
3835 case SSD_LM80_IN_CAP
: {
3837 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3839 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3844 case SSD_LM80_IN_1V2
:
3845 case SSD_LM80_IN_1V2a
:
3846 case SSD_LM80_IN_1V5
:
3847 case SSD_LM80_IN_1V8
: {
3848 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3851 case SSD_LM80_IN_FPGA_3V3
:
3852 case SSD_LM80_IN_3V3
: {
3853 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3863 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3864 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
,ret
);
3867 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3873 static int ssd_init_sensor(struct ssd_device
*dev
)
3877 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3881 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3883 hio_warn("%s: init lm75 failed\n", dev
->name
);
3884 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3885 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3890 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3891 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3893 hio_warn("%s: init lm80 failed\n", dev
->name
);
3894 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3895 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3902 /* skip error if not in standard mode */
3903 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3910 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3912 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3916 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3920 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3924 static int ssd_mon_temp(struct ssd_device
*dev
)
3930 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3934 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3939 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3941 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3942 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3946 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3948 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3949 if (cur
>= SSD_INLET_OT_TEMP
) {
3950 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3951 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3953 } else if(cur
< SSD_INLET_OT_HYST
) {
3954 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3955 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3960 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3962 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3963 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3967 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3969 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3970 if (cur
>= SSD_FLASH_OT_TEMP
) {
3971 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3972 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3974 } else if(cur
< SSD_FLASH_OT_HYST
) {
3975 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3976 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3985 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3987 test_and_clear_bit(tag
, dev
->tag_map
);
3988 wake_up(&dev
->tag_wq
);
3991 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3996 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3997 DEFINE_WAIT(__wait
);
4003 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
4006 finish_wait(&dev
->tag_wq
, &__wait
);
4009 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4016 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
4018 test_and_clear_bit(tag
, dev
->tag_map
);
4021 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
4025 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4032 static void ssd_barrier_end(struct ssd_device
*dev
)
4034 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4035 wake_up_all(&dev
->tag_wq
);
4037 mutex_unlock(&dev
->barrier_mutex
);
4040 static int ssd_barrier_start(struct ssd_device
*dev
)
4044 mutex_lock(&dev
->barrier_mutex
);
4046 atomic_set(&dev
->queue_depth
, 0);
4048 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4049 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4053 __set_current_state(TASK_INTERRUPTIBLE
);
4054 schedule_timeout(1);
4057 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4058 wake_up_all(&dev
->tag_wq
);
4060 mutex_unlock(&dev
->barrier_mutex
);
4065 static int ssd_busy(struct ssd_device
*dev
)
4067 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4074 static int ssd_wait_io(struct ssd_device
*dev
)
4078 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4079 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4083 __set_current_state(TASK_INTERRUPTIBLE
);
4084 schedule_timeout(1);
4091 static int ssd_in_barrier(struct ssd_device
*dev
)
4093 return (0 == atomic_read(&dev
->queue_depth
));
4097 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4099 kfree(dev
->tag_map
);
/* Initialize per-device command-tag state: barrier mutex, queue depth,
 * the tag bitmap (one bit per FIFO slot, rounded up to whole longs), and
 * the tag wait queue. Error-return lines are elided from this view —
 * presumably -ENOMEM when the bitmap allocation fails.
 * NOTE(review): kmalloc+memset could be kzalloc; GFP_ATOMIC at init time
 * looks stricter than necessary — confirm calling context. */
4102 static int ssd_init_tag(struct ssd_device
*dev
)
/* number of longs needed to hold cmd_fifo_sz bits */
4104 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4106 mutex_init(&dev
->barrier_mutex
);
/* start with the full hardware FIFO depth available */
4108 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4110 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4111 if (!dev
->tag_map
) {
/* all tags start free */
4115 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4117 init_waitqueue_head(&dev
->tag_wq
);
4123 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4125 struct ssd_device
*dev
= cmd
->dev
;
4126 struct bio
*bio
= cmd
->bio
;
4127 unsigned long dur
= jiffies
- cmd
->start_time
;
4128 int rw
= bio_data_dir(bio
);
4129 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4134 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4135 int cpu
= part_stat_lock();
4136 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4137 part_round_stats(cpu
, part
);
4138 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4139 part_dec_in_flight(part
, rw
);
4141 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4142 int cpu
= part_stat_lock();
4143 struct hd_struct
*part
= &dev
->gd
->part0
;
4144 part_round_stats(cpu
, part
);
4145 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4147 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4148 part
->in_flight
[rw
]--;
4149 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4153 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4155 disk_round_stats(dev
->gd
);
4156 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4158 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4159 dev
->gd
->in_flight
--;
4160 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4166 disk_round_stats(dev
->gd
);
4168 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4170 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4172 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4173 dev
->gd
->in_flight
--;
4174 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4181 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4183 struct ssd_device
*dev
= cmd
->dev
;
4184 struct bio
*bio
= cmd
->bio
;
4185 int rw
= bio_data_dir(bio
);
4186 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4191 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4192 int cpu
= part_stat_lock();
4193 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4194 part_round_stats(cpu
, part
);
4195 part_stat_inc(cpu
, part
, ios
[rw
]);
4196 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4197 part_inc_in_flight(part
, rw
);
4199 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4200 int cpu
= part_stat_lock();
4201 struct hd_struct
*part
= &dev
->gd
->part0
;
4202 part_round_stats(cpu
, part
);
4203 part_stat_inc(cpu
, part
, ios
[rw
]);
4204 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4206 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4207 part
->in_flight
[rw
]++;
4208 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4212 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4214 disk_round_stats(dev
->gd
);
4215 disk_stat_inc(dev
->gd
, ios
[rw
]);
4216 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4218 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4219 dev
->gd
->in_flight
++;
4220 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4225 disk_round_stats(dev
->gd
);
4227 disk_stat_inc(dev
->gd
, writes
);
4228 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4230 disk_stat_inc(dev
->gd
, reads
);
4231 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4234 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4235 dev
->gd
->in_flight
++;
4236 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4242 cmd
->start_time
= jiffies
;
4246 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4248 spin_lock(&dev
->sendq_lock
);
4249 ssd_blist_add(&dev
->sendq
, bio
);
4250 spin_unlock(&dev
->sendq_lock
);
4252 atomic_inc(&dev
->in_sendq
);
4253 wake_up(&dev
->send_waitq
);
4256 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4258 struct ssd_device
*dev
= cmd
->dev
;
4259 struct bio
*bio
= cmd
->bio
;
4260 int errors
= cmd
->errors
;
4264 if (!ssd_bio_has_discard(bio
)) {
4265 ssd_end_io_acct(cmd
);
4267 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4268 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4273 ssd_put_tag(dev
, tag
);
4275 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4276 ssd_bio_endio(bio
, errors
);
4277 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4278 spin_lock(&dev
->doneq_lock
);
4279 ssd_blist_add(&dev
->doneq
, bio
);
4280 spin_unlock(&dev
->doneq_lock
);
4282 atomic_inc(&dev
->in_doneq
);
4283 wake_up(&dev
->done_waitq
);
4287 complete(cmd
->waiting
);
4292 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4294 struct ssd_device
*dev
= cmd
->dev
;
4295 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4298 for (i
=0; i
<dev
->nr_queue
; i
++) {
4299 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4300 disable_irq(dev
->entry
[i
].vector
);
4302 disable_irq(pci_irq_vector(dev
->pdev
, i
));
4306 atomic_inc(&dev
->tocnt
);
4308 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4309 cmd
->errors
= -ETIMEDOUT
;
4310 ssd_end_request(cmd
);
4313 for (i
=0; i
<dev
->nr_queue
; i
++) {
4314 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4315 enable_irq(dev
->entry
[i
].vector
);
4317 enable_irq(pci_irq_vector(dev
->pdev
, i
));
4326 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4328 init_timer(&cmd
->cmd_timer
);
4330 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4331 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4332 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4334 add_timer(&cmd
->cmd_timer
);
4337 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4339 return del_timer(&cmd
->cmd_timer
);
/*
 * ssd_add_timer(): arm a caller-supplied legacy timer with an opaque
 * data pointer and a callback cast to the legacy unsigned-long signature.
 * NOTE(review): the init_timer()/add_timer() calls expected around these
 * assignments are not visible in this damaged extract — verify against
 * the original source.
 */
4342 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4346 timer
->data
= (unsigned long)data
;
4347 timer
->expires
= jiffies
+ timeout
;
4348 timer
->function
= (void (*)(unsigned long)) complt
;
/*
 * Delete a caller-supplied timer.
 * Nonzero return means the timer was still pending when deleted.
 */
static int ssd_del_timer(struct timer_list *timer)
{
	int pending = del_timer(timer);

	return pending;
}
4358 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4360 struct ssd_device
*dev
= cmd
->dev
;
4361 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4363 ssd_end_timeout_request(cmd
);
4365 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
/*
 * __ssd_done(): tasklet body — splice off this CPU's completion list
 * with interrupts disabled, then complete each command.
 * NOTE(review): the 'localq' list declaration and the matching
 * local_irq_enable() are missing from this extract.
 */
4369 static void __ssd_done(unsigned long data
)
4371 struct ssd_cmd
*cmd
;
4374 local_irq_disable();
4375 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4376 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4378 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
/* complete each spliced-off command outside the per-cpu list */
4382 while (!list_empty(&localq
)) {
4383 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4384 list_del_init(&cmd
->list
);
4386 ssd_end_request(cmd
);
/*
 * __ssd_done_db(): debug variant of the completion tasklet — injects
 * errors into completions that overlap the configured debug region
 * (dev->db_info) before ending each request.
 * NOTE(review): damaged extract — the 'bio'/'localq' declarations,
 * several case bodies (e.g. the -EIO assignment for the read-error
 * case), break statements and braces are missing from this view.
 */
4390 static void __ssd_done_db(unsigned long data
)
4392 struct ssd_cmd
*cmd
;
4393 struct ssd_device
*dev
;
4397 local_irq_disable();
4398 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4399 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4401 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4405 while (!list_empty(&localq
)) {
4406 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4407 list_del_init(&cmd
->list
);
4409 dev
= (struct ssd_device
*)cmd
->dev
;
/* debug region: offset/length in sectors */
4413 sector_t off
= dev
->db_info
.data
.loc
.off
;
4414 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4416 switch (dev
->db_info
.type
) {
4417 case SSD_DEBUG_READ_ERR
:
/* fail reads that intersect [off, off+len) */
4418 if (bio_data_dir(bio
) == READ
&&
4419 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4423 case SSD_DEBUG_WRITE_ERR
:
/* fail writes that intersect the region with -EROFS */
4424 if (bio_data_dir(bio
) == WRITE
&&
4425 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4426 cmd
->errors
= -EROFS
;
4429 case SSD_DEBUG_RW_ERR
:
/* fail both directions; writes get -EROFS */
4430 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4431 if (bio_data_dir(bio
) == READ
) {
4434 cmd
->errors
= -EROFS
;
4443 ssd_end_request(cmd
);
/*
 * ssd_done_bh(): interrupt-side completion — hand the command to the
 * per-CPU completion list and schedule the tasklet.
 * A command whose timer has already fired is logged as unknown (its
 * timeout path owns it); the early-return after the message is not
 * visible in this extract.
 */
4447 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4449 unsigned long flags
= 0;
4451 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4452 struct ssd_device
*dev
= cmd
->dev
;
4453 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4454 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
/* queue on this CPU's done list and kick the tasklet, irqs off */
4461 local_irq_save(flags
);
4462 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4463 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4464 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4466 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4467 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4469 local_irq_restore(flags
);
/*
 * ssd_done(): direct completion (no tasklet deferral).
 * If the timeout timer already fired the command is logged as unknown;
 * the early-return after the message is not visible in this extract.
 */
4474 static inline void ssd_done(struct ssd_cmd
*cmd
)
4476 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4477 struct ssd_device
*dev
= cmd
->dev
;
4478 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4479 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4486 ssd_end_request(cmd
);
4491 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4493 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4495 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4497 spin_lock(&dev
->cmd_lock
);
4498 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4499 spin_unlock(&dev
->cmd_lock
);
4502 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4504 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4506 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4508 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
/*
 * ssd_send_cmd_db(): debug variant of ssd_send_cmd — can suppress the
 * doorbell write for matching I/O direction (dev->db_info.type) so the
 * command is forced to time out.
 * NOTE(review): the case bodies (presumably early returns that skip the
 * doorbell) are missing from this extract.
 */
4511 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4513 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4514 struct bio
*bio
= cmd
->bio
;
4516 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4519 switch (dev
->db_info
.type
) {
4520 case SSD_DEBUG_READ_TO
:
4521 if (bio_data_dir(bio
) == READ
) {
4525 case SSD_DEBUG_WRITE_TO
:
4526 if (bio_data_dir(bio
) == WRITE
) {
4530 case SSD_DEBUG_RW_TO
:
/* normal path: tag in low 16 bits, segment count in high 16 bits */
4538 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
/*
 * Xen workaround: under Xen, physically-contiguous pfns are not
 * necessarily machine-contiguous, so BIOVEC_PHYS_MERGEABLE is
 * redefined to also require adjacent (or equal) machine frame numbers.
 */
4542 /* fixed for BIOVEC_PHYS_MERGEABLE */
4543 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4544 #include <linux/bio.h>
4545 #include <linux/io.h>
4546 #include <xen/page.h>
4548 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4549 const struct bio_vec
*vec2
)
4551 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4552 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4554 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4555 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
/* replace the stock macro with the Xen-aware check */
4558 #ifdef BIOVEC_PHYS_MERGEABLE
4559 #undef BIOVEC_PHYS_MERGEABLE
4561 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4562 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4563 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
/*
 * ssd_bio_map_sg(): build a scatterlist from a bio's segments, merging
 * physically-mergeable adjacent bvecs, capped at hw_info.cmd_max_sg
 * entries. Two implementations: pointer-bvec (< 3.14) and
 * bvec_iter (>= 3.14).
 * NOTE(review): damaged extract — 'first'/'nsegs' declarations in the
 * second variant, the nsegs increments, break statements, returns and
 * #else/#endif lines are missing from this view.
 */
4567 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4569 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4570 struct bio_vec
*bvec
, *bvprv
= NULL
;
4571 struct scatterlist
*sg
= NULL
;
4572 int i
= 0, nsegs
= 0;
4574 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4575 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4579 * for each segment in bio
4581 bio_for_each_segment(bvec
, bio
, i
) {
/* coalesce with previous segment when physically mergeable */
4582 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4583 sg
->length
+= bvec
->bv_len
;
4585 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4589 sg
= sg
? (sg
+ 1) : sgl
;
4590 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4591 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4593 sg
->page
= bvec
->bv_page
;
4594 sg
->length
= bvec
->bv_len
;
4595 sg
->offset
= bvec
->bv_offset
;
4602 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
/* >= 3.14 variant: by-value bvec iteration */
4612 struct bio_vec bvec
, bvprv
;
4613 struct bvec_iter iter
;
4614 struct scatterlist
*sg
= NULL
;
4618 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4621 * for each segment in bio
4623 bio_for_each_segment(bvec
, bio
, iter
) {
4624 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4625 sg
->length
+= bvec
.bv_len
;
4627 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4631 sg
= sg
? (sg
+ 1) : sgl
;
4633 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
/*
 * __ssd_submit_pbio(): submit a "physical" bio whose bi_io_vec entries
 * already hold device addresses (no DMA mapping). Builds either a TRIM
 * message or a read/write scatter-gather message and dispatches it.
 * NOTE(review): damaged extract — tag/i/sge setup, error returns, the
 * dispatch call and several braces are missing from this view.
 */
4650 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4652 struct ssd_cmd
*cmd
;
4653 struct ssd_rw_msg
*msg
;
4654 struct ssd_sg_entry
*sge
;
4655 sector_t block
= bio_start(bio
);
4659 tag
= ssd_get_tag(dev
, wait
);
4664 cmd
= &dev
->cmd
[tag
];
4668 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
/* discard: chop the range into sg_max_sec-sized TRIM segments */
4670 if (ssd_bio_has_discard(bio
)) {
4671 unsigned int length
= bio_sectors(bio
);
4673 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4675 msg
->fun
= SSD_FUNC_TRIM
;
4678 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4680 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4683 block
+= sge
->length
;
4684 length
-= sge
->length
;
4692 msg
->nsegs
= cmd
->nsegs
= i
;
/* read/write: segments are taken straight from bi_io_vec */
4698 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4699 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4702 if (bio_data_dir(bio
) == READ
) {
4703 msg
->fun
= SSD_FUNC_READ
;
4706 msg
->fun
= SSD_FUNC_WRITE
;
4707 msg
->flag
= dev
->wmode
;
4711 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4713 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4714 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4716 block
+= sge
->length
;
/* over-temperature throttling: delay submission when protection active */
4722 #ifdef SSD_OT_PROTECT
4723 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4724 msleep_interruptible(dev
->ot_delay
);
4728 ssd_start_io_acct(cmd
);
/*
 * ssd_submit_bio(): submit a normal bio — maps its pages with
 * pci_map_sg() and builds either a TRIM or read/write SG message.
 * NOTE(review): damaged extract — tag/i/sge setup, sgl assignment,
 * error returns, sgl iteration (sg_next/++) and the dispatch call are
 * missing from this view.
 */
4734 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4736 struct ssd_cmd
*cmd
;
4737 struct ssd_rw_msg
*msg
;
4738 struct ssd_sg_entry
*sge
;
4739 struct scatterlist
*sgl
;
4740 sector_t block
= bio_start(bio
);
4744 tag
= ssd_get_tag(dev
, wait
);
4749 cmd
= &dev
->cmd
[tag
];
4753 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
/* discard: build TRIM segments, no DMA mapping needed */
4757 if (ssd_bio_has_discard(bio
)) {
4758 unsigned int length
= bio_sectors(bio
);
4760 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4762 msg
->fun
= SSD_FUNC_TRIM
;
4765 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4767 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4770 block
+= sge
->length
;
4771 length
-= sge
->length
;
4779 msg
->nsegs
= cmd
->nsegs
= i
;
/* read/write: build the scatterlist then DMA-map it */
4785 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4788 if (bio_data_dir(bio
) == READ
) {
4789 msg
->fun
= SSD_FUNC_READ
;
4791 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4793 msg
->fun
= SSD_FUNC_WRITE
;
4794 msg
->flag
= dev
->wmode
;
4795 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
/* copy DMA addresses/lengths (in sectors) into the message SG entries */
4799 for (i
=0; i
<cmd
->nsegs
; i
++) {
4801 sge
->length
= sg_dma_len(sgl
) >> 9;
4802 sge
->buf
= sg_dma_address(sgl
);
4804 block
+= sge
->length
;
/* over-temperature throttling: delay submission when protection active */
4811 #ifdef SSD_OT_PROTECT
4812 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4813 msleep_interruptible(dev
->ot_delay
);
4817 ssd_start_io_acct(cmd
);
/*
 * ssd_done_thread(): kthread that drains the device's done queue and
 * ends deferred bios. Optionally migrates itself off the IRQ CPU
 * (SSD_ESCAPE_IRQ).
 * NOTE(review): damaged extract — 'dev' initialisation from 'data',
 * bio/next declarations, the bio chain walk, the locking-variant
 * #if/#else structure and the final return are missing from this view.
 */
4824 static int ssd_done_thread(void *data
)
4826 struct ssd_device
*dev
;
4835 current
->flags
|= PF_NOFREEZE
;
4836 //set_user_nice(current, -5);
4838 while (!kthread_should_stop()) {
4839 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4841 while (atomic_read(&dev
->in_doneq
)) {
/* pop one bio chain off the done queue (two locking variants) */
4843 spin_lock(&dev
->doneq_lock
);
4844 bio
= ssd_blist_get(&dev
->doneq
);
4845 spin_unlock(&dev
->doneq_lock
);
4847 spin_lock_irq(&dev
->doneq_lock
);
4848 bio
= ssd_blist_get(&dev
->doneq
);
4849 spin_unlock_irq(&dev
->doneq_lock
);
4853 next
= bio
->bi_next
;
4854 bio
->bi_next
= NULL
;
4855 ssd_bio_endio(bio
, 0);
4856 atomic_dec(&dev
->in_doneq
);
/* optionally move this thread off the CPU servicing the device IRQ */
4862 #ifdef SSD_ESCAPE_IRQ
4863 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4864 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4865 cpumask_var_t new_mask
;
4866 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4867 cpumask_setall(new_mask
);
4868 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4869 set_cpus_allowed_ptr(current
, new_mask
);
4870 free_cpumask_var(new_mask
);
4874 cpus_setall(new_mask
);
4875 cpu_clear(dev
->irq_cpu
, new_mask
);
4876 set_cpus_allowed(current
, new_mask
);
/*
 * ssd_send_thread(): kthread that drains the device's send queue and
 * submits each bio (physical-bio path when BIO_SSD_PBIO is set).
 * NOTE(review): damaged extract — 'dev' initialisation from 'data',
 * bio/next declarations, #else/#endif lines and the final return are
 * missing from this view.
 */
4885 static int ssd_send_thread(void *data
)
4887 struct ssd_device
*dev
;
4896 current
->flags
|= PF_NOFREEZE
;
4897 //set_user_nice(current, -5);
4899 while (!kthread_should_stop()) {
4900 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4902 while (atomic_read(&dev
->in_sendq
)) {
4903 spin_lock(&dev
->sendq_lock
);
4904 bio
= ssd_blist_get(&dev
->sendq
);
4905 spin_unlock(&dev
->sendq_lock
);
4908 next
= bio
->bi_next
;
4909 bio
->bi_next
= NULL
;
/* pbio-tagged bios bypass DMA mapping; wait=1 blocks for a free tag */
4910 #ifdef SSD_QUEUE_PBIO
4911 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4912 __ssd_submit_pbio(dev
, bio
, 1);
4914 ssd_submit_bio(dev
, bio
, 1);
4917 ssd_submit_bio(dev
, bio
, 1);
4919 atomic_dec(&dev
->in_sendq
);
/* optionally move this thread off the CPU servicing the device IRQ */
4925 #ifdef SSD_ESCAPE_IRQ
4926 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4927 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4928 cpumask_var_t new_mask
;
4929 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4930 cpumask_setall(new_mask
);
4931 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4932 set_cpus_allowed_ptr(current
, new_mask
);
4933 free_cpumask_var(new_mask
);
4937 cpus_setall(new_mask
);
4938 cpu_clear(dev
->irq_cpu
, new_mask
);
4939 set_cpus_allowed(current
, new_mask
);
4949 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4951 kthread_stop(dev
->send_thread
);
4952 kthread_stop(dev
->done_thread
);
/*
 * ssd_init_thread(): initialise queues/locks/waitqueues and start the
 * done and send kthreads; on send-thread failure the done thread is
 * stopped again.
 * NOTE(review): the 'ret' declaration, success return and the
 * out_send_thread/out_done_thread labels are missing from this extract.
 */
4955 static int ssd_init_thread(struct ssd_device
*dev
)
4959 atomic_set(&dev
->in_doneq
, 0);
4960 atomic_set(&dev
->in_sendq
, 0);
4962 spin_lock_init(&dev
->doneq_lock
);
4963 spin_lock_init(&dev
->sendq_lock
);
4965 ssd_blist_init(&dev
->doneq
);
4966 ssd_blist_init(&dev
->sendq
);
4968 init_waitqueue_head(&dev
->done_waitq
);
4969 init_waitqueue_head(&dev
->send_waitq
);
/* completion thread: "<name>/d" */
4971 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4972 if (IS_ERR(dev
->done_thread
)) {
4973 ret
= PTR_ERR(dev
->done_thread
);
4974 goto out_done_thread
;
/* submission thread: "<name>/s" */
4977 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4978 if (IS_ERR(dev
->send_thread
)) {
4979 ret
= PTR_ERR(dev
->send_thread
);
4980 goto out_send_thread
;
/* unwind: stop the already-running done thread */
4986 kthread_stop(dev
->done_thread
);
4992 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4994 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4996 spin_lock(&dev
->dcmd_lock
);
4997 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4998 spin_unlock(&dev
->dcmd_lock
);
5001 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
5003 struct ssd_dcmd
*dcmd
= NULL
;
5005 spin_lock(&dev
->dcmd_lock
);
5006 if (!list_empty(&dev
->dcmd_list
)) {
5007 dcmd
= list_entry(dev
->dcmd_list
.next
,
5008 struct ssd_dcmd
, list
);
5009 list_del_init(&dcmd
->list
);
5011 spin_unlock(&dev
->dcmd_lock
);
/*
 * ssd_cleanup_dcmd(): release direct-command resources.
 * NOTE(review): the body (presumably freeing dev->dcmd) is entirely
 * missing from this extract — restore from the original source.
 */
5016 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
/*
 * ssd_init_dcmd(): allocate the direct-command descriptor pool (one per
 * hardware FIFO slot) and thread every descriptor onto the free list.
 * NOTE(review): the 'int i'/'ret' declarations, allocation-failure
 * check, dcmd->dev assignment, returns and the out_alloc_dcmd label are
 * missing from this extract. A full view could also fold the
 * kmalloc+memset pair into kzalloc.
 */
5021 static int ssd_init_dcmd(struct ssd_device
*dev
)
5023 struct ssd_dcmd
*dcmd
;
5024 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
5027 spin_lock_init(&dev
->dcmd_lock
);
5028 INIT_LIST_HEAD(&dev
->dcmd_list
);
5029 init_waitqueue_head(&dev
->dcmd_wq
);
5031 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
5033 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
5034 goto out_alloc_dcmd
;
5036 memset(dev
->dcmd
, 0, dcmd_sz
);
/* put every descriptor on the free list */
5038 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
5040 INIT_LIST_HEAD(&dcmd
->list
);
5041 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
/*
 * ssd_put_dmsg(): release a direct-command message buffer — scrub it,
 * then wake anyone waiting for a free descriptor.
 * NOTE(review): the call returning the descriptor to the free list
 * (presumably ssd_put_dcmd(dcmd)) is missing from this extract.
 */
5050 static void ssd_put_dmsg(void *msg
)
5052 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
5053 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5055 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
5057 wake_up(&dev
->dcmd_wq
);
/*
 * ssd_get_dmsg(): get a direct-command message buffer, sleeping
 * (uninterruptible, exclusive) on dcmd_wq until a descriptor is free.
 * NOTE(review): the wait-entry declaration, retry loop, schedule()
 * call and the final return of dcmd->msg are missing from this extract.
 */
5060 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5062 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5066 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5069 dcmd
= ssd_get_dcmd(dev
);
5071 finish_wait(&dev
->dcmd_wq
, &wait
);
/*
 * ssd_do_request(): synchronously execute a prepared direct command —
 * copy it into a tagged command slot, dispatch, and sleep on a
 * completion until done. Optionally reports cmd->nr_log via *done.
 * NOTE(review): the tag/ret declarations, dispatch call, error-branch
 * bodies and returns are missing from this extract.
 */
5077 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5079 DECLARE_COMPLETION(wait
);
5080 struct ssd_cmd
*cmd
;
5084 tag
= ssd_get_tag(dev
, 1);
5089 cmd
= &dev
->cmd
[tag
];
5091 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5092 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5094 cmd
->waiting
= &wait
;
/* block until the interrupt/timeout path completes the command */
5098 wait_for_completion(cmd
->waiting
);
5099 cmd
->waiting
= NULL
;
5101 if (cmd
->errors
== -ETIMEDOUT
) {
5103 } else if (cmd
->errors
) {
5108 *done
= cmd
->nr_log
;
5110 ssd_put_tag(dev
, cmd
->tag
);
/*
 * ssd_do_barrier_request(): like ssd_do_request() but uses the
 * reserved barrier tag (ssd_barrier_get_tag/put_tag) instead of the
 * normal tag pool.
 * NOTE(review): the tag/ret declarations, dispatch call, error-branch
 * bodies and returns are missing from this extract.
 */
5115 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5117 DECLARE_COMPLETION(wait
);
5118 struct ssd_cmd
*cmd
;
5122 tag
= ssd_barrier_get_tag(dev
);
5127 cmd
= &dev
->cmd
[tag
];
5129 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5130 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5132 cmd
->waiting
= &wait
;
5136 wait_for_completion(cmd
->waiting
);
5137 cmd
->waiting
= NULL
;
5139 if (cmd
->errors
== -ETIMEDOUT
) {
5141 } else if (cmd
->errors
) {
5146 *done
= cmd
->nr_log
;
5148 ssd_barrier_put_tag(dev
, cmd
->tag
);
5153 #ifdef SSD_OT_PROTECT
/*
 * ssd_check_temperature(): poll each controller's temperature register
 * and set/clear the over-temperature hwmon bit; on certain protocol
 * versions also engages the submit-path delay (ot_delay).
 * NOTE(review): local declarations (i/off/val/cur), the comparison of
 * 'cur' against 'temp', else branches and braces are missing from this
 * extract.
 */
5154 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5161 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5165 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5168 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5169 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5171 val
= ssd_reg_read(dev
->ctrlp
+ off
);
/* all-ones means the register read is invalid — skip it */
5172 if (val
== 0xffffffffffffffffull
) {
5176 cur
= (int)CUR_TEMP(val
);
5178 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5179 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5180 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5181 dev
->ot_delay
= SSD_OT_DELAY
;
/* temperature back below threshold: clear the flag */
5188 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5189 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5190 hio_warn("%s: Temperature is OK.\n", dev
->name
);
/*
 * ssd_get_ot_status(): report whether over-temperature protection is
 * currently active. On protocol >= V3.2.2 bit 22 of the per-controller
 * read/write OT registers is consulted; otherwise the driver-side
 * ot_delay state is used.
 * NOTE(review): local declarations, the branch bodies setting *status
 * for the register path, and returns are missing from this extract.
 */
5197 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5203 if (!dev
|| !status
) {
5207 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5208 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5209 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5210 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5211 if ((val
>> 22) & 0x1) {
5217 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5218 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5219 if ((val
>> 22) & 0x1) {
/* legacy path: protection active iff a submit delay is in force */
5225 *status
= !!dev
->ot_delay
;
/*
 * ssd_set_ot_protect(): enable/disable over-temperature protection.
 * On protocol >= V3.2.2 this is a read-modify-write of the
 * per-controller read/write OT registers, serialised by fw_mutex.
 * NOTE(review): local declarations and the bit set/clear statements in
 * the if/else bodies are missing from this extract.
 */
5232 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5238 mutex_lock(&dev
->fw_mutex
);
5240 dev
->ot_protect
= !!protect
;
5242 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5243 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
/* read OT register */
5244 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5245 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5246 if (dev
->ot_protect
) {
5251 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
/* write OT register */
5254 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5255 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5256 if (dev
->ot_protect
) {
5261 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5265 mutex_unlock(&dev
->fw_mutex
);
/*
 * ssd_init_ot_protect(): apply the module-level 'ot_protect' setting
 * and, when compiled with SSD_OT_PROTECT, take an initial temperature
 * reading against SSD_OT_TEMP.
 * NOTE(review): the return statement is missing from this extract.
 */
5268 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5270 ssd_set_ot_protect(dev
, ot_protect
);
5272 #ifdef SSD_OT_PROTECT
5273 ssd_check_temperature(dev
, SSD_OT_TEMP
);
/*
 * ssd_read_log(): read one controller's hardware log into 'buf' via a
 * direct command; the buffer is DMA-mapped for the transfer and the
 * message layout depends on the protocol version (ssd_log_msg pre-V3,
 * ssd_log_op_msg otherwise).
 * NOTE(review): the buf_dma/ret declarations, V3+ buf/length message
 * fields, returns and the out_dma_mapping label are missing from this
 * extract.
 */
5280 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5282 struct ssd_log_op_msg
*msg
;
5283 struct ssd_log_msg
*lmsg
;
5285 size_t length
= dev
->hw_info
.log_sz
;
5288 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5292 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
/* dma_mapping_error() signature changed after 2.6.26 */
5293 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5294 ret
= dma_mapping_error(buf_dma
);
5296 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5299 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5300 goto out_dma_mapping
;
5303 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5305 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5306 lmsg
= (struct ssd_log_msg
*)msg
;
5307 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5308 lmsg
->ctrl_idx
= ctrl_idx
;
5309 lmsg
->buf
= buf_dma
;
5311 msg
->fun
= SSD_FUNC_READ_LOG
;
5312 msg
->ctrl_idx
= ctrl_idx
;
5316 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5319 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5325 #define SSD_LOG_PRINT_BUF_SZ 256
/*
 * ssd_parse_log(): match a log entry against the ssd_log_desc table,
 * format a one-line description into a stack buffer and emit it at the
 * descriptor's severity. Returns the entry's log level.
 * NOTE(review): damaged extract — the 'le'/'sn'/'print_len'
 * declarations, the 'print' gating, table-walk increment, break
 * statements and the unknown-event path are missing from this view.
 */
5326 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5328 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5329 struct ssd_log_entry
*le
;
5331 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
/* find the descriptor for this event */
5337 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5338 if (log_desc
->event
== le
->event
) {
5348 if (log_desc
->level
< log_level
) {
/* serial number source depends on label layout (protocol version) */
5353 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5356 sn
= dev
->labelv3
.barcode
;
5359 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5361 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5362 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
/* append event payload according to the descriptor's data kind */
5365 switch (log_desc
->data
) {
5366 case SSD_LOG_DATA_NONE
:
5368 case SSD_LOG_DATA_LOC
:
5369 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5370 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5371 if (log_desc
->sblock
) {
5372 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5374 if (log_desc
->spage
) {
5375 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5378 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5379 if (log_desc
->sblock
) {
5380 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5382 if (log_desc
->spage
) {
5383 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5387 case SSD_LOG_DATA_HEX
:
5388 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5393 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
/* emit at the descriptor's severity */
5395 switch (log_desc
->level
) {
5396 case SSD_LOG_LEVEL_INFO
:
5397 hio_info("%s\n", print_buf
);
5399 case SSD_LOG_LEVEL_NOTICE
:
5400 hio_note("%s\n", print_buf
);
5402 case SSD_LOG_LEVEL_WARNING
:
5403 hio_warn("%s\n", print_buf
);
5405 case SSD_LOG_LEVEL_ERR
:
5406 hio_err("%s\n", print_buf
);
5407 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5410 hio_warn("%s\n", print_buf
);
5415 return log_desc
->level
;
5418 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5419 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
/*
 * ssd_handle_event(): react to a parsed log event — temperature
 * flags, battery/capacitor faults (switching write mode), board
 * voltage monitoring, log-clearing — then forward the event to the
 * registered event callback, mapping capacitor faults to
 * SSD_LOG_BATTERY_FAULT for the callback.
 * NOTE(review): the enclosing switch statement, several break/brace
 * lines and the return are missing from this extract.
 */
5422 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5427 case SSD_LOG_OVER_TEMP
: {
5428 #ifdef SSD_OT_PROTECT
5429 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5430 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5431 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5432 dev
->ot_delay
= SSD_OT_DELAY
;
5439 case SSD_LOG_NORMAL_TEMP
: {
5440 #ifdef SSD_OT_PROTECT
5441 /* need to check all controller's temperature */
5442 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
)
;
5447 case SSD_LOG_BATTERY_FAULT
: {
5450 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5451 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5452 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
/* power-loss capacitor failed: drop to the user's write mode */
5456 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5457 ssd_switch_wmode(dev
, dev
->user_wmode
);
5462 case SSD_LOG_BATTERY_OK
: {
5463 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5464 ssd_switch_wmode(dev
, dev
->user_wmode
);
5469 case SSD_LOG_BOARD_VOLT_FAULT
: {
5470 ssd_mon_boardvolt(dev
);
5474 case SSD_LOG_CLEAR_LOG
: {
5476 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5480 case SSD_LOG_CAP_VOLT_FAULT
:
5481 case SSD_LOG_CAP_LEARN_FAULT
:
5482 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5483 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5484 ssd_switch_wmode(dev
, dev
->user_wmode
);
5493 /* ssd event call */
5494 if (dev
->event_call
) {
5495 dev
->event_call(dev
->gd
, event
, level
);
/* battery fault event to user */
5498 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5499 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
/*
 * ssd_save_log(): append a log record to the in-memory internal log
 * and persist it to SPI flash, under internal_log_mutex.
 * NOTE(review): the local declarations (size/off/ret/internal_log),
 * full-log handling, SPI-write error path and return are missing from
 * this extract.
 */
5506 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5512 mutex_lock(&dev
->internal_log_mutex
);
5514 size
= sizeof(struct ssd_log
);
5515 off
= dev
->internal_log
.nr_log
* size
;
/* internal log area exhausted */
5517 if (off
== dev
->rom_info
.log_sz
) {
5518 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5519 hio_warn("%s: internal log is full\n", dev
->name
);
5524 internal_log
= dev
->internal_log
.log
+ off
;
5525 memcpy(internal_log
, log
, size
);
/* newer protocols address the log relative to log_base in flash */
5527 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5528 off
+= dev
->rom_info
.log_base
;
5530 ret
= ssd_spi_write(dev
, log
, off
, size
);
5536 dev
->internal_log
.nr_log
++;
5539 mutex_unlock(&dev
->internal_log_mutex
);
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
static unsigned short const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};

/* Fold one byte into a CRC-16 (reflected 0x8005, a.k.a. CRC-16/ARC). */
static unsigned short crc16_byte(unsigned short crc, const unsigned char data)
{
	return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
}

/**
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous CRC value
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	while (len-- > 0)
		crc = crc16_byte(crc, *buffer++);

	return crc;
}
/*
 * ssd_save_swlog(): build a driver-generated (software) log record,
 * stamp it with the current time and a CRC16 over its first 14 bytes,
 * print/parse it, persist it if severe enough, update SMART counters
 * and dispatch event handling.
 * NOTE(review): the log/tv/level/ret declarations, the error-state
 * branch body and returns are missing from this extract.
 */
5598 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
/* driver-generated logs only exist in standard mode */
5605 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5608 memset(&log
, 0, sizeof(struct ssd_log
));
5610 do_gettimeofday(&tv
);
5611 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5612 log
.time
= tv
.tv_sec
;
5613 log
.le
.event
= event
;
5614 log
.le
.data
.val
= data
;
5616 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5617 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5618 level
= ssd_parse_log(dev
, &log
, 0);
5619 if (level
>= SSD_LOG_LEVEL
) {
5620 ret
= ssd_save_log(dev
, &log
);
5624 if (SSD_LOG_LEVEL_ERR
== level
) {
/* account the record in the SMART log statistics */
5629 dev
->smart
.log_info
.nr_log
++;
5630 dev
->smart
.log_info
.stat
[level
]++;
5633 ssd_handle_event(dev
, event
, level
);
/*
 * ssd_gen_swlog(): enqueue a software log entry on the device's log
 * FIFO (interrupt-safe producer side) and, when the work queue is up,
 * schedule the log worker to consume it.
 * NOTE(review): the ret declaration, the le.event/le.data assignments,
 * the sfifo_put error path and returns are missing from this extract.
 */
5638 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5640 struct ssd_log_entry le
;
5643 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5651 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5655 ret
= sfifo_put(&dev
->log_fifo
, &le
);
/* defer the heavy lifting to the workqueue thread */
5660 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5661 queue_work(dev
->workq
, &dev
->log_work
);
/*
 * ssd_do_swlog(): consumer side — drain the log FIFO and persist each
 * pending software log entry via ssd_save_swlog().
 * NOTE(review): the ret declaration, the error break/return handling
 * and the final return are missing from this extract.
 */
5667 static int ssd_do_swlog(struct ssd_device
*dev
)
5669 struct ssd_log_entry le
;
5672 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5673 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5674 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
/*
 * __ssd_clear_log(): erase the SPI-flash log area and reset the
 * in-memory internal log counter, under internal_log_mutex. Only
 * supported on protocol versions above V3; a no-op when the internal
 * log is already empty.
 * NOTE(review): the ret declaration, early returns and the unlock-path
 * label structure are missing from this extract.
 */
5683 static int __ssd_clear_log(struct ssd_device
*dev
)
5685 uint32_t off
, length
;
5688 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5692 if (dev
->internal_log
.nr_log
== 0) {
5696 mutex_lock(&dev
->internal_log_mutex
);
5698 off
= dev
->rom_info
.log_base
;
5699 length
= dev
->rom_info
.log_sz
;
5701 ret
= ssd_spi_erase(dev
, off
, length
);
5703 hio_warn("%s: log erase: failed\n", dev
->name
);
5707 dev
->internal_log
.nr_log
= 0;
5710 mutex_unlock(&dev
->internal_log_mutex
);
/*
 * ssd_clear_log(): public wrapper around __ssd_clear_log() that also
 * records a SSD_LOG_CLEAR_LOG software event on success.
 * NOTE(review): the ret declaration, success check and return are
 * missing from this extract.
 */
5714 static int ssd_clear_log(struct ssd_device
*dev
)
5718 ret
= __ssd_clear_log(dev
);
5720 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
/*
 * ssd_do_log(): fetch one controller's hardware log into 'buf', then
 * walk the entries: stamp, CRC, parse/print, persist the severe ones,
 * update SMART and volatile counters, trigger firmware reload on SEU
 * faults, and dispatch event handling per entry.
 * NOTE(review): damaged extract — the log/tv/level/ret/nr_log
 * declarations, the per-entry advance (le++/nr_log--), SEU branch
 * structure and returns are missing from this view.
 */
5726 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5728 struct ssd_log_entry
*le
;
5735 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5740 do_gettimeofday(&tv
);
5742 log
.time
= tv
.tv_sec
;
5743 log
.ctrl_idx
= ctrl_idx
;
5745 le
= (ssd_log_entry_t
*)buf
;
5746 while (nr_log
> 0) {
5747 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5749 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5750 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5751 level
= ssd_parse_log(dev
, &log
, 1);
5752 if (level
>= SSD_LOG_LEVEL
) {
5753 ssd_save_log(dev
, &log
);
5757 if (SSD_LOG_LEVEL_ERR
== level
) {
/* SMART statistics; SEU faults are excluded from the level counters */
5761 dev
->smart
.log_info
.nr_log
++;
5762 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5763 dev
->smart
.log_info
.stat
[level
]++;
5767 /* log to the volatile log info */
5768 dev
->log_info
.nr_log
++;
5769 dev
->log_info
.stat
[level
]++;
/* SEU fault: ask the card to reload its firmware */
5773 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5774 if (le
->event
!= SSD_LOG_SEU_FAULT1
) {
5775 dev
->has_non_0x98_reg_access
= 1;
5778 /*dev->readonly = 1;
5779 set_disk_ro(dev->gd, 1);
5780 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5784 ssd_handle_event(dev
, le
->event
, level
);
/*
 * ssd_log_worker(): workqueue handler — lazily allocates the log read
 * buffer, drains hardware logs from every controller when SSD_LOG_HW is
 * set, then processes pending software log entries. Two prologues:
 * data-pointer style (< 2.6.20) and work_struct/container_of style.
 * NOTE(review): the i/ret declarations, #else/#endif lines, returns and
 * some closing braces are missing from this extract.
 */
5793 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5794 static void ssd_log_worker(void *data
)
5796 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5798 static void ssd_log_worker(struct work_struct
*work
)
5800 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
/* hardware logs require the device online and no prior log error */
5805 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5807 if (!dev
->log_buf
) {
5808 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5809 if (!dev
->log_buf
) {
5810 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5816 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5817 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5818 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5820 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5821 hio_warn("%s: do log fail\n", dev
->name
);
/* always drain pending software log entries */
5827 ret
= ssd_do_swlog(dev
);
5829 hio_warn("%s: do swlog fail\n", dev
->name
);
/*
 * ssd_cleanup_log(): free the log read buffer, the log FIFO and the
 * in-memory internal log.
 * NOTE(review): the surrounding if (dev->log_buf) guard and closing
 * braces are missing from this extract.
 */
5833 static void ssd_cleanup_log(struct ssd_device
*dev
)
5836 kfree(dev
->log_buf
);
5837 dev
->log_buf
= NULL
;
5840 sfifo_free(&dev
->log_fifo
);
5842 if (dev
->internal_log
.log
) {
5843 vfree(dev
->internal_log
.log
);
5844 dev
->internal_log
.nr_log
= 0;
5845 dev
->internal_log
.log
= NULL
;
/*
 * ssd_init_log(): set up logging — init the work item and mutex,
 * allocate the in-memory internal log and the log FIFO, then (on
 * protocol > V3) scan the persisted log area from SPI flash counting
 * valid records and remembering the last power-on record index.
 * NOTE(review): the off/size/len/ret declarations, loop/error exits,
 * unwind labels and returns are missing from this extract.
 */
5849 static int ssd_init_log(struct ssd_device
*dev
)
5851 struct ssd_log
*log
;
5856 mutex_init(&dev
->internal_log_mutex
);
5858 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5859 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5861 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5864 off
= dev
->rom_info
.log_base
;
5865 size
= dev
->rom_info
.log_sz
;
5867 dev
->internal_log
.nr_log
= 0;
5868 dev
->internal_log
.log
= vmalloc(size
);
5869 if (!dev
->internal_log
.log
) {
5874 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5876 goto out_alloc_log_fifo
;
/* legacy protocols keep no persistent log to scan */
5879 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5883 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5884 while (len
< size
) {
5885 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
/* 0xff ctrl_idx marks erased flash — end of valid records */
5890 if (log
->ctrl_idx
== 0xff) {
5894 if (log
->le
.event
== SSD_LOG_POWER_ON
) {
5895 if (dev
->internal_log
.nr_log
> dev
->last_poweron_id
) {
5896 dev
->last_poweron_id
= dev
->internal_log
.nr_log
;
5900 dev
->internal_log
.nr_log
++;
5902 len
+= sizeof(struct ssd_log
);
5903 off
+= sizeof(struct ssd_log
);
/* unwind path: free FIFO and internal log */
5909 sfifo_free(&dev
->log_fifo
);
5911 vfree(dev
->internal_log
.log
);
5912 dev
->internal_log
.log
= NULL
;
5913 dev
->internal_log
.nr_log
= 0;
5915 /* skip error if not in standard mode */
5916 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5923 static void ssd_stop_workq(struct ssd_device
*dev
)
5925 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5926 flush_workqueue(dev
->workq
);
5929 static void ssd_start_workq(struct ssd_device
*dev
)
5931 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5934 queue_work(dev
->workq
, &dev
->log_work
);
5937 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5939 flush_workqueue(dev
->workq
);
5940 destroy_workqueue(dev
->workq
);
5944 static int ssd_init_workq(struct ssd_device
*dev
)
5948 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5959 static int ssd_init_rom_info(struct ssd_device
*dev
)
5963 mutex_init(&dev
->spi_mutex
);
5964 mutex_init(&dev
->i2c_mutex
);
5966 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5967 /* fix bug: read data to clear status */
5968 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5970 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5971 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5972 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5974 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5975 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5976 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5978 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5979 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5980 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5982 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5984 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5985 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5986 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5987 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5988 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5989 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5990 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5992 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5993 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5994 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5995 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5997 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5998 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5999 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6000 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6002 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
6003 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
6004 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
6006 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
6007 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
6009 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
6010 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
6011 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6013 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6014 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6015 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
6016 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
6017 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
6020 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
6021 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
6022 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
6023 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
6025 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
6026 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6027 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6028 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6030 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6031 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6032 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6033 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6035 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6036 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6037 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
6039 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6040 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
6041 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
6042 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
6043 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
6046 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
6047 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
6050 return ssd_init_spi(dev
);
6054 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
6058 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6059 struct hd_struct
*part
;
6065 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
6069 do_gettimeofday(&tv
);
6070 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
6073 run_time
= tv
.tv_sec
- dev
->uptime
;
6076 /* avoid frequently update */
6077 if (run_time
>= 60) {
6082 smart
->io_stat
.run_time
+= run_time
;
6084 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6085 cpu
= part_stat_lock();
6086 part
= &dev
->gd
->part0
;
6087 part_round_stats(cpu
, part
);
6090 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
6091 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
6092 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
6093 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
6094 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
6096 disk_round_stats(dev
->gd
);
6099 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
6100 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
6101 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
6102 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
6105 disk_round_stats(dev
->gd
);
6108 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
6109 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
6110 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
6111 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
6114 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
6116 for (i
=0; i
<dev
->nr_queue
; i
++) {
6117 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
6118 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
6121 for (i
=0; i
<dev
->nr_queue
; i
++) {
6122 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
6123 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
6127 //dev->uptime = tv.tv_sec;
6132 static int __ssd_clear_smart(struct ssd_device
*dev
)
6136 uint32_t off
, length
;
6140 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6145 off
= dev
->rom_info
.smart_base
;
6146 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6148 ret
= ssd_spi_erase(dev
, off
, length
);
6150 hio_warn("%s: info erase: failed\n", dev
->name
);
6154 sversion
= dev
->smart
.version
;
6156 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6157 dev
->smart
.version
= sversion
+ 1;
6158 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6160 /* clear all tmp acc */
6161 for (i
=0; i
<dev
->nr_queue
; i
++) {
6162 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6163 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6166 atomic_set(&dev
->tocnt
, 0);
6168 /* clear tmp log info */
6169 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6171 do_gettimeofday(&tv
);
6172 dev
->uptime
= tv
.tv_sec
;
6175 //ssd_clear_alarm(dev);
6180 static int __ssd_clear_warning(struct ssd_device
*dev
)
6185 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6189 /* clear log_info warning */
6190 memset(&dev
->smart
.log_info
, 0, sizeof(dev
->smart
.log_info
));
6192 /* clear io_stat warning */
6193 dev
->smart
.io_stat
.nr_to
= 0;
6194 dev
->smart
.io_stat
.nr_rwerr
= 0;
6195 dev
->smart
.io_stat
.nr_ioerr
= 0;
6197 /* clear ecc_info warning */
6198 memset(&dev
->smart
.ecc_info
, 0, sizeof(dev
->smart
.ecc_info
));
6200 /* clear queued warnings */
6201 for (i
=0; i
<dev
->nr_queue
; i
++) {
6202 /* queued io_stat warning */
6203 dev
->queue
[i
].io_stat
.nr_to
= 0;
6204 dev
->queue
[i
].io_stat
.nr_rwerr
= 0;
6205 dev
->queue
[i
].io_stat
.nr_ioerr
= 0;
6207 /* queued ecc_info warning */
6208 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(dev
->queue
[i
].ecc_info
));
6211 /* write smart back to nor */
6212 for (i
= 0; i
< dev
->rom_info
.nr_smart
; i
++) {
6213 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6214 size
= dev
->rom_info
.smart_sz
;
6216 ret
= ssd_spi_erase(dev
, off
, size
);
6218 hio_warn("%s: warning erase: failed with code 1\n", dev
->name
);
6222 size
= sizeof(struct ssd_smart
);
6224 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6226 hio_warn("%s: warning erase: failed with code 2\n", dev
->name
);
6231 dev
->smart
.version
++;
6233 /* clear cmd timeout warning */
6234 atomic_set(&dev
->tocnt
, 0);
6236 /* clear tmp log info */
6237 memset(&dev
->log_info
, 0, sizeof(dev
->log_info
));
6243 static int ssd_clear_smart(struct ssd_device
*dev
)
6247 ret
= __ssd_clear_smart(dev
);
6249 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_SMART
, 0);
6255 static int ssd_clear_warning(struct ssd_device
*dev
)
6259 ret
= __ssd_clear_warning(dev
);
6261 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_WARNING
, 0);
6267 static int ssd_save_smart(struct ssd_device
*dev
)
6273 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6276 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6280 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6284 dev
->smart
.version
++;
6286 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6287 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6288 size
= dev
->rom_info
.smart_sz
;
6290 ret
= ssd_spi_erase(dev
, off
, size
);
6292 hio_warn("%s: info erase failed\n", dev
->name
);
6296 size
= sizeof(struct ssd_smart
);
6298 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6300 hio_warn("%s: info write failed\n", dev
->name
);
6311 static int ssd_init_smart(struct ssd_device
*dev
)
6313 struct ssd_smart
*smart
;
6315 uint32_t off
, size
, val
;
6318 int update_smart
= 0;
6320 do_gettimeofday(&tv
);
6321 dev
->uptime
= tv
.tv_sec
;
6323 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6327 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6333 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6336 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6337 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6339 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6340 size
= sizeof(struct ssd_smart
);
6342 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6344 hio_warn("%s: info read failed\n", dev
->name
);
6348 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6350 smart
[i
].version
= 0;
6354 if (smart
[i
].version
> dev
->smart
.version
) {
6355 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6359 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6360 /* first time power up */
6361 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6362 dev
->smart
.version
= 1;
6365 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
);
6367 dev
->last_poweron_id
= ~0;
6368 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
6369 if (dev
->smart
.io_stat
.nr_to
) {
6370 dev
->smart
.io_stat
.nr_to
= 0;
6375 /* check log info */
6377 struct ssd_log_info log_info
;
6378 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6380 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6382 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6385 switch (log
->le
.event
) {
6386 /* skip the volatile log info */
6387 case SSD_LOG_SEU_FAULT
:
6388 case SSD_LOG_SEU_FAULT1
:
6391 case SSD_LOG_TIMEOUT
:
6392 skip
= (dev
->last_poweron_id
>= log_info
.nr_log
);
6397 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6405 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6406 if (log_info
.stat
[i
] != dev
->smart
.log_info
.stat
[i
]) {
6408 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6415 ++dev
->smart
.version
;
6419 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6420 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6424 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6425 size
= dev
->rom_info
.smart_sz
;
6427 ret
= ssd_spi_erase(dev
, off
, size
);
6429 hio_warn("%s: info erase failed\n", dev
->name
);
6433 size
= sizeof(struct ssd_smart
);
6434 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6436 hio_warn("%s: info write failed\n", dev
->name
);
6443 /* sync smart with alarm led */
6444 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6445 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6452 /* skip error if not in standard mode */
6453 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6460 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6462 struct ssd_bm_manufacturer_data bm_md
= {0};
6463 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6471 mutex_lock(&dev
->bm_mutex
);
6473 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6474 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6479 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6480 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6485 if (bm_md
.firmware_ver
& 0xF000) {
6490 *ver
= bm_md
.firmware_ver
;
6493 mutex_unlock(&dev
->bm_mutex
);
6497 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6500 int i
= SSD_BM_RETRY_MAX
;
6504 ret
= __ssd_bm_get_version(dev
, &tmp
);
6518 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6520 struct ssd_bm_configuration_registers bm_cr
;
6521 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6525 mutex_lock(&dev
->bm_mutex
);
6527 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6528 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6533 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6534 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6539 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6544 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6547 mutex_unlock(&dev
->bm_mutex
);
6551 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6554 int i
= SSD_BM_RETRY_MAX
;
6558 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6572 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6574 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6575 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6578 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6587 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6590 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6593 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6603 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6606 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6609 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6619 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6621 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6622 struct ssd_bm bm_status
;
6623 uint8_t buf
[2] = {0, };
6628 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6630 while (bm_sbs
->desc
!= NULL
) {
6631 switch (bm_sbs
->size
) {
6633 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6635 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6641 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6643 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6646 //val = *(uint16_t *)buf;
6654 switch (bm_sbs
->unit
) {
6655 case SBS_UNIT_VALUE
:
6656 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6658 case SBS_UNIT_TEMPERATURE
:
6659 cval
= (uint16_t)(val
- 2731) / 10;
6660 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6662 case SBS_UNIT_VOLTAGE
:
6663 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6665 case SBS_UNIT_CURRENT
:
6666 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6669 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6671 case SBS_UNIT_PERCENT
:
6672 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6674 case SBS_UNIT_CAPACITANCE
:
6675 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6686 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6692 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6694 struct ssd_bm bm_status
= {0};
6699 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6704 /* capacitor voltage */
6705 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6710 for (i
=0; i
<nr_cap
; i
++) {
6711 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6712 *status
= SSD_BMSTATUS_WARNING
;
6718 if (bm_status
.sf_status
) {
6719 *status
= SSD_BMSTATUS_WARNING
;
6724 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6725 *status
= SSD_BMSTATUS_CHARGING
;
6727 *status
= SSD_BMSTATUS_OK
;
6734 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6736 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6737 static void ssd_bm_worker(void *data
)
6739 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6741 static void ssd_bm_worker(struct work_struct
*work
)
6743 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6749 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6753 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6757 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6761 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6763 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6767 /* need cap learning ? */
6768 if (!(opstatus
& 0xF0)) {
6769 ret
= ssd_bm_enter_cap_learning(dev
);
6771 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6777 static void ssd_bm_routine_start(void *data
)
6779 struct ssd_device
*dev
;
6786 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6787 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6788 queue_work(dev
->workq
, &dev
->bm_work
);
6790 queue_work(dev
->workq
, &dev
->capmon_work
);
6796 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6803 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6808 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6813 /* make sure the lm80 voltage value is updated */
6814 msleep(SSD_LM80_CONV_INTERVAL
);
6816 /* check if full charged */
6819 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6821 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6822 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6826 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6827 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6832 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6836 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6839 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6841 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6842 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6846 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6853 /* enter cap learn */
6854 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6858 msleep(SSD_PL_CAP_LEARN_WAIT
);
6860 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6861 if (!((t
>> 1) & 0x1)) {
6866 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6872 if ((t
>> 4) & 0x1) {
6883 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6889 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6897 mutex_lock(&dev
->bm_mutex
);
6899 ssd_stop_workq(dev
);
6901 ret
= ssd_do_cap_learn(dev
, cap
);
6903 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6907 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6910 ssd_start_workq(dev
);
6911 mutex_unlock(&dev
->bm_mutex
);
6916 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6924 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6928 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6935 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6937 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6938 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6942 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6943 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6948 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6950 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6953 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6956 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6957 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6962 /* enable cap INx */
6963 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6965 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6966 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6972 /* skip error if not in standard mode */
6973 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6979 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6985 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6989 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6994 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6998 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6999 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
7007 static int ssd_init_pl_cap(struct ssd_device
*dev
)
7011 /* set here: user write mode */
7012 dev
->user_wmode
= wmode
;
7014 mutex_init(&dev
->bm_mutex
);
7016 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7018 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
7019 if ((val
>> 1) & 0x1) {
7020 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7023 ret
= ssd_check_pl_cap(dev
);
7025 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7033 static void __end_str(char *str
, int len
)
7037 for(i
=0; i
<len
; i
++) {
7038 if (*(str
+i
) == '\0')
7044 static int ssd_init_label(struct ssd_device
*dev
)
7050 /* label location */
7051 off
= dev
->rom_info
.label_base
;
7053 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7054 size
= sizeof(struct ssd_label
);
7057 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
7059 memset(&dev
->label
, 0, size
);
7063 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
7064 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
7065 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
7066 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
7067 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
7068 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
7070 size
= sizeof(struct ssd_labelv3
);
7073 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
7075 memset(&dev
->labelv3
, 0, size
);
7079 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7080 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7081 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
7082 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
7083 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7084 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7085 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
7086 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
7087 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
7091 /* skip error if not in standard mode */
7092 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7098 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
7100 struct ssd_device
*dev
;
7102 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
7106 dev
= bdev
->bd_disk
->private_data
;
7108 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7109 memset(label
, 0, sizeof(struct ssd_label
));
7110 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7111 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7112 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7113 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7115 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
7121 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
7123 uint16_t bm_ver
= 0;
7126 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7127 ret
= ssd_bm_get_version(dev
, &bm_ver
);
7133 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
7134 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
7135 ver
->bm_ver
= bm_ver
;
7136 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
7137 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
7144 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
7146 struct ssd_device
*dev
;
7149 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
7153 dev
= bdev
->bd_disk
->private_data
;
7155 mutex_lock(&dev
->fw_mutex
);
7156 ret
= __ssd_get_version(dev
, ver
);
7157 mutex_unlock(&dev
->fw_mutex
);
7162 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
7170 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7176 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
7177 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
7178 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
7179 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
7180 *temp
= (int)dev
->db_info
.data
.log
.extra
;
7185 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7186 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
7188 val
= ssd_reg_read(dev
->ctrlp
+ off
);
7189 if (val
== 0xffffffffffffffffull
) {
7193 cur
= (int)CUR_TEMP(val
);
7204 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
7206 struct ssd_device
*dev
;
7209 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
7213 dev
= bdev
->bd_disk
->private_data
;
7216 mutex_lock(&dev
->fw_mutex
);
7217 ret
= __ssd_get_temperature(dev
, temp
);
7218 mutex_unlock(&dev
->fw_mutex
);
7223 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
7225 struct ssd_device
*dev
;
7227 if (!bdev
|| !(bdev
->bd_disk
)) {
7231 dev
= bdev
->bd_disk
->private_data
;
7232 ssd_set_ot_protect(dev
, !!otprotect
);
7237 int ssd_bm_status(struct block_device
*bdev
, int *status
)
7239 struct ssd_device
*dev
;
7242 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7246 dev
= bdev
->bd_disk
->private_data
;
7248 mutex_lock(&dev
->fw_mutex
);
7249 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7250 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7251 *status
= SSD_BMSTATUS_WARNING
;
7253 *status
= SSD_BMSTATUS_OK
;
7255 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7256 ret
= __ssd_bm_status(dev
, status
);
7258 *status
= SSD_BMSTATUS_OK
;
7260 mutex_unlock(&dev
->fw_mutex
);
7265 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7267 struct ssd_device
*dev
;
7269 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7273 dev
= bdev
->bd_disk
->private_data
;
7275 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7276 paddr
->bus
= dev
->pdev
->bus
->number
;
7277 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7278 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7284 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7289 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7293 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7294 if (0xffffffffull
== acc
->threshold_l1
) {
7297 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7298 if (0xffffffffull
== acc
->threshold_l2
) {
7303 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7304 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7305 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7306 if (0xffffffffull
== acc
->val
) {
7309 if (val
> acc
->val
) {
7318 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7323 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7327 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7328 if (0xffffffffull
== acc
->threshold_l1
) {
7331 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7332 if (0xffffffffull
== acc
->threshold_l2
) {
7337 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7338 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7339 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7340 if (0xffffffffull
== acc
->val
) {
7344 if (val
> acc
->val
) {
7355 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7357 struct ssd_ram_op_msg
*msg
;
7359 size_t len
= length
;
7363 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7364 || !length
|| length
> dev
->hw_info
.ram_max_len
7365 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7369 len
/= dev
->hw_info
.ram_align
;
7370 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7372 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7373 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7374 ret
= dma_mapping_error(buf_dma
);
7376 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7379 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7380 goto out_dma_mapping
;
7383 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7385 msg
->fun
= SSD_FUNC_RAM_READ
;
7386 msg
->ctrl_idx
= ctrl_idx
;
7387 msg
->start
= (uint32_t)ofs_w
;
7391 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7394 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7400 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7402 struct ssd_ram_op_msg
*msg
;
7404 size_t len
= length
;
7408 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7409 || !length
|| length
> dev
->hw_info
.ram_max_len
7410 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7414 len
/= dev
->hw_info
.ram_align
;
7415 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7417 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7418 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7419 ret
= dma_mapping_error(buf_dma
);
7421 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7424 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7425 goto out_dma_mapping
;
7428 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7430 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7431 msg
->ctrl_idx
= ctrl_idx
;
7432 msg
->start
= (uint32_t)ofs_w
;
7436 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7439 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7446 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7453 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7454 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7459 len
= dev
->hw_info
.ram_max_len
;
7460 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7464 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7477 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7484 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7485 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7490 len
= dev
->hw_info
.ram_max_len
;
7491 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7495 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
/*
 * ssd_check_flash() - validate a (flash, page, ctrl_idx) triple against the
 * device's hardware geometry before issuing a NAND operation.
 *
 * NOTE(review): this extraction is garbled (statements split across lines,
 * original file line numbers fused into the text) and the error-return lines
 * inside each guard plus the final "return 0" were dropped.  Code is left
 * byte-identical; comments only.
 */
7510 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
/* split the flat flash number into channel / chip using the max channel count */
7512 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7513 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
/* controller index must be within the populated controllers */
7515 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
/* channel and chip must both be within the populated geometry */
7519 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
/* page is a flat index over all blocks: must be < block_count * page_count */
7523 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7529 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7531 struct ssd_nand_op_msg
*msg
;
7538 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7539 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7540 ret
= dma_mapping_error(buf_dma
);
7542 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7545 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7546 goto out_dma_mapping
;
7549 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7550 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7554 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7556 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7557 msg
->chip_no
= flash
;
7558 msg
->chip_ce
= chip
;
7559 msg
->ctrl_idx
= ctrl_idx
;
7562 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7565 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7572 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7573 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7575 struct ssd_nand_op_msg
*msg
;
7584 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7588 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7593 length
= page_count
* dev
->hw_info
.page_size
;
7595 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7596 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7597 ret
= dma_mapping_error(buf_dma
);
7599 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7602 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7603 goto out_dma_mapping
;
7606 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7607 flash
= (flash
<< 1) | chip
;
7611 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7613 msg
->fun
= SSD_FUNC_NAND_READ
;
7614 msg
->ctrl_idx
= ctrl_idx
;
7615 msg
->chip_no
= flash
;
7616 msg
->chip_ce
= chip
;
7617 msg
->page_no
= page
;
7618 msg
->page_count
= page_count
;
7621 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7624 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7631 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7632 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7634 struct ssd_nand_op_msg
*msg
;
7643 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7647 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7652 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7654 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7655 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7656 ret
= dma_mapping_error(buf_dma
);
7658 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7661 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7662 goto out_dma_mapping
;
7665 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7666 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7670 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7672 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7673 msg
->ctrl_idx
= ctrl_idx
;
7674 msg
->chip_no
= flash
;
7675 msg
->chip_ce
= chip
;
7676 msg
->page_no
= page
;
7677 msg
->page_count
= count
;
7680 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7683 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7690 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7691 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7693 struct ssd_nand_op_msg
*msg
;
7698 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7710 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7715 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7717 /* write data to ram */
7718 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7723 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7724 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7725 ret
= dma_mapping_error(buf_dma
);
7727 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7730 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7731 goto out_dma_mapping
;
7734 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7735 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7739 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7741 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7742 msg
->ctrl_idx
= ctrl_idx
;
7743 msg
->chip_no
= flash
;
7744 msg
->chip_ce
= chip
;
7746 msg
->page_no
= page
;
7747 msg
->page_count
= count
;
7750 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7753 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
/*
 * ssd_nand_erase() - build and submit a NAND erase command message for the
 * given flash/chip/page on controller ctrl_idx.
 *
 * NOTE(review): garbled extraction; the early-return after ssd_check_flash,
 * the msg NULL check, and the final return were dropped.  Code left
 * byte-identical; comments only.
 */
7759 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7761 struct ssd_nand_op_msg
*msg
;
/* validate the coordinates against the device geometry first */
7764 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
/* pre-V3 protocol encodes chip-enable in bit 0 of the flash number */
7769 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7770 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
/* grab a DMA message buffer and fill in the erase op */
7774 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7776 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7777 msg
->ctrl_idx
= ctrl_idx
;
7778 msg
->chip_no
= flash
;
7779 msg
->chip_ce
= chip
;
7780 msg
->page_no
= page
;
/* erase is submitted as a WRITE-direction request with no data buffer */
7782 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_update_bbt() - ask the firmware to flush/update the bad-block table
 * for the given flash on controller ctrl_idx.  The message layout differs
 * by protocol version: pre-V3 uses struct ssd_flush_msg overlaid on the
 * same buffer, V3+ uses the nand-op message fields directly.
 *
 * NOTE(review): garbled extraction; early returns, msg NULL check and the
 * final return were dropped.  Code left byte-identical; comments only.
 */
7788 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7790 struct ssd_nand_op_msg
*msg
;
7791 struct ssd_flush_msg
*fmsg
;
/* page 0 is enough to validate the flash/ctrl coordinates */
7794 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7799 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7801 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
/* pre-V3: reinterpret the buffer as a flush message */
7802 fmsg
= (struct ssd_flush_msg
*)msg
;
7804 fmsg
->fun
= SSD_FUNC_FLUSH
;
7806 fmsg
->flash
= flash
;
7807 fmsg
->ctrl_idx
= ctrl_idx
;
/* V3+: same function code, carried in the nand-op message */
7809 msg
->fun
= SSD_FUNC_FLUSH
;
7811 msg
->chip_no
= flash
;
7812 msg
->ctrl_idx
= ctrl_idx
;
7815 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7821 /* flash controller init state */
7822 static int __ssd_check_init_state(struct ssd_device
*dev
)
7824 uint32_t *init_state
= NULL
;
7825 int reg_base
, reg_sz
;
7826 int max_wait
= SSD_INIT_MAX_WAIT
;
7832 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7833 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7834 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7835 if (read_data == ~test_data) {
7836 //dev->hw_info.nr_ctrl++;
7837 dev->hw_info.nr_ctrl_map |= 1<<i;
7843 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7845 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7846 if (((read_data>>i) & 0x1) == 0) {
7851 if (dev->hw_info.nr_ctrl != j) {
7852 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
7858 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7859 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7860 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7861 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7867 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7868 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7869 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7870 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7875 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7876 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7877 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7878 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
7884 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7885 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7888 reg_base
= dev
->protocol_info
.init_state_reg
;
7889 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
7891 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7896 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7898 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7899 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7902 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7903 /* just check the last bit, no need to check all channel */
7904 ch_start
= dev
->hw_info
.max_ch
- 1;
7909 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7910 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7911 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7916 if (init_wait
<= max_wait
) {
7917 msleep(SSD_INIT_WAIT
);
7920 if (k
< dev
->hw_info
.nr_ch
) {
7921 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7922 dev
->name
, i
, j
, k
);
7924 hio_warn("%s: controller %d chip %d init failed\n",
7935 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
7941 static int ssd_check_init_state(struct ssd_device
*dev
)
7943 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7947 return __ssd_check_init_state(dev
);
7950 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7952 /* reset flash controller etc */
7953 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7956 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7960 mutex_lock(&dev
->fw_mutex
);
7962 if (type
== SSD_RST_NOINIT
) { //no init
7963 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7964 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7965 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7966 } else { // full reset
7967 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7968 mutex_unlock(&dev
->fw_mutex
);
7972 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7975 ssd_reset_resp_ptr(dev
);
7978 #ifdef SSD_OT_PROTECT
7985 ssd_set_flush_timeout(dev
, dev
->wmode
);
7987 mutex_unlock(&dev
->fw_mutex
);
7988 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7989 do_gettimeofday(&tv
);
7990 dev
->reset_time
= tv
.tv_sec
;
7992 return __ssd_check_init_state(dev
);
7995 static int ssd_save_md(struct ssd_device
*dev
)
7997 struct ssd_nand_op_msg
*msg
;
8000 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8003 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8007 if (!dev
->save_md
) {
8011 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8013 msg
->fun
= SSD_FUNC_FLUSH
;
8018 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8024 static int ssd_barrier_save_md(struct ssd_device
*dev
)
8026 struct ssd_nand_op_msg
*msg
;
8029 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8032 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8036 if (!dev
->save_md
) {
8040 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8042 msg
->fun
= SSD_FUNC_FLUSH
;
8047 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_flush() - send a firmware flush command to the device.  Only issued
 * in standard driver mode.  Pre-V3 protocol uses the ssd_flush_msg layout,
 * V3+ sets the function code on the nand-op message directly.
 *
 * NOTE(review): garbled extraction; the early-return body, msg NULL check
 * and final return were dropped.  Code left byte-identical; comments only.
 */
8053 static int ssd_flush(struct ssd_device
*dev
)
8055 struct ssd_nand_op_msg
*msg
;
8056 struct ssd_flush_msg
*fmsg
;
/* skip entirely unless the driver was loaded in standard mode */
8059 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8062 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8064 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
/* pre-V3: overlay the flush-message layout on the same buffer */
8065 fmsg
= (struct ssd_flush_msg
*)msg
;
8067 fmsg
->fun
= SSD_FUNC_FLUSH
;
8072 msg
->fun
= SSD_FUNC_FLUSH
;
/* flush travels as a WRITE-direction request with no data payload */
8078 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8084 static int ssd_barrier_flush(struct ssd_device
*dev
)
8086 struct ssd_nand_op_msg
*msg
;
8087 struct ssd_flush_msg
*fmsg
;
8090 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8093 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8095 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8096 fmsg
= (struct ssd_flush_msg
*)msg
;
8098 fmsg
->fun
= SSD_FUNC_FLUSH
;
8103 msg
->fun
= SSD_FUNC_FLUSH
;
8109 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8115 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
8116 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
8117 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
/*
 * ssd_set_flush_timeout() - program the controller flush-timeout register
 * for write mode m.  The register value packs the mode into bits 31:28 and
 * a per-mode timeout constant in the low bits.  No-op on firmware older
 * than protocol V3.1.1.
 *
 * NOTE(review): garbled extraction; the switch keyword, break statements,
 * default case and early return were dropped.  Code left byte-identical;
 * comments only.
 */
8118 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
/* old firmware has no flush-timeout register support */
8123 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8128 case SSD_WMODE_BUFFER
:
8129 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8131 case SSD_WMODE_BUFFER_EX
:
/* BUFFER_EX uses its own timeout only before protocol V3.2.1 */
8132 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
8133 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
8135 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8139 to
= SSD_WMODE_FUA_TIMEOUT
;
/* pack: mode (2 bits) in 31:28, timeout constant below */
8145 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
8147 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
8150 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
8154 ret
= ssd_barrier_start(dev
);
8159 ret
= ssd_barrier_flush(dev
);
8161 goto out_barrier_end
;
8164 /* set contoller flush timeout */
8165 ssd_set_flush_timeout(dev
, m
);
8171 ssd_barrier_end(dev
);
8176 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
8182 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8186 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8187 default_wmode
= SSD_WMODE_BUFFER
;
8189 default_wmode
= SSD_WMODE_BUFFER_EX
;
8192 if (SSD_WMODE_AUTO
== m
) {
8193 /* battery fault ? */
8194 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8195 next_wmode
= SSD_WMODE_FUA
;
8197 next_wmode
= default_wmode
;
8199 } else if (SSD_WMODE_DEFAULT
== m
) {
8200 next_wmode
= default_wmode
;
8205 if (next_wmode
!= dev
->wmode
) {
8206 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8207 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
8209 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8216 static int ssd_init_wmode(struct ssd_device
*dev
)
8221 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8222 default_wmode
= SSD_WMODE_BUFFER
;
8224 default_wmode
= SSD_WMODE_BUFFER_EX
;
8228 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
8229 /* battery fault ? */
8230 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8231 dev
->wmode
= SSD_WMODE_FUA
;
8233 dev
->wmode
= default_wmode
;
8235 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
8236 dev
->wmode
= default_wmode
;
8238 dev
->wmode
= dev
->user_wmode
;
8240 ssd_set_flush_timeout(dev
, dev
->wmode
);
/*
 * __ssd_set_wmode() - record the user-requested write mode, log the change
 * to the software log, and switch the live write mode.  Rejected on old
 * firmware (< protocol V3.1.1) and for out-of-range mode values.
 *
 * NOTE(review): garbled extraction; the error returns inside the guards
 * and the final return were dropped.  Code left byte-identical; comments
 * only.
 */
8245 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8249 /* not support old fw*/
8250 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
/* valid modes are SSD_WMODE_BUFFER .. SSD_WMODE_DEFAULT inclusive */
8255 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
/* record the mode change in the device software log */
8260 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8262 dev
->user_wmode
= m
;
8264 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
8273 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8275 struct ssd_device
*dev
;
8277 if (!bdev
|| !(bdev
->bd_disk
)) {
8281 dev
= bdev
->bd_disk
->private_data
;
8283 return __ssd_set_wmode(dev
, m
);
8286 static int ssd_do_reset(struct ssd_device
*dev
)
8290 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8294 ssd_stop_workq(dev
);
8296 ret
= ssd_barrier_start(dev
);
8301 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8303 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8306 //ret = __ssd_reset(dev, SSD_RST_FULL);
8307 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8310 goto out_barrier_end
;
8314 ssd_barrier_end(dev
);
8316 ssd_start_workq(dev
);
8317 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8321 static int ssd_full_reset(struct ssd_device
*dev
)
8325 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8329 ssd_stop_workq(dev
);
8331 ret
= ssd_barrier_start(dev
);
8336 ret
= ssd_barrier_flush(dev
);
8338 goto out_barrier_end
;
8341 ret
= ssd_barrier_save_md(dev
);
8343 goto out_barrier_end
;
8346 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8348 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8351 //ret = __ssd_reset(dev, SSD_RST_FULL);
8352 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8355 goto out_barrier_end
;
8359 ssd_barrier_end(dev
);
8361 ssd_start_workq(dev
);
8362 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8366 int ssd_reset(struct block_device
*bdev
)
8369 struct ssd_device
*dev
;
8371 if (!bdev
|| !(bdev
->bd_disk
)) {
8375 dev
= bdev
->bd_disk
->private_data
;
8377 ret
= ssd_full_reset(dev
);
8379 if (!dev
->has_non_0x98_reg_access
) {
8380 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, 0);
8387 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
8388 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8389 sector_t
*error_sector
)
8391 struct ssd_device
*dev
= q
->queuedata
;
8393 return ssd_flush(dev
);
8397 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8399 struct ssd_device
*dev
= q
->queuedata
;
8400 #ifdef SSD_QUEUE_PBIO
8404 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8405 ssd_bio_endio(bio
, -ENODEV
);
8409 #ifdef SSD_DEBUG_ERR
8410 if (atomic_read(&dev
->tocnt
)) {
8411 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8412 ssd_bio_endio(bio
, -EIO
);
8417 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8418 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8422 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8423 ssd_bio_endio(bio
, -EROFS
);
8427 #ifdef SSD_QUEUE_PBIO
8428 if (0 == atomic_read(&dev
->in_sendq
)) {
8429 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8433 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8434 ssd_queue_bio(dev
, bio
);
8437 __ssd_submit_pbio(dev
, bio
, 1);
8444 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8445 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8446 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8447 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8449 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8452 struct ssd_device
*dev
= q
->queuedata
;
8455 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8456 ssd_bio_endio(bio
, -ENODEV
);
8460 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
8461 blk_queue_split(q
, &bio
, q
->bio_split
);
8464 #ifdef SSD_DEBUG_ERR
8465 if (atomic_read(&dev
->tocnt
)) {
8466 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8467 ssd_bio_endio(bio
, -EIO
);
8472 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8473 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8477 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8478 if (unlikely(ssd_bio_has_flush(bio
) && !bio_sectors(bio
))) {
8479 ssd_bio_endio(bio
, 0);
8483 if (0 == atomic_read(&dev
->in_sendq
)) {
8484 ret
= ssd_submit_bio(dev
, bio
, 0);
8488 ssd_queue_bio(dev
, bio
);
8492 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8493 return BLK_QC_T_NONE
;
8494 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
/*
 * ssd_block_getgeo() - HDIO_GETGEO fallback geometry for the block device
 * (kernels >= 2.6.16 use the getgeo block_device_operations hook).
 *
 * NOTE(review): garbled extraction; the bdev validity check, the heads/
 * sectors assignments and the return were dropped.  Only the cylinders
 * computation survives: size with the low 6 bits masked, divided by 64
 * (i.e. assuming 16 heads * 4 sectors or similar fixed h*s product of 64
 * - TODO confirm against the dropped assignments).  Code left
 * byte-identical; comments only.
 */
8501 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
8502 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8504 struct ssd_device
*dev
;
8510 dev
= bdev
->bd_disk
->private_data
;
8517 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8522 static int ssd_init_queue(struct ssd_device
*dev
);
8523 static void ssd_cleanup_queue(struct ssd_device
*dev
);
8524 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8525 static int ssd_init_blkdev(struct ssd_device
*dev
);
8526 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8528 void __user
*argp
= (void __user
*)arg
;
8529 void __user
*buf
= NULL
;
8534 case SSD_CMD_GET_PROTOCOL_INFO
:
8535 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8536 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8542 case SSD_CMD_GET_HW_INFO
:
8543 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8544 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8550 case SSD_CMD_GET_ROM_INFO
:
8551 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8552 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8558 case SSD_CMD_GET_SMART
: {
8559 struct ssd_smart smart
;
8562 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8564 mutex_lock(&dev
->gd_mutex
);
8565 ssd_update_smart(dev
, &smart
);
8566 mutex_unlock(&dev
->gd_mutex
);
8568 /* combine the volatile log info */
8569 if (dev
->log_info
.nr_log
) {
8570 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8571 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8575 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8576 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8584 case SSD_CMD_GET_IDX
:
8585 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8586 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8592 case SSD_CMD_GET_AMOUNT
: {
8593 int nr_ssd
= atomic_read(&ssd_nr
);
8594 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8595 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8602 case SSD_CMD_GET_TO_INFO
: {
8603 int tocnt
= atomic_read(&dev
->tocnt
);
8605 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8606 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8613 case SSD_CMD_GET_DRV_VER
: {
8614 char ver
[] = DRIVER_VERSION
;
8615 int len
= sizeof(ver
);
8617 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8618 len
= (DRIVER_VERSION_LEN
- 1);
8620 if (copy_to_user(argp
, ver
, len
)) {
8621 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8628 case SSD_CMD_GET_BBACC_INFO
: {
8629 struct ssd_acc_info acc
;
8631 mutex_lock(&dev
->fw_mutex
);
8632 ret
= ssd_bb_acc(dev
, &acc
);
8633 mutex_unlock(&dev
->fw_mutex
);
8638 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8639 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8646 case SSD_CMD_GET_ECACC_INFO
: {
8647 struct ssd_acc_info acc
;
8649 mutex_lock(&dev
->fw_mutex
);
8650 ret
= ssd_ec_acc(dev
, &acc
);
8651 mutex_unlock(&dev
->fw_mutex
);
8656 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8657 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8664 case SSD_CMD_GET_HW_INFO_EXT
:
8665 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8666 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8672 case SSD_CMD_REG_READ
: {
8673 struct ssd_reg_op_info reg_info
;
8675 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8676 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8681 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8686 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8687 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8688 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8696 case SSD_CMD_REG_WRITE
: {
8697 struct ssd_reg_op_info reg_info
;
8699 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8700 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8705 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8710 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8715 case SSD_CMD_SPI_READ
: {
8716 struct ssd_spi_op_info spi_info
;
8719 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8720 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8726 size
= spi_info
.len
;
8729 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8734 kbuf
= kmalloc(size
, GFP_KERNEL
);
8740 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8746 if (copy_to_user(buf
, kbuf
, size
)) {
8747 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8758 case SSD_CMD_SPI_WRITE
: {
8759 struct ssd_spi_op_info spi_info
;
8762 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8763 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8769 size
= spi_info
.len
;
8772 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8777 kbuf
= kmalloc(size
, GFP_KERNEL
);
8783 if (copy_from_user(kbuf
, buf
, size
)) {
8784 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8790 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8801 case SSD_CMD_SPI_ERASE
: {
8802 struct ssd_spi_op_info spi_info
;
8805 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8806 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8813 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8818 ret
= ssd_spi_block_erase(dev
, off
);
8826 case SSD_CMD_I2C_READ
: {
8827 struct ssd_i2c_op_info i2c_info
;
8831 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8832 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8837 saddr
= i2c_info
.saddr
;
8838 rsize
= i2c_info
.rsize
;
8839 buf
= i2c_info
.rbuf
;
8841 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8846 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8852 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8858 if (copy_to_user(buf
, kbuf
, rsize
)) {
8859 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8870 case SSD_CMD_I2C_WRITE
: {
8871 struct ssd_i2c_op_info i2c_info
;
8875 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8876 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8881 saddr
= i2c_info
.saddr
;
8882 wsize
= i2c_info
.wsize
;
8883 buf
= i2c_info
.wbuf
;
8885 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8890 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8896 if (copy_from_user(kbuf
, buf
, wsize
)) {
8897 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8903 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8914 case SSD_CMD_I2C_WRITE_READ
: {
8915 struct ssd_i2c_op_info i2c_info
;
8921 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8922 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8927 saddr
= i2c_info
.saddr
;
8928 wsize
= i2c_info
.wsize
;
8929 rsize
= i2c_info
.rsize
;
8930 buf
= i2c_info
.wbuf
;
8932 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8937 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8942 size
= wsize
+ rsize
;
8944 kbuf
= kmalloc(size
, GFP_KERNEL
);
8950 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8951 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8957 buf
= i2c_info
.rbuf
;
8959 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8965 if (copy_to_user(buf
, kbuf
, rsize
)) {
8966 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8977 case SSD_CMD_SMBUS_SEND_BYTE
: {
8978 struct ssd_smbus_op_info smbus_info
;
8979 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8983 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8984 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8989 saddr
= smbus_info
.saddr
;
8990 buf
= smbus_info
.buf
;
8993 if (copy_from_user(smb_data
, buf
, size
)) {
8994 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8999 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
9007 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
9008 struct ssd_smbus_op_info smbus_info
;
9009 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9013 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9014 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9019 saddr
= smbus_info
.saddr
;
9020 buf
= smbus_info
.buf
;
9023 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
9028 if (copy_to_user(buf
, smb_data
, size
)) {
9029 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9037 case SSD_CMD_SMBUS_WRITE_BYTE
: {
9038 struct ssd_smbus_op_info smbus_info
;
9039 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9044 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9045 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9050 saddr
= smbus_info
.saddr
;
9051 command
= smbus_info
.cmd
;
9052 buf
= smbus_info
.buf
;
9055 if (copy_from_user(smb_data
, buf
, size
)) {
9056 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9061 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
9069 case SSD_CMD_SMBUS_READ_BYTE
: {
9070 struct ssd_smbus_op_info smbus_info
;
9071 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9076 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9077 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9082 saddr
= smbus_info
.saddr
;
9083 command
= smbus_info
.cmd
;
9084 buf
= smbus_info
.buf
;
9087 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
9092 if (copy_to_user(buf
, smb_data
, size
)) {
9093 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9101 case SSD_CMD_SMBUS_WRITE_WORD
: {
9102 struct ssd_smbus_op_info smbus_info
;
9103 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9108 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9109 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9114 saddr
= smbus_info
.saddr
;
9115 command
= smbus_info
.cmd
;
9116 buf
= smbus_info
.buf
;
9119 if (copy_from_user(smb_data
, buf
, size
)) {
9120 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9125 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
9133 case SSD_CMD_SMBUS_READ_WORD
: {
9134 struct ssd_smbus_op_info smbus_info
;
9135 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9140 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9141 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9146 saddr
= smbus_info
.saddr
;
9147 command
= smbus_info
.cmd
;
9148 buf
= smbus_info
.buf
;
9151 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
9156 if (copy_to_user(buf
, smb_data
, size
)) {
9157 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9165 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
9166 struct ssd_smbus_op_info smbus_info
;
9167 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9172 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9173 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9178 saddr
= smbus_info
.saddr
;
9179 command
= smbus_info
.cmd
;
9180 buf
= smbus_info
.buf
;
9181 size
= smbus_info
.size
;
9183 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9188 if (copy_from_user(smb_data
, buf
, size
)) {
9189 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9194 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9202 case SSD_CMD_SMBUS_READ_BLOCK
: {
9203 struct ssd_smbus_op_info smbus_info
;
9204 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9209 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9210 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9215 saddr
= smbus_info
.saddr
;
9216 command
= smbus_info
.cmd
;
9217 buf
= smbus_info
.buf
;
9218 size
= smbus_info
.size
;
9220 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9225 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9230 if (copy_to_user(buf
, smb_data
, size
)) {
9231 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9239 case SSD_CMD_BM_GET_VER
: {
9242 ret
= ssd_bm_get_version(dev
, &ver
);
9247 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9248 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9256 case SSD_CMD_BM_GET_NR_CAP
: {
9259 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9264 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9265 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9273 case SSD_CMD_BM_CAP_LEARNING
: {
9274 ret
= ssd_bm_enter_cap_learning(dev
);
9283 case SSD_CMD_CAP_LEARN
: {
9286 ret
= ssd_cap_learn(dev
, &cap
);
9291 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9292 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9300 case SSD_CMD_GET_CAP_STATUS
: {
9303 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9307 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9308 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9316 case SSD_CMD_RAM_READ
: {
9317 struct ssd_ram_op_info ram_info
;
9320 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9323 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9324 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9329 ofs
= ram_info
.start
;
9330 length
= ram_info
.length
;
9332 ctrl_idx
= ram_info
.ctrl_idx
;
9334 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9339 kbuf
= kmalloc(len
, GFP_KERNEL
);
9345 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9346 if ((length
- rlen
) < len
) {
9347 len
= length
- rlen
;
9350 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9355 if (copy_to_user(buf
, kbuf
, len
)) {
9366 case SSD_CMD_RAM_WRITE
: {
9367 struct ssd_ram_op_info ram_info
;
9370 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9373 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9374 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9378 ofs
= ram_info
.start
;
9379 length
= ram_info
.length
;
9381 ctrl_idx
= ram_info
.ctrl_idx
;
9383 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9388 kbuf
= kmalloc(len
, GFP_KERNEL
);
9394 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9395 if ((length
- wlen
) < len
) {
9396 len
= length
- wlen
;
9399 if (copy_from_user(kbuf
, buf
, len
)) {
9404 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9415 case SSD_CMD_NAND_READ_ID
: {
9416 struct ssd_flash_op_info flash_info
;
9417 int chip_no
, chip_ce
, length
, ctrl_idx
;
9419 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9420 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9425 chip_no
= flash_info
.flash
;
9426 chip_ce
= flash_info
.chip
;
9427 ctrl_idx
= flash_info
.ctrl_idx
;
9428 buf
= flash_info
.buf
;
9429 length
= dev
->hw_info
.id_size
;
9431 //kbuf = kmalloc(length, GFP_KERNEL);
9432 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9437 memset(kbuf
, 0, length
);
9439 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9445 if (copy_to_user(buf
, kbuf
, length
)) {
9456 case SSD_CMD_NAND_READ
: { //with oob
9457 struct ssd_flash_op_info flash_info
;
9459 int flash
, chip
, page
, ctrl_idx
;
9462 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9463 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9468 flash
= flash_info
.flash
;
9469 chip
= flash_info
.chip
;
9470 page
= flash_info
.page
;
9471 buf
= flash_info
.buf
;
9472 ctrl_idx
= flash_info
.ctrl_idx
;
9474 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9476 kbuf
= kmalloc(length
, GFP_KERNEL
);
9482 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9483 if (ret
&& (-EIO
!= ret
)) {
9488 if (copy_to_user(buf
, kbuf
, length
)) {
9500 case SSD_CMD_NAND_WRITE
: {
9501 struct ssd_flash_op_info flash_info
;
9502 int flash
, chip
, page
, ctrl_idx
;
9505 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9506 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9511 flash
= flash_info
.flash
;
9512 chip
= flash_info
.chip
;
9513 page
= flash_info
.page
;
9514 buf
= flash_info
.buf
;
9515 ctrl_idx
= flash_info
.ctrl_idx
;
9517 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9519 kbuf
= kmalloc(length
, GFP_KERNEL
);
9525 if (copy_from_user(kbuf
, buf
, length
)) {
9531 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9541 case SSD_CMD_NAND_ERASE
: {
9542 struct ssd_flash_op_info flash_info
;
9543 int flash
, chip
, page
, ctrl_idx
;
9545 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9546 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9551 flash
= flash_info
.flash
;
9552 chip
= flash_info
.chip
;
9553 page
= flash_info
.page
;
9554 ctrl_idx
= flash_info
.ctrl_idx
;
9556 if ((page
% dev
->hw_info
.page_count
) != 0) {
9561 //hio_warn("erase fs = %llx\n", ofs);
9562 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9570 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9571 struct ssd_flash_op_info flash_info
;
9573 int flash
, chip
, page
, ctrl_idx
;
9575 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9576 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9581 flash
= flash_info
.flash
;
9582 chip
= flash_info
.chip
;
9583 page
= flash_info
.page
;
9584 buf
= flash_info
.buf
;
9585 ctrl_idx
= flash_info
.ctrl_idx
;
9587 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9589 kbuf
= kmalloc(length
, GFP_KERNEL
);
9595 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9596 if (-EIO
== ret
) { //ingore EIO
9604 if (copy_to_user(buf
, kbuf
, length
)) {
9614 case SSD_CMD_UPDATE_BBT
: {
9615 struct ssd_flash_op_info flash_info
;
9616 int ctrl_idx
, flash
;
9618 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9619 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9624 ctrl_idx
= flash_info
.ctrl_idx
;
9625 flash
= flash_info
.flash
;
9626 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9634 case SSD_CMD_CLEAR_ALARM
:
9635 ssd_clear_alarm(dev
);
9638 case SSD_CMD_SET_ALARM
:
9643 ret
= ssd_do_reset(dev
);
9646 case SSD_CMD_RELOAD_FW
:
9648 dev
->has_non_0x98_reg_access
= 1;
9649 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9650 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9651 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9652 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9657 case SSD_CMD_UNLOAD_DEV
: {
9658 if (atomic_read(&dev
->refcnt
)) {
9664 ssd_save_smart(dev
);
9666 ret
= ssd_flush(dev
);
9671 /* cleanup the block device */
9672 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9673 mutex_lock(&dev
->gd_mutex
);
9674 ssd_cleanup_blkdev(dev
);
9675 ssd_cleanup_queue(dev
);
9676 mutex_unlock(&dev
->gd_mutex
);
9682 case SSD_CMD_LOAD_DEV
: {
9684 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9689 ret
= ssd_init_smart(dev
);
9691 hio_warn("%s: init info: failed\n", dev
->name
);
9695 ret
= ssd_init_queue(dev
);
9697 hio_warn("%s: init queue failed\n", dev
->name
);
9700 ret
= ssd_init_blkdev(dev
);
9702 hio_warn("%s: register block device: failed\n", dev
->name
);
9705 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9710 case SSD_CMD_UPDATE_VP
: {
9712 uint32_t new_vp
, new_vp1
= 0;
9714 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9719 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9720 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9725 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9730 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9731 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9733 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9734 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9735 new_vp1
= val
& 0x3FF;
9737 new_vp1
= val
& 0x7FFF;
9740 if (new_vp1
== new_vp
) {
9745 /*if (new_vp == dev->hw_info.valid_pages) {
9750 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9752 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9757 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9758 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9759 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9765 dev
->hw_info
.valid_pages
= new_vp
;
9766 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9767 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9768 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9773 case SSD_CMD_FULL_RESET
: {
9774 ret
= ssd_full_reset(dev
);
9778 case SSD_CMD_GET_NR_LOG
: {
9779 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9786 case SSD_CMD_GET_LOG
: {
9787 uint32_t length
= dev
->rom_info
.log_sz
;
9791 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9799 case SSD_CMD_LOG_LEVEL
: {
9801 if (copy_from_user(&level
, argp
, sizeof(int))) {
9802 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9807 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9808 level
= SSD_LOG_LEVEL_ERR
;
9811 //just for showing log, no need to protect
9816 case SSD_CMD_OT_PROTECT
: {
9819 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9820 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9825 ssd_set_ot_protect(dev
, !!protect
);
9829 case SSD_CMD_GET_OT_STATUS
: {
9830 int status
= ssd_get_ot_status(dev
, &status
);
9832 if (copy_to_user(argp
, &status
, sizeof(int))) {
9833 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9840 case SSD_CMD_CLEAR_LOG
: {
9841 ret
= ssd_clear_log(dev
);
9845 case SSD_CMD_CLEAR_SMART
: {
9846 ret
= ssd_clear_smart(dev
);
9850 case SSD_CMD_CLEAR_WARNING
: {
9851 ret
= ssd_clear_warning(dev
);
9855 case SSD_CMD_SW_LOG
: {
9856 struct ssd_sw_log_info sw_log
;
9858 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9859 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9864 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9868 case SSD_CMD_GET_LABEL
: {
9870 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9875 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9876 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9883 case SSD_CMD_GET_VERSION
: {
9884 struct ssd_version_info ver
;
9886 mutex_lock(&dev
->fw_mutex
);
9887 ret
= __ssd_get_version(dev
, &ver
);
9888 mutex_unlock(&dev
->fw_mutex
);
9893 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9894 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9901 case SSD_CMD_GET_TEMPERATURE
: {
9904 mutex_lock(&dev
->fw_mutex
);
9905 ret
= __ssd_get_temperature(dev
, &temp
);
9906 mutex_unlock(&dev
->fw_mutex
);
9911 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9912 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9919 case SSD_CMD_GET_BMSTATUS
: {
9922 mutex_lock(&dev
->fw_mutex
);
9923 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9924 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9925 status
= SSD_BMSTATUS_WARNING
;
9927 status
= SSD_BMSTATUS_OK
;
9929 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9930 ret
= __ssd_bm_status(dev
, &status
);
9932 status
= SSD_BMSTATUS_OK
;
9934 mutex_unlock(&dev
->fw_mutex
);
9939 if (copy_to_user(argp
, &status
, sizeof(int))) {
9940 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9947 case SSD_CMD_GET_LABEL2
: {
9951 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9952 label
= &dev
->label
;
9953 length
= sizeof(struct ssd_label
);
9955 label
= &dev
->labelv3
;
9956 length
= sizeof(struct ssd_labelv3
);
9959 if (copy_to_user(argp
, label
, length
)) {
9967 ret
= ssd_flush(dev
);
9969 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9975 case SSD_CMD_SAVE_MD
: {
9978 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9979 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9984 dev
->save_md
= !!save_md
;
9988 case SSD_CMD_SET_WMODE
: {
9991 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9992 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9997 ret
= __ssd_set_wmode(dev
, new_wmode
);
10005 case SSD_CMD_GET_WMODE
: {
10006 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
10007 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10015 case SSD_CMD_GET_USER_WMODE
: {
10016 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
10017 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10025 case SSD_CMD_DEBUG
: {
10026 struct ssd_debug_info db_info
;
10033 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
10034 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10039 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
10045 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
10046 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
10051 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
10053 #ifdef SSD_OT_PROTECT
10055 if (db_info
.type
== SSD_DEBUG_NONE
) {
10056 ssd_check_temperature(dev
, SSD_OT_TEMP
);
10057 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
10058 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
10059 dev
->ot_delay
= SSD_OT_DELAY
;
10060 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
10067 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
10068 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
10069 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
10070 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
10074 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
10075 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
10081 case SSD_CMD_DRV_PARAM_INFO
: {
10082 struct ssd_drv_param_info drv_param
;
10084 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
10086 drv_param
.mode
= mode
;
10087 drv_param
.status_mask
= status_mask
;
10088 drv_param
.int_mode
= int_mode
;
10089 drv_param
.threaded_irq
= threaded_irq
;
10090 drv_param
.log_level
= log_level
;
10091 drv_param
.wmode
= wmode
;
10092 drv_param
.ot_protect
= ot_protect
;
10093 drv_param
.finject
= finject
;
10095 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
10096 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
/*
 * ssd_block_ioctl - ioctl entry point for the block device node.
 * Two prototypes are kept: the legacy inode/file form for kernels
 * <= 2.6.27 and the block_device/fmode_t form for newer kernels.
 * NOTE(review): interior lines (braces, returns, other cases) are
 * missing from this extract; comments annotate only what is visible.
 */
10112 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10113 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
10114 unsigned int cmd
, unsigned long arg
)
10116 struct ssd_device
*dev
;
10117 void __user
*argp
= (void __user
*)arg
;
/* Legacy kernels: recover the driver context from the inode's bdev. */
10123 dev
= inode
->i_bdev
->bd_disk
->private_data
;
/* Modern prototype (kernels > 2.6.27). */
10128 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
10129 unsigned int cmd
, unsigned long arg
)
10131 struct ssd_device
*dev
;
10132 void __user
*argp
= (void __user
*)arg
;
10139 dev
= bdev
->bd_disk
->private_data
;
/* HDIO_GETGEO: synthesize a fake CHS geometry from the capacity. */
10146 case HDIO_GETGEO
: {
10147 struct hd_geometry geo
;
10148 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
10151 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10152 geo
.start
= get_start_sect(inode
->i_bdev
);
10154 geo
.start
= get_start_sect(bdev
);
/* Copy the synthesized geometry back to user space. */
10156 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
10165 ret
= ssd_flush(dev
);
10167 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
/* Everything else is delegated to the shared command dispatcher. */
10175 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * ssd_free_dev - kref release callback for a ssd_device.
 * Runs when the last reference is dropped via ssd_put().
 * NOTE(review): lines are missing from this extract; the original
 * presumably also frees the device structure itself — confirm.
 */
10186 static void ssd_free_dev(struct kref
*kref
)
10188 struct ssd_device
*dev
;
/* Map the embedded kref back to its containing ssd_device. */
10194 dev
= container_of(kref
, struct ssd_device
, kref
);
/* Return the device's minor index to the allocator. */
10198 ssd_put_index(dev
->slave
, dev
->idx
);
/* ssd_put - drop one device reference; ssd_free_dev runs at zero. */
10203 static void ssd_put(struct ssd_device
*dev
)
10205 kref_put(&dev
->kref
, ssd_free_dev
)
/* ssd_get - take an extra reference on the device (paired with ssd_put). */
10208 static int ssd_get(struct ssd_device
*dev
)
10210 kref_get(&dev
->kref
)
/*
 * ssd_block_open - open handler for the block device.
 * Legacy (<= 2.6.27) and modern prototypes are both provided.
 */
10215 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10216 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10218 struct ssd_device
*dev
;
10224 dev
= inode
->i_bdev
->bd_disk
->private_data
;
/* Modern prototype (kernels > 2.6.27). */
10229 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10231 struct ssd_device
*dev
;
10237 dev
= bdev
->bd_disk
->private_data
;
/* Module refcounting was deliberately disabled upstream; the atomic
 * refcnt below tracks open handles (checked by SSD_CMD_UNLOAD_DEV). */
10243 /*if (!try_module_get(dev->owner))
10249 atomic_inc(&dev
->refcnt
)
/*
 * ssd_block_release - release handler for the block device.
 * Three prototypes across kernel generations: inode/file (<= 2.6.27),
 * int-returning gendisk form (<= 3.9), void-returning gendisk form.
 */
10254 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10255 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10257 struct ssd_device
*dev
;
10263 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10267 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10268 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10270 struct ssd_device
*dev
;
10276 dev
= disk
->private_data
;
10281 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10283 struct ssd_device
*dev
;
10289 dev
= disk
->private_data
;
/* Balance the atomic_inc done in ssd_block_open. */
10295 atomic_dec(&dev
->refcnt
);
10299 //module_put(dev->owner);
10300 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
/*
 * Block device operation table for the data disk.
 * .getgeo is only wired up on kernels that have the hook (>= 2.6.16).
 */
10305 static struct block_device_operations ssd_fops
= {
10306 .owner
= THIS_MODULE
,
10307 .open
= ssd_block_open
,
10308 .release
= ssd_block_release
,
10309 .ioctl
= ssd_block_ioctl
,
10310 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10311 .getgeo
= ssd_block_getgeo
,
/*
 * ssd_init_trim - enable and size discard (TRIM) support on the queue.
 * Entirely compiled out unless SSD_TRIM is defined and kernel >= 2.6.32.
 */
10315 static void ssd_init_trim(ssd_device_t
*dev
)
10317 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
/* Old protocols (<= V3) get no discard support. */
10318 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10321 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10323 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
/* discard_zeroes_data was removed from the kernel in 4.12. */
10324 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
10325 dev
->rq
->limits
.discard_zeroes_data
= 1;
/* 4 KiB alignment/granularity for discards. */
10327 dev
->rq
->limits
.discard_alignment
= 4096;
10328 dev
->rq
->limits
.discard_granularity
= 4096;
/* Pre-V3.2.4 protocols cap a discard at one SG segment's sectors;
 * newer ones allow sg_max_sec per segment across cmd_max_sg segments. */
10330 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10331 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10333 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
)
/* ssd_cleanup_queue - tear down the request queue built by ssd_init_queue. */
10338 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10342 blk_cleanup_queue(dev
->rq
)
/*
 * ssd_init_queue - allocate and configure the bio-based request queue.
 * Sets segment/sector limits from hw_info and marks the queue non-rotational.
 */
10346 static int ssd_init_queue(struct ssd_device
*dev
)
10348 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10349 if (dev
->rq
== NULL
) {
10350 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10351 goto out_init_queue
;
10354 /* must be first */
10355 blk_queue_make_request(dev
->rq
, ssd_make_request
);
/* Segment/sector limits: old API names below, new API in the #else arm. */
10357 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10358 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10359 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10360 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10362 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10363 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
/* 512-byte logical sectors (API renamed in 2.6.31). */
10366 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10367 blk_queue_hardsect_size(dev
->rq
, 512);
10369 blk_queue_logical_block_size(dev
->rq
, 512);
10371 /* not work for make_request based drivers(bio) */
10372 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
/* Bounce highmem pages; the DMA mask is handled elsewhere. */
10374 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
/* Back-pointer so request handlers can find the driver context. */
10376 dev
->rq
->queuedata
= dev
;
10378 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10379 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
/* SSD: tell the I/O scheduler the device is non-rotational. */
10382 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10383 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10386 ssd_init_trim(dev
)
/* ssd_cleanup_blkdev - unregister the gendisk created by ssd_init_blkdev. */
10394 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10396 del_gendisk(dev
->gd
)
/*
 * ssd_init_blkdev - allocate, populate and register the gendisk.
 */
10399 static int ssd_init_blkdev(struct ssd_device
*dev
)
10405 dev
->gd
= alloc_disk(ssd_minors
);
10407 hio_warn("%s: alloc_disk fail\n", dev
->name
);
/* Wire up major/minor, ops, queue and driver context. */
10410 dev
->gd
->major
= dev
->major
;
10411 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10412 dev
->gd
->fops
= &ssd_fops
;
10413 dev
->gd
->queue
= dev
->rq
;
10414 dev
->gd
->private_data
= dev
;
10416 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
/* Capacity is kept in bytes in hw_info; gendisk wants 512-byte sectors. */
10418 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
/* >= 4.8 registers disk+parent in one call; older kernels set
 * driverfs_dev and (presumably) call add_disk separately. */
10420 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
10421 device_add_disk(&dev
->pdev
->dev
, dev
->gd
);
10423 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
/*
 * ssd_ioctl - ioctl entry for the character device.
 * Legacy locked ioctl prototype on <= 2.6.10, unlocked_ioctl otherwise;
 * both just forward to the shared dispatcher.
 */
10433 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10434 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10435 unsigned int cmd
, unsigned long arg
)
10437 static long ssd_ioctl(struct file
*file
,
10438 unsigned int cmd
, unsigned long arg
)
10441 struct ssd_device
*dev
;
/* Device context was stashed in private_data by ssd_open. */
10447 dev
= file
->private_data
;
10452 return (long)ssd_ioctl_common(dev
, cmd
, arg
)
/*
 * ssd_open - open handler for the character device.
 * Resolves the minor number to a registered ssd_device and stores it
 * in file->private_data for later ioctl/release calls.
 */
10455 static int ssd_open(struct inode
*inode
, struct file
*file
)
10457 struct ssd_device
*dev
= NULL
;
10458 struct ssd_device
*n
= NULL
;
10462 if (!inode
|| !file
) {
/* The char-device minor doubles as the device index. */
10466 idx
= iminor(inode
);
/* Scan the global device list for a matching index. */
10468 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10469 if (dev
->idx
== idx
) {
10479 file
->private_data
= dev
/*
 * ssd_release - release handler for the character device; clears the
 * context pointer stored by ssd_open.
 */
10486 static int ssd_release(struct inode
*inode
, struct file
*file
)
10488 struct ssd_device
*dev
;
10494 dev
= file
->private_data
;
10501 file
->private_data
= NULL
/*
 * ssd_reload_ssd_ptr - reprogram the hardware's DMA base registers
 * after a reset, so the bridge sees the driver's message and response
 * rings again.
 */
10506 static int ssd_reload_ssd_ptr(struct ssd_device
*dev
)
10508 ssd_reset_resp_ptr(dev
);
10510 //update base reg address
/* Message ring base is only programmable on protocol V3 and later. */
10511 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10513 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
10516 //update response base reg address
10517 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
10518 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
)
/*
 * File operation table for the management character device
 * (open handler omitted from this extract). Uses the legacy .ioctl
 * hook on <= 2.6.10, .unlocked_ioctl afterwards.
 */
10523 static struct file_operations ssd_cfops
= {
10524 .owner
= THIS_MODULE
,
10526 .release
= ssd_release
,
10527 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10528 .ioctl
= ssd_ioctl
,
10530 .unlocked_ioctl
= ssd_ioctl
,
10534 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10540 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10541 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10542 devfs_remove("c%s", dev
->name
);
10543 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10544 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10545 devfs_remove("c%s", dev
->name
);
10546 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10547 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10548 devfs_remove("c%s", dev
->name
);
10549 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10550 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10552 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10556 static int ssd_init_chardev(struct ssd_device
*dev
)
10564 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10565 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10569 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10571 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10572 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10576 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10578 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10579 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10583 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10585 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10586 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10587 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10588 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10589 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10590 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10592 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
/*
 * ssd_check_hw - sanity-check the bridge by writing a test pattern to
 * its scratch register; the hardware is expected to read back the
 * bitwise complement of what was written.
 */
10598 static int ssd_check_hw(struct ssd_device
*dev
)
10600 uint32_t test_data
= 0x55AA5AA5;
10601 uint32_t read_data
;
10603 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10604 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
/* Mismatch against the complement means the bridge is absent/faulty. */
10605 if (read_data
!= ~(test_data
)) {
10606 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
/*
 * ssd_check_fw - poll the hardware status register until controller
 * firmware reports loaded (bit 0) and the controller state is good
 * (bit 8), then check whether a firmware reload is pending.
 * Not supported before protocol V3.1.3.
 */
10613 static int ssd_check_fw(struct ssd_device
*dev
)
10618 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
/* Poll up to SSD_CONTROLLER_WAIT times, sleeping between reads. */
10622 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10623 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10624 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10628 msleep(SSD_INIT_WAIT
);
/* Timed out: report which of the two conditions failed. */
10631 if (!(val
& 0x1)) {
10632 /* controller fw status */
10633 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10635 } else if (!((val
>> 8) & 0x1)) {
10636 /* controller state */
10637 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
/* Remember that a firmware reload was requested/flagged. */
10641 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10643 dev
->reload_fw
= 1
/*
 * ssd_init_fw_info - read and validate the bridge firmware version,
 * then run the controller-firmware readiness check.
 */
10649 static int ssd_init_fw_info(struct ssd_device
*dev
)
/* Low 12 bits of the version register hold the bridge fw version. */
10654 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10655 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10656 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10657 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10660 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10662 ret
= ssd_check_fw(dev
);
10668 /* skip error if not in standard mode */
10669 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10675 static int ssd_check_clock(struct ssd_device
*dev
)
10680 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10684 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10687 if (!((val
>> 4 ) & 0x1)) {
10688 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10689 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10690 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10695 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10696 if (!((val
>> 5 ) & 0x1)) {
10697 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10698 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10699 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10703 if (!((val
>> 6 ) & 0x1)) {
10704 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10705 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10706 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10710 if (!((val
>> 7 ) & 0x1)) {
10711 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10712 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10713 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10722 static int ssd_check_volt(struct ssd_device
*dev
)
10729 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10733 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10735 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10736 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10737 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10738 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10739 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10740 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10741 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10745 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10746 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10747 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10748 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10749 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10755 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10756 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10757 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10758 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10759 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10760 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10761 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10765 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10766 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10767 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10768 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10769 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
/*
 * ssd_check_reset_sync - verify controller state after reset and log
 * if the controllers reset asynchronously. Only meaningful for
 * protocols between V3.1.3 (inclusive) and V3.2 (exclusive).
 */
10778 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10782 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10786 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
/* Bit 8: controller state good. */
10787 if (!((val
>> 8) & 0x1)) {
10788 /* controller state */
10789 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10793 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
/* Bit 9 set means the reset was not synchronized; log a sw event. */
10797 if (((val
>> 9 ) & 0x1)) {
10798 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10799 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
)
/*
 * ssd_check_hw_bh - bottom-half hardware check: verifies clocks on
 * protocol >= V3.1.3 devices; errors are ignored outside standard mode.
 */
10806 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10810 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10815 ret
= ssd_check_clock(dev
);
10821 /* skip error if not in standard mode */
10822 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_controller - verify reset synchronization on protocol
 * >= V3.1.3 devices; errors are ignored outside standard mode.
 */
10828 static int ssd_check_controller(struct ssd_device
*dev
)
10832 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10837 ret
= ssd_check_reset_sync(dev
);
10843 /* skip error if not in standard mode */
10844 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10850 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10852 uint32_t test_data
= 0x55AA5AA5;
10854 int reg_base
, reg_sz
;
10859 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10864 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10866 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10870 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10871 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10872 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10873 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10874 if (val
!= ~(test_data
)) {
10875 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10881 ret
= ssd_check_volt(dev
);
10887 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10888 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10889 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10891 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10893 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10895 if (!((val
>> 1) & 0x1)) {
10897 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10898 msleep(SSD_INIT_WAIT
);
10899 goto check_ram_status
;
10901 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10902 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10907 reg_base
+= reg_sz
;
10912 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10913 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10914 if (!((val
>> 31) & 0x1)) {
10918 msleep(SSD_INIT_WAIT
);
10920 if ((val
>> 31) & 0x1) {
10921 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10928 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10932 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10933 if (val
== (uint32_t)-1) {
10934 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10937 dev
->protocol_info
.ver
= val
;
10939 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10940 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10941 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10943 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10944 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10946 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10947 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10949 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10950 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10956 static int ssd_init_hw_info(struct ssd_device
*dev
)
10964 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10965 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10966 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10968 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10969 hio_warn("%s: response info error\n", dev
->name
);
10974 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10975 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10976 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10977 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10978 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10980 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10981 hio_warn("%s: cmd info error\n", dev
->name
);
10987 if (ssd_check_hw_bh(dev
)) {
10988 hio_warn("%s: check hardware status failed\n", dev
->name
);
10993 if (ssd_check_controller(dev
)) {
10994 hio_warn("%s: check controller state failed\n", dev
->name
);
10999 /* nr controller : read again*/
11000 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
11001 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
11003 /* nr ctrl configured */
11004 nr_ctrl
= (val
>> 20) & 0xF;
11005 if (0 == dev
->hw_info
.nr_ctrl
) {
11006 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
11009 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
11010 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
11011 if (mode
<= SSD_DRV_MODE_STANDARD
) {
11017 if (ssd_check_controller_bh(dev
)) {
11018 hio_warn("%s: check controller failed\n", dev
->name
);
11023 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11024 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
11025 if ((val
& 0xF) != 0xF) {
11026 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
11029 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
11030 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
11036 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11037 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11038 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
11039 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
11040 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
11042 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11043 dev
->hw_info
.max_ch
= 1;
11044 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
11046 /* set max channel 32 */
11047 dev
->hw_info
.max_ch
= 32;
11050 if (0 == dev
->hw_info
.nr_chip
) {
11052 dev
->hw_info
.nr_chip
= 1;
11056 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
11057 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
11059 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
11060 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
11067 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11068 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
11069 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
11070 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
11071 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
11072 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11073 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
11075 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
11080 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
11082 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
11083 hio_warn("%s: ram info error\n", dev
->name
);
11088 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11089 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
11091 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
11092 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
11094 if (0 == dev
->hw_info
.log_sz
) {
11095 hio_warn("%s: log size error\n", dev
->name
);
11100 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
11101 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
11102 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11103 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11104 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
11105 hio_warn("%s: bbt info error\n", dev
->name
);
11111 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
11112 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
11113 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11114 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11116 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
11118 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
11119 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
11120 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
11121 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
11122 hio_warn("%s: md info error\n", dev
->name
);
11128 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11129 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
11131 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
11132 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
11137 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11138 if (dev
->hw_info
.nr_ctrl
> 1) {
11139 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
11140 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
11141 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
11144 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
11145 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
11146 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
11148 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
11149 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
11150 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
11152 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
11153 dev
->hw_info
.bbf_pages
= val
& 0xFF;
11154 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
11156 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
11157 hio_warn("%s: flash info error\n", dev
->name
);
11163 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
11165 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
11166 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11167 dev
->hw_info
.valid_pages
= val
& 0x3FF;
11168 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
11170 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
11171 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
11173 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
11174 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
11175 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
11180 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
11181 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
11182 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
11183 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11184 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
11186 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
11187 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
11194 if (mode
< SSD_DRV_MODE_DEBUG
) {
11195 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
11196 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
11197 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
11200 /* extend hardware info */
11201 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11202 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
11204 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11205 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11206 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11209 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11210 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11211 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11214 /* power loss protect */
11215 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11216 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11217 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11219 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11223 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11224 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11227 /* skip error if not in standard mode */
11228 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11234 static void ssd_cleanup_response(struct ssd_device
*dev
)
11236 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11237 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11239 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11240 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11243 static int ssd_init_response(struct ssd_device
*dev
)
11245 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11246 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11248 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11249 if (!dev
->resp_msg_base
) {
11250 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11251 goto out_alloc_resp_msg
;
11253 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11255 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11256 if (!dev
->resp_ptr_base
){
11257 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11258 goto out_alloc_resp_ptr
;
11260 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11261 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11263 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11264 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11268 out_alloc_resp_ptr
:
11269 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11270 out_alloc_resp_msg
:
11274 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11276 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11279 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11280 kfree(dev
->cmd
[i
].sgl
);
11283 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11287 static int ssd_init_cmd(struct ssd_device
*dev
)
11289 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11290 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11291 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11294 spin_lock_init(&dev
->cmd_lock
);
11296 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11297 if (!dev
->msg_base
) {
11298 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11299 goto out_alloc_msg
;
11302 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11304 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11305 goto out_alloc_cmd
;
11307 memset(dev
->cmd
, 0, cmd_sz
);
11309 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11310 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11311 if (!dev
->cmd
[i
].sgl
) {
11312 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11313 goto out_alloc_sgl
;
11316 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11317 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11319 dev
->cmd
[i
].dev
= dev
;
11320 dev
->cmd
[i
].tag
= i
;
11321 dev
->cmd
[i
].flag
= 0;
11323 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11326 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11327 dev
->scmd
= ssd_dispatch_cmd
;
11329 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11331 dev
->scmd
= ssd_send_cmd_db
;
11333 dev
->scmd
= ssd_send_cmd
;
11340 for (i
--; i
>=0; i
--) {
11341 kfree(dev
->cmd
[i
].sgl
);
11345 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11350 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11351 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11353 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11355 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11359 return IRQ_WAKE_THREAD
;
11362 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11364 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11365 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11366 struct ssd_cmd
*cmd
;
11367 union ssd_response_msq __msg
;
11368 union ssd_response_msq
*msg
= &__msg
;
11370 uint32_t resp_idx
= queue
->resp_idx
;
11371 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11372 uint32_t end_resp_idx
;
11374 if (unlikely(resp_idx
== new_resp_idx
)) {
11378 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11381 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11384 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11385 msg
->u64_msg
= *u64_msg
;
11387 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11388 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11391 /* clear the resp msg */
11392 *u64_msg
= (uint64_t)(-1);
11394 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11395 /*if (unlikely(!cmd->bio)) {
11396 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11397 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11401 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11402 cmd
->errors
= -EIO
;
11406 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11410 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11411 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11412 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11413 queue_work(dev
->workq
, &dev
->log_work
);
11417 if (unlikely(msg
->resp_msg
.status
)) {
11418 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11419 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11420 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11423 ssd_set_alarm(dev
);
11424 queue
->io_stat
.nr_rwerr
++;
11425 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11427 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11428 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11430 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11432 queue
->io_stat
.nr_ioerr
++;
11435 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11436 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11437 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11439 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11441 }while (resp_idx
!= end_resp_idx
);
11443 queue
->resp_idx
= new_resp_idx
;
11445 return IRQ_HANDLED
;
11449 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11450 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11452 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11455 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11456 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11457 struct ssd_cmd
*cmd
;
11458 union ssd_response_msq __msg
;
11459 union ssd_response_msq
*msg
= &__msg
;
11461 uint32_t resp_idx
= queue
->resp_idx
;
11462 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11463 uint32_t end_resp_idx
;
11465 if (unlikely(resp_idx
== new_resp_idx
)) {
11469 #if (defined SSD_ESCAPE_IRQ)
11470 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11471 dev
->irq_cpu
= smp_processor_id();
11475 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11478 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11481 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11482 msg
->u64_msg
= *u64_msg
;
11484 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11485 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11488 /* clear the resp msg */
11489 *u64_msg
= (uint64_t)(-1);
11491 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11492 /*if (unlikely(!cmd->bio)) {
11493 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11494 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11498 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11499 cmd
->errors
= -EIO
;
11503 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11507 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11508 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11509 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11510 queue_work(dev
->workq
, &dev
->log_work
);
11514 if (unlikely(msg
->resp_msg
.status
)) {
11515 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11516 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11517 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11520 ssd_set_alarm(dev
);
11521 queue
->io_stat
.nr_rwerr
++;
11522 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11524 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11525 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11527 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11529 queue
->io_stat
.nr_ioerr
++;
11532 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11533 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11534 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11536 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11538 }while (resp_idx
!= end_resp_idx
);
11540 queue
->resp_idx
= new_resp_idx
;
11542 return IRQ_HANDLED
;
11545 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11546 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11548 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11552 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11553 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11555 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11556 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11558 ret
= ssd_interrupt(irq
, dev_id
);
11562 if (IRQ_HANDLED
== ret
) {
11563 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11569 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11573 for (i
=0; i
<dev
->nr_queue
; i
++) {
11574 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11578 static void ssd_free_irq(struct ssd_device
*dev
)
11582 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11583 if (SSD_INT_MSIX
== dev
->int_mode
) {
11584 for (i
=0; i
<dev
->nr_queue
; i
++) {
11585 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11590 for (i
=0; i
<dev
->nr_queue
; i
++) {
11591 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11592 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11594 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11598 if (SSD_INT_MSIX
== dev
->int_mode
) {
11599 pci_disable_msix(dev
->pdev
);
11600 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11601 pci_disable_msi(dev
->pdev
);
11606 static int ssd_init_irq(struct ssd_device
*dev
)
11608 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11609 const struct cpumask
*cpu_mask
= NULL
;
11610 static int cpu_affinity
= 0;
11612 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11613 const struct cpumask
*mask
= NULL
;
11614 static int cpu
= 0;
11618 unsigned long flags
= 0;
11621 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11623 #ifdef SSD_ESCAPE_IRQ
11627 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11628 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11629 dev
->nr_queue
= SSD_MSIX_VEC
;
11631 for (i
=0; i
<dev
->nr_queue
; i
++) {
11632 dev
->entry
[i
].entry
= i
;
11635 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11638 } else if (ret
> 0) {
11639 dev
->nr_queue
= ret
;
11641 hio_warn("%s: can not enable msix\n", dev
->name
);
11643 ssd_set_alarm(dev
);
11648 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11649 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11650 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11651 cpu
= cpumask_first(mask
);
11653 for (i
=0; i
<dev
->nr_queue
; i
++) {
11654 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11655 cpu
= cpumask_next(cpu
, mask
);
11656 if (cpu
>= nr_cpu_ids
) {
11657 cpu
= cpumask_first(mask
);
11662 dev
->int_mode
= SSD_INT_MSIX
;
11663 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11664 ret
= pci_enable_msi(dev
->pdev
);
11666 hio_warn("%s: can not enable msi\n", dev
->name
);
11668 ssd_set_alarm(dev
);
11673 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11675 dev
->int_mode
= SSD_INT_MSI
;
11678 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11680 dev
->int_mode
= SSD_INT_LEGACY
;
11683 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11684 dev
->nr_queue
= SSD_MSIX_VEC
;
11686 dev
->nr_queue
= pci_alloc_irq_vectors(dev
->pdev
, 1, dev
->nr_queue
, PCI_IRQ_MSIX
| PCI_IRQ_AFFINITY
);
11687 if (dev
->nr_queue
<= 0) {
11689 hio_warn("%s: can not enable msix\n", dev
->name
);
11690 ssd_set_alarm(dev
);
11694 dev
->int_mode
= SSD_INT_MSIX
;
11695 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11697 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_MSI
| PCI_IRQ_AFFINITY
);
11700 hio_warn("%s: can not enable msi\n", dev
->name
);
11702 ssd_set_alarm(dev
);
11707 dev
->int_mode
= SSD_INT_MSI
;
11709 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_LEGACY
);
11713 hio_warn("%s: can not enable msi\n", dev
->name
);
11715 ssd_set_alarm(dev
);
11720 dev
->int_mode
= SSD_INT_LEGACY
;
11724 for (i
=0; i
<dev
->nr_queue
; i
++) {
11725 if (dev
->nr_queue
> 1) {
11726 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11728 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11731 dev
->queue
[i
].dev
= dev
;
11732 dev
->queue
[i
].idx
= i
;
11734 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11735 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11737 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11738 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11739 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11740 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11742 dev
->queue
[i
].cmd
= dev
->cmd
;
11745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11746 flags
= IRQF_SHARED
;
11751 for (i
=0; i
<dev
->nr_queue
; i
++) {
11752 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
11753 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11754 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11756 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11758 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11759 if (threaded_irq
) {
11760 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11761 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11762 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11764 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11767 if (threaded_irq
) {
11768 ret
= request_threaded_irq(pci_irq_vector(dev
->pdev
, i
), ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11769 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11770 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11772 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11776 hio_warn("%s: request irq failed\n", dev
->name
);
11778 ssd_set_alarm(dev
);
11779 goto out_request_irq
;
11782 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11783 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11784 if (SSD_INT_MSIX
== dev
->int_mode
) {
11785 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11786 cpu_affinity
= cpumask_first(cpu_mask
);
11789 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11790 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11791 if (cpu_affinity
>= nr_cpu_ids
) {
11792 cpu_affinity
= cpumask_first(cpu_mask
);
11801 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11802 if (SSD_INT_MSIX
== dev
->int_mode
) {
11803 for (j
=0; j
<dev
->nr_queue
; j
++) {
11804 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11809 for (i
--; i
>=0; i
--) {
11810 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11811 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11813 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11817 if (SSD_INT_MSIX
== dev
->int_mode
) {
11818 pci_disable_msix(dev
->pdev
);
11819 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11820 pci_disable_msi(dev
->pdev
);
11827 static void ssd_initial_log(struct ssd_device
*dev
)
11830 uint32_t speed
, width
;
11832 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11836 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11838 // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart'
11839 //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
11842 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11844 width
= (val
>> 4)& 0x3F;
11845 if (0x1 == speed
) {
11846 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11847 } else if (0x2 == speed
) {
11848 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11850 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11852 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11857 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11858 static void ssd_hwmon_worker(void *data
)
11860 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11862 static void ssd_hwmon_worker(struct work_struct
*work
)
11864 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11867 if (ssd_check_hw(dev
)) {
11868 //hio_err("%s: check hardware failed\n", dev->name);
11872 ssd_check_clock(dev
);
11873 ssd_check_volt(dev
);
11875 ssd_mon_boardvolt(dev
);
11878 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11879 static void ssd_tempmon_worker(void *data
)
11881 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11883 static void ssd_tempmon_worker(struct work_struct
*work
)
11885 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11888 if (ssd_check_hw(dev
)) {
11889 //hio_err("%s: check hardware failed\n", dev->name);
11897 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11898 static void ssd_capmon_worker(void *data
)
11900 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11902 static void ssd_capmon_worker(struct work_struct
*work
)
11904 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11907 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11910 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11914 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11918 /* fault before? */
11919 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11920 ret
= ssd_check_pl_cap_fast(dev
);
11927 ret
= ssd_do_cap_learn(dev
, &cap
);
11929 hio_err("%s: cap learn failed\n", dev
->name
);
11930 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11934 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11936 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11937 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11940 //use the fw event id?
11941 if (cap
< cap_threshold
) {
11942 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11943 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11945 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11946 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11947 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11952 static void ssd_routine_start(void *data
)
11954 struct ssd_device
*dev
;
11961 dev
->routine_tick
++;
11963 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11964 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11965 queue_work(dev
->workq
, &dev
->log_work
);
11968 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11969 queue_work(dev
->workq
, &dev
->hwmon_work
);
11972 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11973 queue_work(dev
->workq
, &dev
->capmon_work
);
11976 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11977 /* CAP fault? check again */
11978 queue_work(dev
->workq
, &dev
->capmon_work
);
11981 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11982 queue_work(dev
->workq
, &dev
->tempmon_work
);
11985 /* schedule routine */
11986 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11989 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11991 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11994 (void)ssd_del_timer(&dev
->routine_timer
);
11996 (void)ssd_del_timer(&dev
->bm_timer
);
11999 static int ssd_init_routine(struct ssd_device
*dev
)
12001 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
12004 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12005 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
12006 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
12007 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
12008 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
12010 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
12011 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
12012 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
12013 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
12017 ssd_initial_log(dev
);
12019 /* schedule bm routine */
12020 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
12022 /* schedule routine */
12023 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
12029 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12032 ssd_remove_one (struct pci_dev
*pdev
)
12034 struct ssd_device
*dev
;
12040 dev
= pci_get_drvdata(pdev
);
12045 list_del_init(&dev
->list
);
12047 ssd_unregister_sysfs(dev
);
12049 /* offline firstly */
12050 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12052 /* clean work queue first */
12054 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12055 ssd_cleanup_workq(dev
);
12059 (void)ssd_flush(dev
);
12060 (void)ssd_save_md(dev
);
12064 ssd_save_smart(dev
);
12067 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
12068 ssd_cleanup_blkdev(dev
);
12072 ssd_cleanup_chardev(dev
);
12075 /* clean routine */
12077 ssd_cleanup_routine(dev
);
12080 ssd_cleanup_queue(dev
);
12082 ssd_cleanup_tag(dev
);
12083 ssd_cleanup_thread(dev
);
12087 ssd_cleanup_dcmd(dev
);
12088 ssd_cleanup_cmd(dev
);
12089 ssd_cleanup_response(dev
);
12092 ssd_cleanup_log(dev
);
12095 if (dev
->reload_fw
) { //reload fw
12096 dev
->has_non_0x98_reg_access
= 1;
12097 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12100 /* unmap physical adress */
12101 #ifdef LINUX_SUSE_OS
12102 iounmap(dev
->ctrlp
);
12104 pci_iounmap(pdev
, dev
->ctrlp
);
12107 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12109 pci_disable_device(pdev
);
12111 pci_set_drvdata(pdev
, NULL
);
12117 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12120 ssd_init_one(struct pci_dev
*pdev
,
12121 const struct pci_device_id
*ent
)
12123 struct ssd_device
*dev
;
12127 if (!pdev
|| !ent
) {
12132 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
12135 goto out_alloc_dev
;
12137 memset(dev
, 0, sizeof(struct ssd_device
));
12139 dev
->owner
= THIS_MODULE
;
12141 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
12145 dev
->idx
= ssd_get_index(dev
->slave
);
12146 if (dev
->idx
< 0) {
12148 goto out_get_index
;
12152 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
12153 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
12155 dev
->major
= ssd_major
;
12156 dev
->cmajor
= ssd_cmajor
;
12158 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
12159 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
12160 dev
->major
= ssd_major_sl
;
12164 do_gettimeofday(&tv
);
12165 dev
->reset_time
= tv
.tv_sec
;
12167 atomic_set(&(dev
->refcnt
), 0);
12168 atomic_set(&(dev
->tocnt
), 0);
12170 mutex_init(&dev
->fw_mutex
);
12173 mutex_init(&dev
->gd_mutex
);
12174 dev
->has_non_0x98_reg_access
= 0;
12176 //init in_flight lock
12177 spin_lock_init(&dev
->in_flight_lock
);
12180 pci_set_drvdata(pdev
, dev
);
12182 kref_init(&dev
->kref
);
12184 ret
= pci_enable_device(pdev
);
12186 hio_warn("%s: can not enable device\n", dev
->name
);
12187 goto out_enable_device
;
12190 pci_set_master(pdev
);
12192 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12193 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12195 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12198 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12199 goto out_set_dma_mask
;
12202 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12203 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12205 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12208 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12209 goto out_set_dma_mask
;
12212 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12213 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12215 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12216 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12218 goto out_request_mem_region
;
12221 /* 2.6.9 kernel bug */
12222 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12224 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12226 goto out_pci_iomap
;
12229 ret
= ssd_check_hw(dev
);
12231 hio_err("%s: check hardware failed\n", dev
->name
);
12235 ret
= ssd_init_protocol_info(dev
);
12237 hio_err("%s: init protocol info failed\n", dev
->name
);
12238 goto out_init_protocol_info
;
12242 ssd_clear_alarm(dev
);
12244 ret
= ssd_init_fw_info(dev
);
12246 hio_err("%s: init firmware info failed\n", dev
->name
);
12248 ssd_set_alarm(dev
);
12249 goto out_init_fw_info
;
12257 ret
= ssd_init_rom_info(dev
);
12259 hio_err("%s: init rom info failed\n", dev
->name
);
12261 ssd_set_alarm(dev
);
12262 goto out_init_rom_info
;
12265 ret
= ssd_init_label(dev
);
12267 hio_err("%s: init label failed\n", dev
->name
);
12269 ssd_set_alarm(dev
);
12270 goto out_init_label
;
12273 ret
= ssd_init_workq(dev
);
12275 hio_warn("%s: init workq failed\n", dev
->name
);
12276 goto out_init_workq
;
12278 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12280 ret
= ssd_init_log(dev
);
12282 hio_err("%s: init log failed\n", dev
->name
);
12284 ssd_set_alarm(dev
);
12288 ret
= ssd_init_smart(dev
);
12290 hio_err("%s: init info failed\n", dev
->name
);
12292 ssd_set_alarm(dev
);
12293 goto out_init_smart
;
12297 ret
= ssd_init_hw_info(dev
);
12299 hio_err("%s: init hardware info failed\n", dev
->name
);
12301 ssd_set_alarm(dev
);
12302 goto out_init_hw_info
;
12310 ret
= ssd_init_sensor(dev
);
12312 hio_err("%s: init sensor failed\n", dev
->name
);
12314 ssd_set_alarm(dev
);
12315 goto out_init_sensor
;
12318 ret
= ssd_init_pl_cap(dev
);
12320 hio_err("%s: int pl_cap failed\n", dev
->name
);
12322 ssd_set_alarm(dev
);
12323 goto out_init_pl_cap
;
12327 ret
= ssd_check_init_state(dev
);
12329 hio_err("%s: check init state failed\n", dev
->name
);
12331 ssd_set_alarm(dev
);
12332 goto out_check_init_state
;
12335 ret
= ssd_init_response(dev
);
12337 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12338 goto out_init_response
;
12341 ret
= ssd_init_cmd(dev
);
12343 hio_warn("%s: init msg failed\n", dev
->name
);
12347 ret
= ssd_init_dcmd(dev
);
12349 hio_warn("%s: init cmd failed\n", dev
->name
);
12350 goto out_init_dcmd
;
12353 ret
= ssd_init_irq(dev
);
12355 hio_warn("%s: init irq failed\n", dev
->name
);
12359 ret
= ssd_init_thread(dev
);
12361 hio_warn("%s: init thread failed\n", dev
->name
);
12362 goto out_init_thread
;
12365 ret
= ssd_init_tag(dev
);
12367 hio_warn("%s: init tags failed\n", dev
->name
);
12368 goto out_init_tags
;
12372 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12374 ret
= ssd_init_queue(dev
);
12376 hio_warn("%s: init queue failed\n", dev
->name
);
12377 goto out_init_queue
;
12385 ret
= ssd_init_ot_protect(dev
);
12387 hio_err("%s: int ot_protect failed\n", dev
->name
);
12389 ssd_set_alarm(dev
);
12390 goto out_int_ot_protect
;
12393 ret
= ssd_init_wmode(dev
);
12395 hio_warn("%s: init write mode\n", dev
->name
);
12396 goto out_init_wmode
;
12399 /* init routine after hw is ready */
12400 ret
= ssd_init_routine(dev
);
12402 hio_warn("%s: init routine\n", dev
->name
);
12403 goto out_init_routine
;
12406 ret
= ssd_init_chardev(dev
);
12408 hio_warn("%s: register char device failed\n", dev
->name
);
12409 goto out_init_chardev
;
12413 ret
= ssd_init_blkdev(dev
);
12415 hio_warn("%s: register block device failed\n", dev
->name
);
12416 goto out_init_blkdev
;
12418 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12420 ret
= ssd_register_sysfs(dev
);
12422 hio_warn("%s: register sysfs failed\n", dev
->name
);
12423 goto out_register_sysfs
;
12428 list_add_tail(&dev
->list
, &ssd_list
);
12432 out_register_sysfs
:
12433 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12434 ssd_cleanup_blkdev(dev
);
12438 ssd_cleanup_chardev(dev
);
12443 ssd_cleanup_routine(dev
);
12447 out_int_ot_protect
:
12448 ssd_cleanup_queue(dev
);
12450 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12451 ssd_cleanup_tag(dev
);
12453 ssd_cleanup_thread(dev
);
12457 ssd_cleanup_dcmd(dev
);
12459 ssd_cleanup_cmd(dev
);
12461 ssd_cleanup_response(dev
);
12463 out_check_init_state
:
12470 ssd_cleanup_log(dev
);
12475 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12476 ssd_cleanup_workq(dev
);
12482 out_init_protocol_info
:
12484 #ifdef LINUX_SUSE_OS
12485 iounmap(dev
->ctrlp
);
12487 pci_iounmap(pdev
, dev
->ctrlp
);
12490 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12491 out_request_mem_region
:
12493 pci_disable_device(pdev
);
12495 pci_set_drvdata(pdev
, NULL
);
12503 static void ssd_cleanup_tasklet(void)
12506 for_each_online_cpu(i
) {
12507 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
12511 static int ssd_init_tasklet(void)
12515 for_each_online_cpu(i
) {
12516 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12519 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12521 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
12528 static struct pci_device_id ssd_pci_tbl
[] = {
12529 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12530 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12531 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12532 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12533 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12537 /*driver power management handler for pm_ops*/
12538 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12539 static int ssd_hio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
12542 static int ssd_hio_suspend(struct device
*ddev
)
12544 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12546 struct ssd_device
*dev
;
12553 dev
= pci_get_drvdata(pdev
);
12558 hio_warn("%s: suspend disk start.\n", dev
->name
);
12559 ssd_unregister_sysfs(dev
);
12561 /* offline firstly */
12562 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12564 /* clean work queue first */
12566 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12567 ssd_cleanup_workq(dev
);
12571 (void)ssd_flush(dev
);
12572 (void)ssd_save_md(dev
);
12576 ssd_save_smart(dev
);
12579 /* clean routine */
12581 ssd_cleanup_routine(dev
);
12584 ssd_cleanup_thread(dev
);
12589 ssd_cleanup_log(dev
);
12592 if (dev
->reload_fw
) { //reload fw
12593 dev
->has_non_0x98_reg_access
= 1;
12594 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12597 /* unmap physical adress */
12599 #ifdef LINUX_SUSE_OS
12600 iounmap(dev
->ctrlp
);
12602 pci_iounmap(pdev
, dev
->ctrlp
);
12607 if (dev
->mmio_base
) {
12608 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12609 dev
->mmio_base
= 0;
12612 pci_disable_device(pdev
);
12614 hio_warn("%s: suspend disk finish.\n", dev
->name
);
12620 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12621 static int ssd_hio_resume(struct pci_dev
*pdev
)
12624 static int ssd_hio_resume(struct device
*ddev
)
12626 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12628 struct ssd_device
*dev
= NULL
;
12636 dev
= pci_get_drvdata(pdev
);
12639 goto out_alloc_dev
;
12642 hio_warn("%s: resume disk start.\n", dev
->name
);
12643 ret
= pci_enable_device(pdev
);
12645 hio_warn("%s: can not enable device\n", dev
->name
);
12646 goto out_enable_device
;
12649 pci_set_master(pdev
);
12651 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12652 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12654 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12657 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12658 goto out_set_dma_mask
;
12661 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12662 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12664 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12667 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12668 goto out_set_dma_mask
;
12671 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12672 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12674 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12675 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12677 goto out_request_mem_region
;
12680 /* 2.6.9 kernel bug */
12681 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12683 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12685 goto out_pci_iomap
;
12688 ret
= ssd_check_hw(dev
);
12690 hio_err("%s: check hardware failed\n", dev
->name
);
12695 ssd_clear_alarm(dev
);
12697 ret
= ssd_init_fw_info(dev
);
12699 hio_err("%s: init firmware info failed\n", dev
->name
);
12701 ssd_set_alarm(dev
);
12702 goto out_init_fw_info
;
12710 ret
= ssd_init_rom_info(dev
);
12712 hio_err("%s: init rom info failed\n", dev
->name
);
12714 ssd_set_alarm(dev
);
12715 goto out_init_rom_info
;
12718 ret
= ssd_init_label(dev
);
12720 hio_err("%s: init label failed\n", dev
->name
);
12722 ssd_set_alarm(dev
);
12723 goto out_init_label
;
12726 ret
= ssd_init_workq(dev
);
12728 hio_warn("%s: init workq failed\n", dev
->name
);
12729 goto out_init_workq
;
12731 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12733 ret
= ssd_init_log(dev
);
12735 hio_err("%s: init log failed\n", dev
->name
);
12737 ssd_set_alarm(dev
);
12741 ret
= ssd_init_smart(dev
);
12743 hio_err("%s: init info failed\n", dev
->name
);
12745 ssd_set_alarm(dev
);
12746 goto out_init_smart
;
12750 ret
= ssd_init_hw_info(dev
);
12752 hio_err("%s: init hardware info failed\n", dev
->name
);
12754 ssd_set_alarm(dev
);
12755 goto out_init_hw_info
;
12763 ret
= ssd_init_sensor(dev
);
12765 hio_err("%s: init sensor failed\n", dev
->name
);
12767 ssd_set_alarm(dev
);
12768 goto out_init_sensor
;
12771 ret
= ssd_init_pl_cap(dev
);
12773 hio_err("%s: int pl_cap failed\n", dev
->name
);
12775 ssd_set_alarm(dev
);
12776 goto out_init_pl_cap
;
12780 ret
= ssd_check_init_state(dev
);
12782 hio_err("%s: check init state failed\n", dev
->name
);
12784 ssd_set_alarm(dev
);
12785 goto out_check_init_state
;
12788 //flush all base pointer to ssd
12789 (void)ssd_reload_ssd_ptr(dev
);
12791 ret
= ssd_init_irq(dev
);
12793 hio_warn("%s: init irq failed\n", dev
->name
);
12797 ret
= ssd_init_thread(dev
);
12799 hio_warn("%s: init thread failed\n", dev
->name
);
12800 goto out_init_thread
;
12804 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12811 ret
= ssd_init_ot_protect(dev
);
12813 hio_err("%s: int ot_protect failed\n", dev
->name
);
12815 ssd_set_alarm(dev
);
12816 goto out_int_ot_protect
;
12819 ret
= ssd_init_wmode(dev
);
12821 hio_warn("%s: init write mode\n", dev
->name
);
12822 goto out_init_wmode
;
12825 /* init routine after hw is ready */
12826 ret
= ssd_init_routine(dev
);
12828 hio_warn("%s: init routine\n", dev
->name
);
12829 goto out_init_routine
;
12833 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12837 hio_warn("%s: resume disk finish.\n", dev
->name
);
12843 out_int_ot_protect
:
12844 ssd_cleanup_thread(dev
);
12848 out_check_init_state
:
12855 ssd_cleanup_log(dev
);
12860 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12861 ssd_cleanup_workq(dev
);
12868 #ifdef LINUX_SUSE_OS
12869 iounmap(dev
->ctrlp
);
12871 pci_iounmap(pdev
, dev
->ctrlp
);
12874 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12875 out_request_mem_region
:
12877 pci_disable_device(pdev
);
12882 hio_warn("%s: resume disk fail.\n", dev
->name
);
12887 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12889 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12891 SIMPLE_DEV_PM_OPS(hio_pm_ops
, ssd_hio_suspend
, ssd_hio_resume
);
12894 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12895 struct pci_driver ssd_driver
= {
12896 .name
= MODULE_NAME
,
12897 .id_table
= ssd_pci_tbl
,
12898 .probe
= ssd_init_one
,
12899 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12900 .remove
= __devexit_p(ssd_remove_one
),
12902 .remove
= ssd_remove_one
,
12905 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12906 .suspend
= ssd_hio_suspend
,
12907 .resume
= ssd_hio_resume
,
12915 /* notifier block to get a notify on system shutdown/halt/reboot */
12916 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12918 struct ssd_device
*dev
= NULL
;
12919 struct ssd_device
*n
= NULL
;
12921 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12922 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12924 (void)ssd_flush(dev
);
12925 (void)ssd_save_md(dev
);
12929 ssd_save_smart(dev
);
12931 ssd_stop_workq(dev
);
12933 if (dev
->reload_fw
) {
12934 dev
->has_non_0x98_reg_access
= 1;
12935 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12943 static struct notifier_block ssd_notifier
= {
12944 ssd_notify_reboot
, NULL
, 0
12947 static int __init
ssd_init_module(void)
12951 hio_info("driver version: %s\n", DRIVER_VERSION
);
12953 ret
= ssd_init_index();
12955 hio_warn("init index failed\n");
12956 goto out_init_index
;
12959 ret
= ssd_init_proc();
12961 hio_warn("init proc failed\n");
12962 goto out_init_proc
;
12965 ret
= ssd_init_sysfs();
12967 hio_warn("init sysfs failed\n");
12968 goto out_init_sysfs
;
12971 ret
= ssd_init_tasklet();
12973 hio_warn("init tasklet failed\n");
12974 goto out_init_tasklet
;
12977 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12978 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12980 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12982 if (IS_ERR(ssd_class
)) {
12983 ret
= PTR_ERR(ssd_class
);
12984 goto out_class_create
;
12987 if (ssd_cmajor
> 0) {
12988 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12990 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12993 hio_warn("unable to register chardev major number\n");
12994 goto out_register_chardev
;
12997 if (ssd_major
> 0) {
12998 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13000 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13003 hio_warn("unable to register major number\n");
13004 goto out_register_blkdev
;
13007 if (ssd_major_sl
> 0) {
13008 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13010 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13013 hio_warn("unable to register slave major number\n");
13014 goto out_register_blkdev_sl
;
13017 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
13018 mode
= SSD_DRV_MODE_STANDARD
;
13022 if (mode
!= SSD_DRV_MODE_STANDARD
) {
13026 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
13027 int_mode
= SSD_INT_MODE_DEFAULT
;
13030 if (threaded_irq
) {
13031 int_mode
= SSD_INT_MSI
;
13034 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
13035 log_level
= SSD_LOG_LEVEL_ERR
;
13038 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
13039 wmode
= SSD_WMODE_DEFAULT
;
13042 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
13043 ret
= pci_module_init(&ssd_driver
);
13045 ret
= pci_register_driver(&ssd_driver
);
13048 hio_warn("pci init failed\n");
13052 ret
= register_reboot_notifier(&ssd_notifier
);
13054 hio_warn("register reboot notifier failed\n");
13055 goto out_register_reboot_notifier
;
13060 out_register_reboot_notifier
:
13062 pci_unregister_driver(&ssd_driver
);
13063 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13064 out_register_blkdev_sl
:
13065 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13066 out_register_blkdev
:
13067 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13068 out_register_chardev
:
13069 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13070 class_simple_destroy(ssd_class
);
13072 class_destroy(ssd_class
);
13075 ssd_cleanup_tasklet();
13077 ssd_cleanup_sysfs();
13079 ssd_cleanup_proc();
13081 ssd_cleanup_index();
13087 static void __exit
ssd_cleanup_module(void)
13090 hio_info("unload driver: %s\n", DRIVER_VERSION
);
13094 unregister_reboot_notifier(&ssd_notifier
);
13096 pci_unregister_driver(&ssd_driver
);
13098 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13099 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13100 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13101 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13102 class_simple_destroy(ssd_class
);
13104 class_destroy(ssd_class
);
13107 ssd_cleanup_tasklet();
13108 ssd_cleanup_sysfs();
13109 ssd_cleanup_proc();
13110 ssd_cleanup_index();
13113 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
13115 struct ssd_device
*dev
;
13117 struct ssd_log
*le
, *temp_le
= NULL
;
13122 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
13126 dev
= bdev
->bd_disk
->private_data
;
13127 dev
->event_call
= event_call
;
13129 do_gettimeofday(&tv
);
13132 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
13133 log_nr
= dev
->internal_log
.nr_log
;
13136 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
13137 if ((le
->le
.event
== SSD_LOG_SEU_FAULT1
) && (le
->time
< dev
->reset_time
)) {
13141 if (le
->le
.event
== SSD_LOG_OVER_TEMP
|| le
->le
.event
== SSD_LOG_NORMAL_TEMP
|| le
->le
.event
== SSD_LOG_WARN_TEMP
) {
13142 if (!temp_le
|| le
->time
>= temp_le
->time
) {
13148 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
13153 ssd_get_temperature(bdev
, &temp
);
13154 if (temp_le
&& (temp
>= SSD_OT_TEMP_HYST
)) {
13155 (void)dev
->event_call(dev
->gd
, temp_le
->le
.event
, ssd_parse_log(dev
, temp_le
, 0));
13161 int ssd_unregister_event_notifier(struct block_device
*bdev
)
13163 struct ssd_device
*dev
;
13165 if (!bdev
|| !(bdev
->bd_disk
)) {
13169 dev
= bdev
->bd_disk
->private_data
;
13170 dev
->event_call
= NULL
;
13175 EXPORT_SYMBOL(ssd_get_label
);
13176 EXPORT_SYMBOL(ssd_get_version
);
13177 EXPORT_SYMBOL(ssd_set_otprotect
);
13178 EXPORT_SYMBOL(ssd_bm_status
);
13179 EXPORT_SYMBOL(ssd_submit_pbio
);
13180 EXPORT_SYMBOL(ssd_get_pciaddr
);
13181 EXPORT_SYMBOL(ssd_get_temperature
);
13182 EXPORT_SYMBOL(ssd_register_event_notifier
);
13183 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
13184 EXPORT_SYMBOL(ssd_reset
);
13185 EXPORT_SYMBOL(ssd_set_wmode
);
13189 module_init(ssd_init_module
);
13190 module_exit(ssd_cleanup_module
);
13191 MODULE_VERSION(DRIVER_VERSION
);
13192 MODULE_LICENSE("GPL");
13193 MODULE_AUTHOR("Huawei SSD DEV Team");
13194 MODULE_DESCRIPTION("Huawei SSD driver");