2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
65 #define MODULE_NAME "hio"
66 #define DRIVER_VERSION "2.1.0.40"
67 #define DRIVER_VERSION_LEN 16
69 #define SSD_FW_MIN 0x1
71 #define SSD_DEV_NAME MODULE_NAME
72 #define SSD_DEV_NAME_LEN 16
73 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
74 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
79 #define SSD_MAJOR_SL 0
82 #define SSD_MAX_DEV 702
83 #define SSD_ALPHABET_NUM 26
/* Kernel log helpers: prefix every message with the module name plus a
 * severity tag before handing it to printk() at the matching KERN_* level. */
85 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
86 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
87 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
88 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
91 #define SSD_SLAVE_PORT_DEVID 0x000a
95 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
97 #define SSD_ESCAPE_IRQ
103 #define SSD_MSIX_VEC 8
106 #undef SSD_ESCAPE_IRQ
107 #define SSD_MSIX_AFFINITY_FORCE
112 /* Over temperature protect */
113 #define SSD_OT_PROTECT
115 #ifdef SSD_QUEUE_PBIO
116 #define BIO_SSD_PBIO 20
120 //#define SSD_DEBUG_ERR
123 #define SSD_CMD_TIMEOUT (60*HZ)
126 #define SSD_SPI_TIMEOUT (5*HZ)
127 #define SSD_I2C_TIMEOUT (5*HZ)
129 #define SSD_I2C_MAX_DATA (127)
130 #define SSD_SMBUS_BLOCK_MAX (32)
131 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
134 #define SSD_INIT_WAIT (1000) //1s
135 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
136 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
137 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
138 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
139 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
141 /* blkdev busy wait */
142 #define SSD_DEV_BUSY_WAIT 1000 //ms
143 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
146 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
147 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
149 #define SSD_BM_RETRY_MAX 7
151 /* bm routine interval */
152 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
154 /* routine interval */
155 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
156 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
157 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
158 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
161 #define SSD_DMA_ALIGN (16)
163 /* some hw default */
164 #define SSD_LOG_MAX_SZ 4096
166 #define SSD_NAND_OOB_SZ 1024
167 #define SSD_NAND_ID_SZ 8
168 #define SSD_NAND_ID_BUFF_SZ 1024
169 #define SSD_NAND_MAX_CE 2
171 #define SSD_BBT_RESERVED 8
173 #define SSD_ECC_MAX_FLIP (64+1)
175 #define SSD_RAM_ALIGN 16
178 #define SSD_RELOAD_FLAG 0x3333CCCC
179 #define SSD_RELOAD_FW 0xAA5555AA
180 #define SSD_RESET_NOINIT 0xAA5555AA
181 #define SSD_RESET 0x55AAAA55
182 #define SSD_RESET_FULL 0x5A
183 //#define SSD_RESET_WAIT 1000 //1s
184 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
188 #define SSD_PROTOCOL_V1 0x0
190 #define SSD_ROM_SIZE (16*1024*1024)
191 #define SSD_ROM_BLK_SIZE (256*1024)
192 #define SSD_ROM_PAGE_SIZE (256)
193 #define SSD_ROM_NR_BRIDGE_FW 2
194 #define SSD_ROM_NR_CTRL_FW 2
195 #define SSD_ROM_BRIDGE_FW_BASE 0
196 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
197 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
198 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
199 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
200 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
203 #define SSD_PROTOCOL_V3 0x3000000
204 #define SSD_PROTOCOL_V3_1_1 0x3010001
205 #define SSD_PROTOCOL_V3_1_3 0x3010003
206 #define SSD_PROTOCOL_V3_2 0x3020000
207 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
208 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
209 #define SSD_PROTOCOL_V3_2_4 0x3020004
212 #define SSD_PV3_ROM_NR_BM_FW 1
213 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
215 #define SSD_ROM_LOG_SZ (64*1024*4)
217 #define SSD_ROM_NR_SMART_MAX 2
218 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
219 #define SSD_PV3_ROM_SMART_SZ (64*1024)
222 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
223 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
227 #define SSD_REQ_FIFO_REG 0x0000
228 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
229 #define SSD_RESP_PTR_REG 0x0010 //0x0018
230 #define SSD_INTR_INTERVAL_REG 0x0018
231 #define SSD_READY_REG 0x001C
232 #define SSD_BRIDGE_TEST_REG 0x0020
233 #define SSD_STRIPE_SIZE_REG 0x0028
234 #define SSD_CTRL_VER_REG 0x0030 //controller
235 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
236 #define SSD_PCB_VER_REG 0x0038
237 #define SSD_BURN_FLAG_REG 0x0040
238 #define SSD_BRIDGE_INFO_REG 0x0044
240 #define SSD_WL_VAL_REG 0x0048 //32-bit
242 #define SSD_BB_INFO_REG 0x004C
244 #define SSD_ECC_TEST_REG 0x0050 //test only
245 #define SSD_ERASE_TEST_REG 0x0058 //test only
246 #define SSD_WRITE_TEST_REG 0x0060 //test only
248 #define SSD_RESET_REG 0x0068
249 #define SSD_RELOAD_FW_REG 0x0070
251 #define SSD_RESERVED_BLKS_REG 0x0074
252 #define SSD_VALID_PAGES_REG 0x0078
253 #define SSD_CH_INFO_REG 0x007C
255 #define SSD_CTRL_TEST_REG_SZ 0x8
256 #define SSD_CTRL_TEST_REG0 0x0080
257 #define SSD_CTRL_TEST_REG1 0x0088
258 #define SSD_CTRL_TEST_REG2 0x0090
259 #define SSD_CTRL_TEST_REG3 0x0098
260 #define SSD_CTRL_TEST_REG4 0x00A0
261 #define SSD_CTRL_TEST_REG5 0x00A8
262 #define SSD_CTRL_TEST_REG6 0x00B0
263 #define SSD_CTRL_TEST_REG7 0x00B8
265 #define SSD_FLASH_INFO_REG0 0x00C0
266 #define SSD_FLASH_INFO_REG1 0x00C8
267 #define SSD_FLASH_INFO_REG2 0x00D0
268 #define SSD_FLASH_INFO_REG3 0x00D8
269 #define SSD_FLASH_INFO_REG4 0x00E0
270 #define SSD_FLASH_INFO_REG5 0x00E8
271 #define SSD_FLASH_INFO_REG6 0x00F0
272 #define SSD_FLASH_INFO_REG7 0x00F8
274 #define SSD_RESP_INFO_REG 0x01B8
275 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
277 #define SSD_CHIP_INFO_REG_SZ 0x10
278 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
279 #define SSD_CHIP_INFO_REG1 0x0110
280 #define SSD_CHIP_INFO_REG2 0x0120
281 #define SSD_CHIP_INFO_REG3 0x0130
282 #define SSD_CHIP_INFO_REG4 0x0140
283 #define SSD_CHIP_INFO_REG5 0x0150
284 #define SSD_CHIP_INFO_REG6 0x0160
285 #define SSD_CHIP_INFO_REG7 0x0170
287 #define SSD_RAM_INFO_REG 0x01C4
289 #define SSD_BBT_BASE_REG 0x01C8
290 #define SSD_ECT_BASE_REG 0x01CC
292 #define SSD_CLEAR_INTR_REG 0x01F0
294 #define SSD_INIT_STATE_REG_SZ 0x8
295 #define SSD_INIT_STATE_REG0 0x0200
296 #define SSD_INIT_STATE_REG1 0x0208
297 #define SSD_INIT_STATE_REG2 0x0210
298 #define SSD_INIT_STATE_REG3 0x0218
299 #define SSD_INIT_STATE_REG4 0x0220
300 #define SSD_INIT_STATE_REG5 0x0228
301 #define SSD_INIT_STATE_REG6 0x0230
302 #define SSD_INIT_STATE_REG7 0x0238
304 #define SSD_ROM_INFO_REG 0x0600
305 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
306 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
307 #define SSD_ROM_VP_INFO_REG 0x060C
309 #define SSD_LOG_INFO_REG 0x0610
310 #define SSD_LED_REG 0x0614
311 #define SSD_MSG_BASE_REG 0x06F8
314 #define SSD_SPI_REG_CMD 0x0180
315 #define SSD_SPI_REG_CMD_HI 0x0184
316 #define SSD_SPI_REG_WDATA 0x0188
317 #define SSD_SPI_REG_ID 0x0190
318 #define SSD_SPI_REG_STATUS 0x0198
319 #define SSD_SPI_REG_RDATA 0x01A0
320 #define SSD_SPI_REG_READY 0x01A8
323 #define SSD_I2C_CTRL_REG 0x06F0
324 #define SSD_I2C_RDATA_REG 0x06F4
326 /* temperature reg */
327 #define SSD_BRIGE_TEMP_REG 0x0618
329 #define SSD_CTRL_TEMP_REG0 0x0700
330 #define SSD_CTRL_TEMP_REG1 0x0708
331 #define SSD_CTRL_TEMP_REG2 0x0710
332 #define SSD_CTRL_TEMP_REG3 0x0718
333 #define SSD_CTRL_TEMP_REG4 0x0720
334 #define SSD_CTRL_TEMP_REG5 0x0728
335 #define SSD_CTRL_TEMP_REG6 0x0730
336 #define SSD_CTRL_TEMP_REG7 0x0738
338 /* revision 3 reg */
339 #define SSD_PROTOCOL_VER_REG 0x01B4
341 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
342 #define SSD_BM_FAULT_REG 0x0660
344 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
345 #define SSD_PV3_RAM_STATUS_REG0 0x0260
346 #define SSD_PV3_RAM_STATUS_REG1 0x0264
347 #define SSD_PV3_RAM_STATUS_REG2 0x0268
348 #define SSD_PV3_RAM_STATUS_REG3 0x026C
349 #define SSD_PV3_RAM_STATUS_REG4 0x0270
350 #define SSD_PV3_RAM_STATUS_REG5 0x0274
351 #define SSD_PV3_RAM_STATUS_REG6 0x0278
352 #define SSD_PV3_RAM_STATUS_REG7 0x027C
354 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
355 #define SSD_PV3_CHIP_INFO_REG0 0x0300
356 #define SSD_PV3_CHIP_INFO_REG1 0x0340
357 #define SSD_PV3_CHIP_INFO_REG2 0x0380
358 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
359 #define SSD_PV3_CHIP_INFO_REG4 0x0400
360 #define SSD_PV3_CHIP_INFO_REG5 0x0440
361 #define SSD_PV3_CHIP_INFO_REG6 0x0480
362 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
364 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
365 #define SSD_PV3_INIT_STATE_REG0 0x0500
366 #define SSD_PV3_INIT_STATE_REG1 0x0520
367 #define SSD_PV3_INIT_STATE_REG2 0x0540
368 #define SSD_PV3_INIT_STATE_REG3 0x0560
369 #define SSD_PV3_INIT_STATE_REG4 0x0580
370 #define SSD_PV3_INIT_STATE_REG5 0x05A0
371 #define SSD_PV3_INIT_STATE_REG6 0x05C0
372 #define SSD_PV3_INIT_STATE_REG7 0x05E0
374 /* revision 3.1.1 reg */
375 #define SSD_FULL_RESET_REG 0x01B0
377 #define SSD_CTRL_REG_ZONE_SZ 0x800
379 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
380 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
382 #define SSD_BB_ACC_REG_SZ 0x4
383 #define SSD_BB_ACC_REG0 0x21C0
384 #define SSD_BB_ACC_REG1 0x29C0
385 #define SSD_BB_ACC_REG2 0x31C0
387 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
388 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
390 #define SSD_EC_ACC_REG_SZ 0x4
391 #define SSD_EC_ACC_REG0 0x21E0
392 #define SSD_EC_ACC_REG1 0x29E0
393 #define SSD_EC_ACC_REG2 0x31E0
395 /* revision 3.1.2 & 3.1.3 reg */
396 #define SSD_HW_STATUS_REG 0x02AC
398 #define SSD_PLP_INFO_REG 0x0664
400 /* revision 3.2 reg */
401 #define SSD_POWER_ON_REG 0x01EC
402 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
403 #define SSD_PL_CAP_LEARN_REG 0x01FC
405 #define SSD_FPGA_1V0_REG0 0x2070
406 #define SSD_FPGA_1V8_REG0 0x2078
407 #define SSD_FPGA_1V0_REG1 0x2870
408 #define SSD_FPGA_1V8_REG1 0x2878
410 /* revision 3.2 reg */
411 #define SSD_READ_OT_REG0 0x2260
412 #define SSD_WRITE_OT_REG0 0x2264
413 #define SSD_READ_OT_REG1 0x2A60
414 #define SSD_WRITE_OT_REG1 0x2A64
418 #define SSD_FUNC_READ 0x01
419 #define SSD_FUNC_WRITE 0x02
420 #define SSD_FUNC_NAND_READ_WOOB 0x03
421 #define SSD_FUNC_NAND_READ 0x04
422 #define SSD_FUNC_NAND_WRITE 0x05
423 #define SSD_FUNC_NAND_ERASE 0x06
424 #define SSD_FUNC_NAND_READ_ID 0x07
425 #define SSD_FUNC_READ_LOG 0x08
426 #define SSD_FUNC_TRIM 0x09
427 #define SSD_FUNC_RAM_READ 0x10
428 #define SSD_FUNC_RAM_WRITE 0x11
429 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
432 #define SSD_SPI_CMD_PROGRAM 0x02
433 #define SSD_SPI_CMD_READ 0x03
434 #define SSD_SPI_CMD_W_DISABLE 0x04
435 #define SSD_SPI_CMD_READ_STATUS 0x05
436 #define SSD_SPI_CMD_W_ENABLE 0x06
437 #define SSD_SPI_CMD_ERASE 0xd8
438 #define SSD_SPI_CMD_CLSR 0x30
439 #define SSD_SPI_CMD_READ_ID 0x9f
442 #define SSD_I2C_CTRL_READ 0x00
443 #define SSD_I2C_CTRL_WRITE 0x01
445 /* i2c internal register */
446 #define SSD_I2C_CFG_REG 0x00
447 #define SSD_I2C_DATA_REG 0x01
448 #define SSD_I2C_CMD_REG 0x02
449 #define SSD_I2C_STATUS_REG 0x03
450 #define SSD_I2C_SADDR_REG 0x04
451 #define SSD_I2C_LEN_REG 0x05
452 #define SSD_I2C_RLEN_REG 0x06
453 #define SSD_I2C_WLEN_REG 0x07
454 #define SSD_I2C_RESET_REG 0x08 //write for reset
455 #define SSD_I2C_PRER_REG 0x09
459 /* FPGA volt = ADC_value / 4096 * 3v */
460 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
461 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
462 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
463 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
466 #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
467 #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4)
468 #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4)
469 #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12)
471 #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt))
482 SSD_CLOCK_166M_LOST
= 0,
490 #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
491 #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)
493 #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8))
495 #define SSD_INLET_OT_TEMP (55) //55 DegC
496 #define SSD_INLET_OT_HYST (50) //50 DegC
497 #define SSD_FLASH_OT_TEMP (70) //70 DegC
498 #define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
518 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
519 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
520 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
522 #define SSD_LM80_REG_FAN1 0x28
523 #define SSD_LM80_REG_FAN2 0x29
524 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
526 #define SSD_LM80_REG_TEMP 0x27
527 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
528 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
529 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
530 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
532 #define SSD_LM80_REG_CONFIG 0x00
533 #define SSD_LM80_REG_ALARM1 0x01
534 #define SSD_LM80_REG_ALARM2 0x02
535 #define SSD_LM80_REG_MASK1 0x03
536 #define SSD_LM80_REG_MASK2 0x04
537 #define SSD_LM80_REG_FANDIV 0x05
538 #define SSD_LM80_REG_RES 0x06
540 #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8)
542 #define SSD_LM80_3V3_VOLT(val) ((val)*33/19)
544 #define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
585 #ifdef SSD_OT_PROTECT
586 #define SSD_OT_DELAY (60) //ms
588 #define SSD_OT_TEMP (90) //90 DegC
590 #define SSD_OT_TEMP_HYST (85) //85 DegC
593 /* fpga temperature */
/* Integer approximation of the float formula kept below for reference:
 * temp(DegC) = code * 503.975 / 4096 - 273.15, code = 12-bit ADC value. */
//#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
#define CONVERT_TEMP(val) ((val)*504/4096-273)

/* The 64-bit temperature status word packs three 16-bit samples like the
 * voltage word: bits [15:0] max, [31:16] min, [47:32] current; the low
 * 4 bits of each sample are dropped to get the 12-bit ADC code.
 * Fix: the (val) argument is now parenthesized so expression arguments
 * bind correctly against & and >>. */
#define MAX_TEMP(val) CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val) CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val) CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))
603 #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
604 #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
605 #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2)))
606 #define SSD_PL_CAP_LEARN_WAIT (20) //20ms
607 #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s
609 #define SSD_PL_CAP_CHARGE_WAIT (1000)
610 #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s
612 #define SSD_PL_CAP_VOLT(val) (val*7)
614 #define SSD_PL_CAP_VOLT_FULL (13700)
615 #define SSD_PL_CAP_VOLT_READY (12880)
617 #define SSD_PL_CAP_THRESHOLD (8900)
618 #define SSD_PL_CAP_CP_THRESHOLD (5800)
619 #define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
635 #define SSD_HWMON_OFFS_TEMP (0)
636 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
637 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
638 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
639 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
640 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
642 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
643 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
644 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
645 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
646 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
647 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
810 #define ssd_blist bio_list
811 #define ssd_blist_init bio_list_init
812 #define ssd_blist_get bio_list_get
813 #define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
823 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
824 #define mutex_lock down
825 #define mutex_unlock up
826 #define mutex semaphore
827 #define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1401 uint64_t reset_time
;
1402 int has_non_0x98_reg_access
;
1403 spinlock_t in_flight_lock
;
1405 uint64_t last_poweron_id
;
1411 typedef struct ssd_acc_info
{
1412 uint32_t threshold_l1
;
1413 uint32_t threshold_l2
;
1417 typedef struct ssd_reg_op_info
1421 } ssd_reg_op_info_t
;
1423 typedef struct ssd_spi_op_info
1428 } ssd_spi_op_info_t
;
1430 typedef struct ssd_i2c_op_info
1437 } ssd_i2c_op_info_t
;
1439 typedef struct ssd_smbus_op_info
1445 } ssd_smbus_op_info_t
;
1447 typedef struct ssd_ram_op_info
{
1451 uint8_t __user
*buf
;
1452 } ssd_ram_op_info_t
;
1454 typedef struct ssd_flash_op_info
{
1459 uint8_t __user
*buf
;
1460 } ssd_flash_op_info_t
;
1462 typedef struct ssd_sw_log_info
{
1466 } ssd_sw_log_info_t
;
1468 typedef struct ssd_version_info
1470 uint32_t bridge_ver
; /* bridge fw version */
1471 uint32_t ctrl_ver
; /* controller fw version */
1472 uint32_t bm_ver
; /* battery manager fw version */
1473 uint8_t pcb_ver
; /* main pcb version */
1474 uint8_t upper_pcb_ver
;
1477 } ssd_version_info_t
;
1479 typedef struct pci_addr
1487 typedef struct ssd_drv_param_info
{
1497 } ssd_drv_param_info_t
;
1501 enum ssd_form_factor
1503 SSD_FORM_FACTOR_HHHL
= 0,
1504 SSD_FORM_FACTOR_FHHL
1508 /* ssd power loss protect */
1517 #define SSD_BM_SLAVE_ADDRESS 0x16
1518 #define SSD_BM_CAP 5
1521 #define SSD_BM_SAFETYSTATUS 0x51
1522 #define SSD_BM_OPERATIONSTATUS 0x54
1524 /* ManufacturerAccess */
1525 #define SSD_BM_MANUFACTURERACCESS 0x00
1526 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1528 /* Data flash access */
1529 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1530 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1531 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1532 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1534 /* min cap voltage */
1535 #define SSD_BM_CAP_VOLT_MIN 500
1540 SSD_BM_CAP_VINA = 1,
1546 SSD_BMSTATUS_OK
= 0,
1547 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1548 SSD_BMSTATUS_WARNING
1553 SBS_UNIT_TEMPERATURE
,
1558 SBS_UNIT_CAPACITANCE
1586 uint16_t cap_volt
[SSD_BM_CAP
];
1593 struct ssd_bm_manufacturer_data
1595 uint16_t pack_lot_code
;
1596 uint16_t pcb_lot_code
;
1597 uint16_t firmware_ver
;
1598 uint16_t hardware_ver
;
1601 struct ssd_bm_configuration_registers
1614 uint16_t fet_action
;
1619 #define SBS_VALUE_MASK 0xffff
1621 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1622 #define bm_var(start, offset) ((void *) start + (offset))
1624 static struct sbs_cmd ssd_bm_sbs
[] = {
1625 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1626 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1627 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1628 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1629 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1630 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1631 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1632 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1633 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1634 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1635 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1636 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1637 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1638 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1639 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1640 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1641 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1642 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1643 {0, 0, 0, 0, 0, NULL
},
/*
 * Character-device ioctl command set (magic 'H').
 * NOTE(review): this region was garbled by extraction (stray line numbers
 * fused into the text); macros restored verbatim, one per line.
 */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)

#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)

#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)

#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) /* with oob */
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) /* ignore EIO */

#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)

#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
#define SSD_CMD_SET_ALARM _IOW('H', 191, int)

#define SSD_CMD_RESET _IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET _IOW('H', 206, int)

#define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG _IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)

#define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)

#define SSD_CMD_FLUSH _IOW('H', 240, int)
#define SSD_CMD_SAVE_MD _IOW('H', 241, int)

#define SSD_CMD_SET_WMODE _IOW('H', 242, int)
#define SSD_CMD_GET_WMODE _IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)

#define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING _IOW('H', 260, int)

/* Event-log sizing and display defaults. */
#define SSD_LOG_MAX_SZ 4096
#define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
#define SSD_DIF_WITH_OLD_LOG 0x3f
/*
 * Log payload interpretation kinds (enum fragment — the remaining
 * enumerators were elided by the source extraction; NOTE(review): confirm
 * SSD_LOG_DATA_LOC / SSD_LOG_DATA_HEX against the complete file).
 */
1747 SSD_LOG_DATA_NONE
= 0,
/* Raw device log record; packed to match the on-device layout.
 * NOTE(review): struct members elided by extraction — do not infer layout. */
1752 typedef struct ssd_log_entry
1770 }__attribute__((packed
))ssd_log_entry_t
;
/* Decoded log record; ctrl_idx is an 8-bit bitfield of a uint64_t. */
1772 typedef struct ssd_log
1775 uint64_t ctrl_idx
:8;
1777 } __attribute__((packed
)) ssd_log_t
;
/* Descriptor mapping one event code to severity/format/description
 * (see ssd_log_desc[] below). Members elided by extraction. */
1779 typedef struct ssd_log_desc
1787 } __attribute__((packed
)) ssd_log_desc_t
;
1789 #define SSD_LOG_SW_IDX 0xF
1790 #define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1791 static struct ssd_log_desc ssd_log_desc
[] = {
1792 /* event, level, show flash, show block, show page, desc */
1793 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1794 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1795 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1796 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1797 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1798 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1799 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1800 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1801 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1802 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1803 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1804 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1805 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1806 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1807 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1808 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1809 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1810 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1811 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1812 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1813 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1814 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1815 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1816 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1817 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1818 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1819 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1820 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1821 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1822 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1823 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1824 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Data inspection failure"},
1825 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1828 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1829 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1830 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1831 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1832 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1833 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1834 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1835 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1836 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1837 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1838 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1839 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1840 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1841 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1842 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1843 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1844 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1845 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1846 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1847 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1848 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1849 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1850 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1851 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1852 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1854 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1855 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1856 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1857 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1859 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1861 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1862 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1863 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1864 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1865 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1866 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1867 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1868 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1869 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1870 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1871 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1872 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1873 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1874 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1875 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1876 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1878 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1879 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1880 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1881 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1882 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1884 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1885 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1886 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1887 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1888 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1889 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1891 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Over temperature"}, //90
1892 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature is OK"}, //80
1893 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1894 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1895 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1896 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1897 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1898 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1899 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1900 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1901 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature close to limit"}, //85
1903 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1904 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1905 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1906 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1907 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1909 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1910 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1911 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1912 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1913 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1914 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1915 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1916 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1917 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1918 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1919 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1920 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1921 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1922 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 95 degrees C"},
1923 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 100 degrees C"},
1925 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1926 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1927 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1928 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1929 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1930 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1931 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1932 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1933 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1934 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1935 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1936 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1937 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1938 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1939 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1940 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1941 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1942 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1943 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1944 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1945 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1946 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1947 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55
1948 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50
1949 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash over temperature"}, //70
1950 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash temperature is OK"}, //65
1951 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1952 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1953 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1954 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1955 {0x320, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Temperature sensor event"},
1957 {0x350, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear smart"},
1958 {0x351, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear warning"},
1960 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
/* Hardware-reported event codes the driver reacts to directly. */
#define SSD_LOG_OVER_TEMP 0x90
#define SSD_LOG_NORMAL_TEMP 0x91
#define SSD_LOG_WARN_TEMP 0x9a
#define SSD_LOG_SEU_FAULT 0x93
#define SSD_LOG_SEU_FAULT1 0x98
#define SSD_LOG_BATTERY_FAULT 0x92
#define SSD_LOG_BATTERY_OK 0x99
#define SSD_LOG_BOARD_VOLT_FAULT 0x9f

/* Driver-generated ("software") event codes, 0x300+. */
#define SSD_LOG_TIMEOUT 0x300
#define SSD_LOG_POWER_ON 0x301
#define SSD_LOG_POWER_OFF 0x302
#define SSD_LOG_CLEAR_LOG 0x303
#define SSD_LOG_SET_CAPACITY 0x304
#define SSD_LOG_CLEAR_DATA 0x305
#define SSD_LOG_BM_SFSTATUS 0x306
#define SSD_LOG_EIO 0x307
#define SSD_LOG_ECMD 0x308
#define SSD_LOG_SET_WMODE 0x309
#define SSD_LOG_DDR_INIT_ERR 0x30a
#define SSD_LOG_PCIE_LINK_STATUS 0x30b
#define SSD_LOG_CTRL_RST_SYNC 0x30c
#define SSD_LOG_CLK_FAULT 0x30d
#define SSD_LOG_VOLT_FAULT 0x30e
#define SSD_LOG_SET_CAPACITY_END 0x30F
#define SSD_LOG_CLEAR_DATA_END 0x310
#define SSD_LOG_RESET 0x311
#define SSD_LOG_CAP_VOLT_FAULT 0x312
#define SSD_LOG_CAP_LEARN_FAULT 0x313
#define SSD_LOG_CAP_STATUS 0x314
#define SSD_LOG_VOLT_STATUS 0x315
#define SSD_LOG_INLET_OVER_TEMP 0x316
#define SSD_LOG_INLET_NORMAL_TEMP 0x317
#define SSD_LOG_FLASH_OVER_TEMP 0x318
#define SSD_LOG_FLASH_NORMAL_TEMP 0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
#define SSD_LOG_SENSOR_FAULT 0x31b
#define SSD_LOG_ERASE_ALL 0x31c
#define SSD_LOG_ERASE_ALL_END 0x31d
#define SSD_LOG_TEMP_SENSOR_EVENT 0x320
#define SSD_LOG_CLEAR_SMART 0x350
#define SSD_LOG_CLEAR_WARNING 0x351

/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ 1024
2013 static DEFINE_PER_CPU(struct list_head
, ssd_doneq
);
2014 static DEFINE_PER_CPU(struct tasklet_struct
, ssd_tasklet
);
2017 /* unloading driver */
2018 static volatile int ssd_exiting
= 0;
2020 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
2021 static struct class_simple
*ssd_class
;
2023 static struct class *ssd_class
;
2026 static int ssd_cmajor
= SSD_CMAJOR
;
2028 /* ssd block device major, minors */
2029 static int ssd_major
= SSD_MAJOR
;
2030 static int ssd_major_sl
= SSD_MAJOR_SL
;
2031 static int ssd_minors
= SSD_MINORS
;
2033 /* ssd device list */
2034 static struct list_head ssd_list
;
2035 static unsigned long ssd_index_bits
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2036 static unsigned long ssd_index_bits_sl
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2037 static atomic_t ssd_nr
;
/* Driver run modes (enum fragment — the enum's opening and closing lines
 * were elided by the source extraction). Value 1 is skipped deliberately
 * in the visible text; NOTE(review): confirm against the full file. */
2042 SSD_DRV_MODE_STANDARD
= 0, /* full */
2043 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2044 SSD_DRV_MODE_BASE
/* base only */
2054 #if (defined SSD_MSIX)
2055 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2056 #elif (defined SSD_MSI)
2057 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2059 /* auto select the defaut int mode according to the kernel version*/
2060 /* suse 11 sp1 irqbalance bug: use msi instead*/
2061 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
2062 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2064 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2068 static int mode
= SSD_DRV_MODE_STANDARD
;
2069 static int status_mask
= 0xFF;
2070 static int int_mode
= SSD_INT_MODE_DEFAULT
;
2071 static int threaded_irq
= 0;
2072 static int log_level
= SSD_LOG_LEVEL_WARNING
;
2073 static int ot_protect
= 1;
2074 static int wmode
= SSD_WMODE_DEFAULT
;
2075 static int finject
= 0;
2077 module_param(mode
, int, 0);
2078 module_param(status_mask
, int, 0);
2079 module_param(int_mode
, int, 0);
2080 module_param(threaded_irq
, int, 0);
2081 module_param(log_level
, int, 0);
2082 module_param(ot_protect
, int, 0);
2083 module_param(wmode
, int, 0);
2084 module_param(finject
, int, 0);
2087 MODULE_PARM_DESC(mode
, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
2088 MODULE_PARM_DESC(status_mask
, "command status mask, 0 - without command error, 0xff - with command error");
2089 MODULE_PARM_DESC(int_mode
, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
2090 MODULE_PARM_DESC(threaded_irq
, "threaded irq, 0 - normal irq, 1 - threaded irq");
2091 MODULE_PARM_DESC(log_level
, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
2092 MODULE_PARM_DESC(ot_protect
, "over temperature protect, 0 - disable, 1 - enable");
2093 MODULE_PARM_DESC(wmode
, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
2094 MODULE_PARM_DESC(finject
, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2096 // API adaption layer
/*
 * Complete a bio, spanning the bi_error (< 4.13) / bi_status (>= 4.13) /
 * two-arg / three-arg bio_endio() API generations.
 * NOTE(review): braces and several #else/#endif lines were dropped by the
 * extraction; the original line numbering (2098, 2102, ...) shows gaps.
 */
2097 static inline void ssd_bio_endio(struct bio
*bio
, int error
)
2099 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
2100 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
2101 bio
->bi_error
= error
;
2103 bio
->bi_status
= errno_to_blk_status(error
);
2106 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
2107 bio_endio(bio
, error
);
2109 bio_endio(bio
, bio
->bi_size
, error
);
/* Nonzero if the bio is a discard request (API differs per kernel).
 * NOTE(review): the first #if branch of this function is missing here. */
2113 static inline int ssd_bio_has_discard(struct bio
*bio
)
2117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2118 return bio_op(bio
) == REQ_OP_DISCARD
;
2119 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2120 return bio
->bi_rw
& REQ_DISCARD
;
2121 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2122 return bio_rw_flagged(bio
, BIO_RW_DISCARD
);
/* Nonzero if the bio requests a cache flush. */
2128 static inline int ssd_bio_has_flush(struct bio
*bio
)
2130 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2131 return bio_op(bio
) == REQ_OP_FLUSH
;
2132 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2133 return bio
->bi_rw
& REQ_FLUSH
;
/* Nonzero for barrier/FUA semantics across kernel generations. */
2139 static inline int ssd_bio_has_barrier_or_fua(struct bio
* bio
)
2141 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2142 return bio
->bi_opf
& REQ_FUA
;
2143 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
2144 return bio
->bi_rw
& REQ_FUA
;
2145 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2146 return bio
->bi_rw
& REQ_HARDBARRIER
;
2147 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2148 return bio_rw_flagged(bio
, BIO_RW_BARRIER
);
2150 return bio_barrier(bio
);
/*
 * Boot-command-line parsers ("hio_mode=", "hio_wmode=", ...) mirroring the
 * module parameters above, registered via __setup() below.
 * NOTE(review): function bodies are truncated by the extraction (braces and
 * return statements elided). status_mask parses base 16; all others base 0
 * (auto-detect).
 */
2155 static int __init
ssd_drv_mode(char *str
)
2157 mode
= (int)simple_strtoul(str
, NULL
, 0);
2162 static int __init
ssd_status_mask(char *str
)
2164 status_mask
= (int)simple_strtoul(str
, NULL
, 16);
2169 static int __init
ssd_int_mode(char *str
)
2171 int_mode
= (int)simple_strtoul(str
, NULL
, 0);
2176 static int __init
ssd_threaded_irq(char *str
)
2178 threaded_irq
= (int)simple_strtoul(str
, NULL
, 0);
2183 static int __init
ssd_log_level(char *str
)
2185 log_level
= (int)simple_strtoul(str
, NULL
, 0);
2190 static int __init
ssd_ot_protect(char *str
)
2192 ot_protect
= (int)simple_strtoul(str
, NULL
, 0);
2197 static int __init
ssd_wmode(char *str
)
2199 wmode
= (int)simple_strtoul(str
, NULL
, 0);
2204 static int __init
ssd_finject(char *str
)
2206 finject
= (int)simple_strtoul(str
, NULL
, 0);
/* Register each parser for its "hio_*=" kernel command-line option. */
2211 __setup(MODULE_NAME
"_mode=", ssd_drv_mode
);
2212 __setup(MODULE_NAME
"_status_mask=", ssd_status_mask
);
2213 __setup(MODULE_NAME
"_int_mode=", ssd_int_mode
);
2214 __setup(MODULE_NAME
"_threaded_irq=", ssd_threaded_irq
);
2215 __setup(MODULE_NAME
"_log_level=", ssd_log_level
);
2216 __setup(MODULE_NAME
"_ot_protect=", ssd_ot_protect
);
2217 __setup(MODULE_NAME
"_wmode=", ssd_wmode
);
2218 __setup(MODULE_NAME
"_finject=", ssd_finject
);
2222 #ifdef CONFIG_PROC_FS
2223 #include <linux/proc_fs.h>
2224 #include <asm/uaccess.h>
2226 #define SSD_PROC_DIR MODULE_NAME
2227 #define SSD_PROC_INFO "info"
2229 static struct proc_dir_entry
*ssd_proc_dir
= NULL
;
2230 static struct proc_dir_entry
*ssd_proc_info
= NULL
;
2098-style legacy read_proc handler comment follows.
/*
 * /proc read handler for kernels < 3.2 (read_proc interface): prints driver
 * version and, per device, size, firmware/PCB versions and node name into
 * `page`. NOTE(review): local declarations (len, size, idx) and closing
 * braces were dropped by the extraction — original line numbering has gaps.
 */
2232 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2233 static int ssd_proc_read(char *page
, char **start
,
2234 off_t off
, int count
, int *eof
, void *data
)
2236 struct ssd_device
*dev
= NULL
;
2237 struct ssd_device
*n
= NULL
;
/* bail out while unloading, and only support a single read at offset 0 */
2243 if (ssd_exiting
|| off
!= 0) {
2247 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
/* _safe iteration: devices may be removed while walking the list */
2249 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
/* bytes -> decimal GB */
2251 size
= dev
->hw_info
.size
;
2252 do_div(size
, 1000000000);
2254 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2256 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2258 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2259 if (dev
->hw_info
.ctrl_ver
!= 0) {
2260 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2263 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2265 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2266 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2269 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
/*
 * seq_file show handler (kernels >= 3.2): same report as ssd_proc_read but
 * via seq_printf. NOTE(review): local declarations (size, idx) and braces
 * were dropped by the extraction.
 */
2278 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2280 struct ssd_device
*dev
= NULL
;
2281 struct ssd_device
*n
= NULL
;
2289 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
/* _safe iteration: devices may be removed while walking the list */
2291 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
/* bytes -> decimal GB */
2293 size
= dev
->hw_info
.size
;
2294 do_div(size
, 1000000000);
2296 seq_printf(m
, "\n");
2298 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2300 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2301 if (dev
->hw_info
.ctrl_ver
!= 0) {
2302 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2305 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2307 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2308 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2311 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
/* single_open wrapper; PDE()->data vs PDE_DATA() differs at 3.10. */
2317 static int ssd_proc_open(struct inode
*inode
, struct file
*file
)
2319 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
2320 return single_open(file
, ssd_proc_show
, PDE(inode
)->data
);
2322 return single_open(file
, ssd_proc_show
, PDE_DATA(inode
));
/* seq_file-backed proc fops. NOTE(review): the .read member (seq_read)
 * line was dropped by the extraction. */
2326 static const struct file_operations ssd_proc_fops
= {
2327 .open
= ssd_proc_open
,
2329 .llseek
= seq_lseek
,
2330 .release
= single_release
,
/* Remove /proc/<name>/info then /proc/<name>; idempotent via NULL resets. */
2335 static void ssd_cleanup_proc(void)
2337 if (ssd_proc_info
) {
2338 remove_proc_entry(SSD_PROC_INFO
, ssd_proc_dir
);
2339 ssd_proc_info
= NULL
;
2342 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2343 ssd_proc_dir
= NULL
;
/*
 * Create /proc/<name>/info using the interface matching the running kernel.
 * NOTE(review): failure checks/braces dropped by the extraction. Also note
 * the permission inconsistency: the legacy path uses S_IRUGO|S_IWUSR while
 * the proc_create path uses 0600 — confirm which is intended.
 */
2346 static int ssd_init_proc(void)
2348 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2350 goto out_proc_mkdir
;
2352 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2353 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2355 goto out_create_proc_entry
;
2357 ssd_proc_info
->read_proc
= ssd_proc_read
;
2360 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2361 ssd_proc_info
->owner
= THIS_MODULE
;
2364 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2366 goto out_create_proc_entry
;
/* error unwind: drop the directory created above */
2371 out_create_proc_entry
:
2372 remove_proc_entry(SSD_PROC_DIR
, NULL
);
/* No-op stubs when CONFIG_PROC_FS is disabled. */
2378 static void ssd_cleanup_proc(void)
2382 static int ssd_init_proc(void)
2386 #endif /* CONFIG_PROC_FS */
/* sysfs registration stubs (bodies empty/elided in this extraction). */
2389 static void ssd_unregister_sysfs(struct ssd_device
*dev
)
2394 static int ssd_register_sysfs(struct ssd_device
*dev
)
2399 static void ssd_cleanup_sysfs(void)
2404 static int ssd_init_sysfs(void)
/* Release a device index back to the primary or slave bitmap. */
2409 static inline void ssd_put_index(int slave
, int index
)
2411 unsigned long *index_bits
= ssd_index_bits
;
2414 index_bits
= ssd_index_bits_sl
;
2417 if (test_and_clear_bit(index
, index_bits
)) {
2418 atomic_dec(&ssd_nr
);
/* Allocate the lowest free device index from the chosen bitmap. */
2422 static inline int ssd_get_index(int slave
)
2424 unsigned long *index_bits
= ssd_index_bits
;
2428 index_bits
= ssd_index_bits_sl
;
2432 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
/* lost the race for this bit — retry/fail (handling elided) */
2436 if (test_and_set_bit(index
, index_bits
)) {
2440 atomic_inc(&ssd_nr
);
2445 static void ssd_cleanup_index(void)
/* Initialise the device list, counter and index bitmaps.
 * NOTE(review): the memset length is the number of longs, not bytes
 * (missing sizeof/ *sizeof(long)); harmless only because static storage
 * is already zeroed — worth fixing upstream. */
2450 static int ssd_init_index(void)
2452 INIT_LIST_HEAD(&ssd_list
);
2453 atomic_set(&ssd_nr
, 0);
2454 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2455 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
/* Build the disk name suffix: "a".."z", then "aa".."zz" (sd-style). */
2460 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2462 if(idx
< SSD_ALPHABET_NUM
) {
2463 snprintf(name
, size
, "%c", 'a'+idx
);
2465 idx
-= SSD_ALPHABET_NUM
;
2466 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
2470 /* pci register r&w */
/* Write a 64-bit value as two 32-bit MMIO writes (low word first). */
2471 static inline void ssd_reg_write(void *addr
, uint64_t val
)
2473 iowrite32((uint32_t)val
, addr
);
2474 iowrite32((uint32_t)(val
>> 32), addr
+ 4);
/* Read a 64-bit value as two 32-bit MMIO reads (low word first).
 * NOTE(review): not atomic vs. concurrent register updates. */
2478 static inline uint64_t ssd_reg_read(void *addr
)
2481 uint32_t val_lo
, val_hi
;
2483 val_lo
= ioread32(addr
);
2484 val_hi
= ioread32(addr
+ 4);
2487 val
= val_lo
| ((uint64_t)val_hi
<< 32);
2493 #define ssd_reg32_write(addr, val) writel(val, addr)
2494 #define ssd_reg32_read(addr) readl(addr)
/* Clear the alarm LED; protocol <= V3 hands LED control to firmware.
 * NOTE(review): the bit manipulation on `val` was elided by extraction. */
2497 static void ssd_clear_alarm(struct ssd_device
*dev
)
2501 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2505 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2507 /* firmware control */
2510 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/* Raise the alarm LED under software control (counterpart of the above). */
2513 static void ssd_set_alarm(struct ssd_device
*dev
)
2517 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2521 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2525 /* software control */
2528 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/* 32-/16-bit byte-swap helpers (endianness conversion).
 * NOTE(review): each macro's opening "((uintN_t)(" line was elided. */
2531 #define u32_swap(x) \
2533 (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
2534 (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
2535 (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
2536 (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))
2538 #define u16_swap(x) \
2540 (((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
2541 (((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2545 /* No lock, for init only*/
2546 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2556 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2558 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2559 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2560 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2561 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2565 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2566 if (val
== 0x1000000) {
2570 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2577 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2586 static int ssd_init_spi(struct ssd_device
*dev
)
2592 mutex_lock(&dev
->spi_mutex
);
2595 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2598 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2600 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2605 } while (val
!= 0x1000000);
2607 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2612 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2620 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2622 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2625 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2626 mutex_unlock(&dev
->spi_mutex
);
2633 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2644 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2645 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2649 mutex_lock(&dev
->spi_mutex
);
2650 while (rlen
< size
) {
2651 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2653 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2655 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2656 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2657 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2658 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2662 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2663 if (val
== 0x1000000) {
2667 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2674 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2675 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2677 rlen
+= sizeof(uint32_t);
2681 mutex_unlock(&dev
->spi_mutex
);
2685 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2697 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2698 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2699 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2703 mutex_lock(&dev
->spi_mutex
);
2705 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2707 wlen
= size
/ sizeof(uint32_t);
2708 for (i
=0; i
<(int)wlen
; i
++) {
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2713 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2715 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2721 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2723 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2725 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2730 } while (val
!= 0x1000000);
2732 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2737 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2744 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2745 if ((val
>> 6) & 0x1) {
2752 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2754 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2757 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2759 mutex_unlock(&dev
->spi_mutex
);
2764 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2774 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2778 mutex_lock(&dev
->spi_mutex
);
2780 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2781 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2784 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2786 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2790 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2793 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2795 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2800 } while (val
!= 0x1000000);
2802 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2807 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2814 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2815 if ((val
>> 5) & 0x1) {
2822 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2824 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2827 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2829 mutex_unlock(&dev
->spi_mutex
);
2834 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2845 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2846 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2850 while (len
< size
) {
2851 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2852 rsize
= dev
->rom_info
.page_size
- roff
;
2853 if ((size
- len
) < rsize
) {
2854 rsize
= (size
- len
);
2858 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2872 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2883 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2884 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2888 while (len
< size
) {
2889 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2890 wsize
= dev
->rom_info
.page_size
- woff
;
2891 if ((size
- len
) < wsize
) {
2892 wsize
= (size
- len
);
2896 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2910 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2920 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2921 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2925 while (len
< size
) {
2928 ret
= ssd_spi_block_erase(dev
, eoff
);
2933 len
+= dev
->rom_info
.block_size
;
2943 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2945 return ssd_reg32_read(addr
);
2948 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2950 ssd_reg32_write(addr
, val
);
2951 ssd_reg32_read(addr
);
2954 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2956 ssd_i2c_ctrl_t ctrl
;
2957 ssd_i2c_data_t data
;
2964 ctrl
.bits
.wdata
= 0;
2965 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2966 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2967 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2971 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2972 if (data
.bits
.valid
== 0) {
2977 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2983 status
= data
.bits
.rdata
;
2985 if (!(status
& 0x4)) {
2986 /* clear read fifo data */
2987 ctrl
.bits
.wdata
= 0;
2988 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2989 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2990 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2994 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2995 if (data
.bits
.valid
== 0) {
3000 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3008 if (nr_data
<= SSD_I2C_MAX_DATA
) {
3017 ctrl
.bits
.wdata
= 0x04;
3018 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3019 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3020 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3023 if (!(status
& 0x8)) {
3025 /* reset i2c controller */
3026 ctrl
.bits
.wdata
= 0x0;
3027 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
3028 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3029 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3036 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3038 ssd_i2c_ctrl_t ctrl
;
3039 ssd_i2c_data_t data
;
3045 mutex_lock(&dev
->i2c_mutex
);
3050 ctrl
.bits
.wdata
= saddr
;
3051 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3052 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3053 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3056 while (off
< size
) {
3057 ctrl
.bits
.wdata
= buf
[off
];
3058 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3059 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3060 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3066 ctrl
.bits
.wdata
= 0x01;
3067 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3068 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3069 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3074 ctrl
.bits
.wdata
= 0;
3075 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3076 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3077 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3080 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3081 if (data
.bits
.valid
== 0) {
3086 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3093 status
= data
.bits
.rdata
;
3098 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3105 if (!(status
& 0x1)) {
3111 if (status
& 0x20) {
3117 if (status
& 0x10) {
3124 if (__ssd_i2c_clear(dev
, saddr
)) {
3128 mutex_unlock(&dev
->i2c_mutex
);
3133 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3135 ssd_i2c_ctrl_t ctrl
;
3136 ssd_i2c_data_t data
;
3142 mutex_lock(&dev
->i2c_mutex
);
3147 ctrl
.bits
.wdata
= saddr
;
3148 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3149 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3150 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3153 ctrl
.bits
.wdata
= size
;
3154 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3155 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3156 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3159 ctrl
.bits
.wdata
= 0x02;
3160 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3161 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3162 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3167 ctrl
.bits
.wdata
= 0;
3168 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3169 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3170 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3173 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3174 if (data
.bits
.valid
== 0) {
3179 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3186 status
= data
.bits
.rdata
;
3191 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3198 if (!(status
& 0x2)) {
3204 if (status
& 0x20) {
3210 if (status
& 0x10) {
3216 while (off
< size
) {
3217 ctrl
.bits
.wdata
= 0;
3218 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3219 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3220 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3224 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3225 if (data
.bits
.valid
== 0) {
3230 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3237 buf
[off
] = data
.bits
.rdata
;
3244 if (__ssd_i2c_clear(dev
, saddr
)) {
3248 mutex_unlock(&dev
->i2c_mutex
);
3253 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3255 ssd_i2c_ctrl_t ctrl
;
3256 ssd_i2c_data_t data
;
3262 mutex_lock(&dev
->i2c_mutex
);
3267 ctrl
.bits
.wdata
= saddr
;
3268 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3269 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3270 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3274 while (off
< wsize
) {
3275 ctrl
.bits
.wdata
= wbuf
[off
];
3276 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3277 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3278 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3284 ctrl
.bits
.wdata
= rsize
;
3285 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3286 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3287 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3290 ctrl
.bits
.wdata
= 0x03;
3291 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3292 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3293 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3298 ctrl
.bits
.wdata
= 0;
3299 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3300 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3301 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3304 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3305 if (data
.bits
.valid
== 0) {
3310 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3317 status
= data
.bits
.rdata
;
3322 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3329 if (!(status
& 0x2)) {
3335 if (status
& 0x20) {
3341 if (status
& 0x10) {
3348 while (off
< rsize
) {
3349 ctrl
.bits
.wdata
= 0;
3350 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3351 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3352 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3356 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3357 if (data
.bits
.valid
== 0) {
3362 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3369 rbuf
[off
] = data
.bits
.rdata
;
3376 if (__ssd_i2c_clear(dev
, saddr
)) {
3379 mutex_unlock(&dev
->i2c_mutex
);
3384 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3390 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3391 if (!ret
|| -ETIMEDOUT
== ret
) {
3396 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3399 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3405 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3411 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3412 if (!ret
|| -ETIMEDOUT
== ret
) {
3417 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3420 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3426 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3428 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3433 memcpy((smb_data
+ 1), buf
, 1);
3436 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3437 if (!ret
|| -ETIMEDOUT
== ret
) {
3442 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3445 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3451 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3453 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3460 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3461 if (!ret
|| -ETIMEDOUT
== ret
) {
3466 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3469 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3475 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3477 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3482 memcpy((smb_data
+ 1), buf
, 2);
3485 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3486 if (!ret
|| -ETIMEDOUT
== ret
) {
3491 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3494 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3500 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3502 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3509 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3510 if (!ret
|| -ETIMEDOUT
== ret
) {
3515 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3518 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3524 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3526 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3532 memcpy((smb_data
+ 2), buf
, size
);
3535 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3536 if (!ret
|| -ETIMEDOUT
== ret
) {
3541 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3544 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3550 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3552 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3560 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3561 if (!ret
|| -ETIMEDOUT
== ret
) {
3566 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3569 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3575 rsize
= smb_data
[1];
3577 if (rsize
> size
) {
3581 memcpy(buf
, (smb_data
+ 2), rsize
);
3587 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3590 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3595 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3600 conf
&= (uint8_t)(~1u);
3602 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3611 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3616 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3621 *data
= u16_swap(val
);
3626 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3635 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3642 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3647 /* set volt limit */
3648 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3649 high
= ssd_lm80_limit
[i
].high
;
3650 low
= ssd_lm80_limit
[i
].low
;
3652 if (SSD_LM80_IN_CAP
== i
) {
3656 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3662 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3668 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3674 /* set interrupt mask: allow volt in interrupt except cap in*/
3676 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3681 /* set interrupt mask: disable others */
3683 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3690 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3699 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3704 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3708 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3713 val
&= ~(1UL << (uint32_t)idx
);
3715 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3724 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3729 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3733 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3738 val
|= (1UL << (uint32_t)idx
);
3740 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3749 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3754 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3759 *data
= u16_swap(val
);
3763 static int ssd_generate_sensor_fault_log(struct ssd_device
*dev
, uint16_t event
, uint8_t addr
,uint32_t ret
)
3766 data
= ((ret
& 0xffff) << 16) | (addr
<< 8) | addr
;
3767 ssd_gen_swlog(dev
,event
,data
);
3770 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3773 uint16_t val
= 0, status
;
3774 uint8_t alarm1
= 0, alarm2
= 0;
3779 /* read interrupt status to clear interrupt */
3780 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3785 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3790 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3792 /* parse inetrrupt status */
3793 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3794 if (!((status
>> (uint32_t)i
) & 0x1)) {
3795 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3796 /* enable INx irq */
3797 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3806 /* disable INx irq */
3807 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3812 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3816 high
= (uint32_t)ssd_lm80_limit
[i
].high
* (uint32_t)10;
3817 low
= (uint32_t)ssd_lm80_limit
[i
].low
* (uint32_t)10;
3819 for (j
=0; j
<3; j
++) {
3820 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3824 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3825 if ((volt
>high
) || (volt
<=low
)) {
3827 msleep(SSD_LM80_CONV_INTERVAL
);
3839 case SSD_LM80_IN_CAP
: {
3841 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3843 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3848 case SSD_LM80_IN_1V2
:
3849 case SSD_LM80_IN_1V2a
:
3850 case SSD_LM80_IN_1V5
:
3851 case SSD_LM80_IN_1V8
: {
3852 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3855 case SSD_LM80_IN_FPGA_3V3
:
3856 case SSD_LM80_IN_3V3
: {
3857 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3867 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3868 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
,ret
);
3871 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3877 static int ssd_init_sensor(struct ssd_device
*dev
)
3881 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3885 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3887 hio_warn("%s: init lm75 failed\n", dev
->name
);
3888 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3889 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3894 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3895 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3897 hio_warn("%s: init lm80 failed\n", dev
->name
);
3898 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3899 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3906 /* skip error if not in standard mode */
3907 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3914 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3916 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3920 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3924 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3928 static int ssd_mon_temp(struct ssd_device
*dev
)
3934 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3938 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3943 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3945 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3946 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3950 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3952 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3953 if (cur
>= SSD_INLET_OT_TEMP
) {
3954 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3955 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3957 } else if(cur
< SSD_INLET_OT_HYST
) {
3958 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3959 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3964 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3966 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3967 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3971 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3973 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3974 if (cur
>= SSD_FLASH_OT_TEMP
) {
3975 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3976 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3978 } else if(cur
< SSD_FLASH_OT_HYST
) {
3979 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3980 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3989 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3991 test_and_clear_bit(tag
, dev
->tag_map
);
3992 wake_up(&dev
->tag_wq
);
3995 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
4000 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
4001 DEFINE_WAIT(__wait
);
4007 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
4010 finish_wait(&dev
->tag_wq
, &__wait
);
4013 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4020 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
4022 test_and_clear_bit(tag
, dev
->tag_map
);
4025 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
4029 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4036 static void ssd_barrier_end(struct ssd_device
*dev
)
4038 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4039 wake_up_all(&dev
->tag_wq
);
4041 mutex_unlock(&dev
->barrier_mutex
);
4044 static int ssd_barrier_start(struct ssd_device
*dev
)
4048 mutex_lock(&dev
->barrier_mutex
);
4050 atomic_set(&dev
->queue_depth
, 0);
4052 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4053 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4057 __set_current_state(TASK_INTERRUPTIBLE
);
4058 schedule_timeout(1);
4061 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4062 wake_up_all(&dev
->tag_wq
);
4064 mutex_unlock(&dev
->barrier_mutex
);
4069 static int ssd_busy(struct ssd_device
*dev
)
4071 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4078 static int ssd_wait_io(struct ssd_device
*dev
)
4082 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4083 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4087 __set_current_state(TASK_INTERRUPTIBLE
);
4088 schedule_timeout(1);
4095 static int ssd_in_barrier(struct ssd_device
*dev
)
4097 return (0 == atomic_read(&dev
->queue_depth
));
4101 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4103 kfree(dev
->tag_map
);
4106 static int ssd_init_tag(struct ssd_device
*dev
)
4108 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4110 mutex_init(&dev
->barrier_mutex
);
4112 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4114 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4115 if (!dev
->tag_map
) {
4119 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4121 init_waitqueue_head(&dev
->tag_wq
);
4127 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4129 struct ssd_device
*dev
= cmd
->dev
;
4130 struct bio
*bio
= cmd
->bio
;
4131 unsigned long dur
= jiffies
- cmd
->start_time
;
4132 int rw
= bio_data_dir(bio
);
4133 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4138 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4139 int cpu
= part_stat_lock();
4140 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4141 part_round_stats(cpu
, part
);
4142 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4143 part_dec_in_flight(part
, rw
);
4145 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4146 int cpu
= part_stat_lock();
4147 struct hd_struct
*part
= &dev
->gd
->part0
;
4148 part_round_stats(cpu
, part
);
4149 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4151 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4152 part
->in_flight
[rw
]--;
4153 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4157 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4159 disk_round_stats(dev
->gd
);
4160 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4162 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4163 dev
->gd
->in_flight
--;
4164 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4170 disk_round_stats(dev
->gd
);
4172 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4174 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4176 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4177 dev
->gd
->in_flight
--;
4178 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4185 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4187 struct ssd_device
*dev
= cmd
->dev
;
4188 struct bio
*bio
= cmd
->bio
;
4189 int rw
= bio_data_dir(bio
);
4190 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4195 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4196 int cpu
= part_stat_lock();
4197 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4198 part_round_stats(cpu
, part
);
4199 part_stat_inc(cpu
, part
, ios
[rw
]);
4200 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4201 part_inc_in_flight(part
, rw
);
4203 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4204 int cpu
= part_stat_lock();
4205 struct hd_struct
*part
= &dev
->gd
->part0
;
4206 part_round_stats(cpu
, part
);
4207 part_stat_inc(cpu
, part
, ios
[rw
]);
4208 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4210 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4211 part
->in_flight
[rw
]++;
4212 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4216 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4218 disk_round_stats(dev
->gd
);
4219 disk_stat_inc(dev
->gd
, ios
[rw
]);
4220 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4222 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4223 dev
->gd
->in_flight
++;
4224 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4229 disk_round_stats(dev
->gd
);
4231 disk_stat_inc(dev
->gd
, writes
);
4232 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4234 disk_stat_inc(dev
->gd
, reads
);
4235 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4238 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4239 dev
->gd
->in_flight
++;
4240 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4246 cmd
->start_time
= jiffies
;
4250 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4252 spin_lock(&dev
->sendq_lock
);
4253 ssd_blist_add(&dev
->sendq
, bio
);
4254 spin_unlock(&dev
->sendq_lock
);
4256 atomic_inc(&dev
->in_sendq
);
4257 wake_up(&dev
->send_waitq
);
4260 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4262 struct ssd_device
*dev
= cmd
->dev
;
4263 struct bio
*bio
= cmd
->bio
;
4264 int errors
= cmd
->errors
;
4268 if (!ssd_bio_has_discard(bio
)) {
4269 ssd_end_io_acct(cmd
);
4271 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4272 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4277 ssd_put_tag(dev
, tag
);
4279 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4280 ssd_bio_endio(bio
, errors
);
4281 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4282 spin_lock(&dev
->doneq_lock
);
4283 ssd_blist_add(&dev
->doneq
, bio
);
4284 spin_unlock(&dev
->doneq_lock
);
4286 atomic_inc(&dev
->in_doneq
);
4287 wake_up(&dev
->done_waitq
);
4291 complete(cmd
->waiting
);
4296 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4298 struct ssd_device
*dev
= cmd
->dev
;
4299 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4302 for (i
=0; i
<dev
->nr_queue
; i
++) {
4303 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4304 disable_irq(dev
->entry
[i
].vector
);
4306 disable_irq(pci_irq_vector(dev
->pdev
, i
));
4310 atomic_inc(&dev
->tocnt
);
4312 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4313 cmd
->errors
= -ETIMEDOUT
;
4314 ssd_end_request(cmd
);
4317 for (i
=0; i
<dev
->nr_queue
; i
++) {
4318 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4319 enable_irq(dev
->entry
[i
].vector
);
4321 enable_irq(pci_irq_vector(dev
->pdev
, i
));
4330 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4332 init_timer(&cmd
->cmd_timer
);
4334 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4335 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4336 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4338 add_timer(&cmd
->cmd_timer
);
4341 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4343 return del_timer(&cmd
->cmd_timer
);
4346 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4350 timer
->data
= (unsigned long)data
;
4351 timer
->expires
= jiffies
+ timeout
;
4352 timer
->function
= (void (*)(unsigned long)) complt
;
/* Disarm a generic timer; returns del_timer()'s "was still pending" result. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4362 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4364 struct ssd_device
*dev
= cmd
->dev
;
4365 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4367 ssd_end_timeout_request(cmd
);
4369 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4373 static void __ssd_done(unsigned long data
)
4375 struct ssd_cmd
*cmd
;
4378 local_irq_disable();
4379 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4380 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4382 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4386 while (!list_empty(&localq
)) {
4387 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4388 list_del_init(&cmd
->list
);
4390 ssd_end_request(cmd
);
4394 static void __ssd_done_db(unsigned long data
)
4396 struct ssd_cmd
*cmd
;
4397 struct ssd_device
*dev
;
4401 local_irq_disable();
4402 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4403 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4405 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4409 while (!list_empty(&localq
)) {
4410 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4411 list_del_init(&cmd
->list
);
4413 dev
= (struct ssd_device
*)cmd
->dev
;
4417 sector_t off
= dev
->db_info
.data
.loc
.off
;
4418 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4420 switch (dev
->db_info
.type
) {
4421 case SSD_DEBUG_READ_ERR
:
4422 if (bio_data_dir(bio
) == READ
&&
4423 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4427 case SSD_DEBUG_WRITE_ERR
:
4428 if (bio_data_dir(bio
) == WRITE
&&
4429 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4430 cmd
->errors
= -EROFS
;
4433 case SSD_DEBUG_RW_ERR
:
4434 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4435 if (bio_data_dir(bio
) == READ
) {
4438 cmd
->errors
= -EROFS
;
4447 ssd_end_request(cmd
);
4451 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4453 unsigned long flags
= 0;
4455 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4456 struct ssd_device
*dev
= cmd
->dev
;
4457 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4458 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4465 local_irq_save(flags
);
4466 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4467 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4468 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4470 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4471 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4473 local_irq_restore(flags
);
4478 static inline void ssd_done(struct ssd_cmd
*cmd
)
4480 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4481 struct ssd_device
*dev
= cmd
->dev
;
4482 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4483 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4490 ssd_end_request(cmd
);
4495 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4497 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4499 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4501 spin_lock(&dev
->cmd_lock
);
4502 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4503 spin_unlock(&dev
->cmd_lock
);
4506 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4508 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4510 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4512 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4515 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4517 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4518 struct bio
*bio
= cmd
->bio
;
4520 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4523 switch (dev
->db_info
.type
) {
4524 case SSD_DEBUG_READ_TO
:
4525 if (bio_data_dir(bio
) == READ
) {
4529 case SSD_DEBUG_WRITE_TO
:
4530 if (bio_data_dir(bio
) == WRITE
) {
4534 case SSD_DEBUG_RW_TO
:
4542 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4546 /* fixed for BIOVEC_PHYS_MERGEABLE */
4547 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4548 #include <linux/bio.h>
4549 #include <linux/io.h>
4550 #include <xen/page.h>
4552 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4553 const struct bio_vec
*vec2
)
4555 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4556 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4558 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4559 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4562 #ifdef BIOVEC_PHYS_MERGEABLE
4563 #undef BIOVEC_PHYS_MERGEABLE
4565 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4566 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4567 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4571 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4573 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4574 struct bio_vec
*bvec
, *bvprv
= NULL
;
4575 struct scatterlist
*sg
= NULL
;
4576 int i
= 0, nsegs
= 0;
4578 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4579 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4583 * for each segment in bio
4585 bio_for_each_segment(bvec
, bio
, i
) {
4586 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4587 sg
->length
+= bvec
->bv_len
;
4589 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4593 sg
= sg
? (sg
+ 1) : sgl
;
4594 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4595 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4597 sg
->page
= bvec
->bv_page
;
4598 sg
->length
= bvec
->bv_len
;
4599 sg
->offset
= bvec
->bv_offset
;
4606 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4616 struct bio_vec bvec
, bvprv
;
4617 struct bvec_iter iter
;
4618 struct scatterlist
*sg
= NULL
;
4622 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4625 * for each segment in bio
4627 bio_for_each_segment(bvec
, bio
, iter
) {
4628 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4629 sg
->length
+= bvec
.bv_len
;
4631 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4635 sg
= sg
? (sg
+ 1) : sgl
;
4637 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4654 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4656 struct ssd_cmd
*cmd
;
4657 struct ssd_rw_msg
*msg
;
4658 struct ssd_sg_entry
*sge
;
4659 sector_t block
= bio_start(bio
);
4663 tag
= ssd_get_tag(dev
, wait
);
4668 cmd
= &dev
->cmd
[tag
];
4672 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4674 if (ssd_bio_has_discard(bio
)) {
4675 unsigned int length
= bio_sectors(bio
);
4677 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4679 msg
->fun
= SSD_FUNC_TRIM
;
4682 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4684 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4687 block
+= sge
->length
;
4688 length
-= sge
->length
;
4696 msg
->nsegs
= cmd
->nsegs
= i
;
4702 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4703 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4706 if (bio_data_dir(bio
) == READ
) {
4707 msg
->fun
= SSD_FUNC_READ
;
4710 msg
->fun
= SSD_FUNC_WRITE
;
4711 msg
->flag
= dev
->wmode
;
4715 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4717 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4718 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4720 block
+= sge
->length
;
4726 #ifdef SSD_OT_PROTECT
4727 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4728 msleep_interruptible(dev
->ot_delay
);
4732 ssd_start_io_acct(cmd
);
4738 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4740 struct ssd_cmd
*cmd
;
4741 struct ssd_rw_msg
*msg
;
4742 struct ssd_sg_entry
*sge
;
4743 struct scatterlist
*sgl
;
4744 sector_t block
= bio_start(bio
);
4748 tag
= ssd_get_tag(dev
, wait
);
4753 cmd
= &dev
->cmd
[tag
];
4757 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4761 if (ssd_bio_has_discard(bio
)) {
4762 unsigned int length
= bio_sectors(bio
);
4764 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4766 msg
->fun
= SSD_FUNC_TRIM
;
4769 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4771 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4774 block
+= sge
->length
;
4775 length
-= sge
->length
;
4783 msg
->nsegs
= cmd
->nsegs
= i
;
4789 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4792 if (bio_data_dir(bio
) == READ
) {
4793 msg
->fun
= SSD_FUNC_READ
;
4795 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4797 msg
->fun
= SSD_FUNC_WRITE
;
4798 msg
->flag
= dev
->wmode
;
4799 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4803 for (i
=0; i
<cmd
->nsegs
; i
++) {
4805 sge
->length
= sg_dma_len(sgl
) >> 9;
4806 sge
->buf
= sg_dma_address(sgl
);
4808 block
+= sge
->length
;
4815 #ifdef SSD_OT_PROTECT
4816 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4817 msleep_interruptible(dev
->ot_delay
);
4821 ssd_start_io_acct(cmd
);
4828 static int ssd_done_thread(void *data
)
4830 struct ssd_device
*dev
;
4839 current
->flags
|= PF_NOFREEZE
;
4840 //set_user_nice(current, -5);
4842 while (!kthread_should_stop()) {
4843 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4845 while (atomic_read(&dev
->in_doneq
)) {
4847 spin_lock(&dev
->doneq_lock
);
4848 bio
= ssd_blist_get(&dev
->doneq
);
4849 spin_unlock(&dev
->doneq_lock
);
4851 spin_lock_irq(&dev
->doneq_lock
);
4852 bio
= ssd_blist_get(&dev
->doneq
);
4853 spin_unlock_irq(&dev
->doneq_lock
);
4857 next
= bio
->bi_next
;
4858 bio
->bi_next
= NULL
;
4859 ssd_bio_endio(bio
, 0);
4860 atomic_dec(&dev
->in_doneq
);
4866 #ifdef SSD_ESCAPE_IRQ
4867 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4868 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4869 cpumask_var_t new_mask
;
4870 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4871 cpumask_setall(new_mask
);
4872 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4873 set_cpus_allowed_ptr(current
, new_mask
);
4874 free_cpumask_var(new_mask
);
4878 cpus_setall(new_mask
);
4879 cpu_clear(dev
->irq_cpu
, new_mask
);
4880 set_cpus_allowed(current
, new_mask
);
4889 static int ssd_send_thread(void *data
)
4891 struct ssd_device
*dev
;
4900 current
->flags
|= PF_NOFREEZE
;
4901 //set_user_nice(current, -5);
4903 while (!kthread_should_stop()) {
4904 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4906 while (atomic_read(&dev
->in_sendq
)) {
4907 spin_lock(&dev
->sendq_lock
);
4908 bio
= ssd_blist_get(&dev
->sendq
);
4909 spin_unlock(&dev
->sendq_lock
);
4912 next
= bio
->bi_next
;
4913 bio
->bi_next
= NULL
;
4914 #ifdef SSD_QUEUE_PBIO
4915 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4916 __ssd_submit_pbio(dev
, bio
, 1);
4918 ssd_submit_bio(dev
, bio
, 1);
4921 ssd_submit_bio(dev
, bio
, 1);
4923 atomic_dec(&dev
->in_sendq
);
4929 #ifdef SSD_ESCAPE_IRQ
4930 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4931 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4932 cpumask_var_t new_mask
;
4933 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4934 cpumask_setall(new_mask
);
4935 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4936 set_cpus_allowed_ptr(current
, new_mask
);
4937 free_cpumask_var(new_mask
);
4941 cpus_setall(new_mask
);
4942 cpu_clear(dev
->irq_cpu
, new_mask
);
4943 set_cpus_allowed(current
, new_mask
);
4953 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4955 kthread_stop(dev
->send_thread
);
4956 kthread_stop(dev
->done_thread
);
4959 static int ssd_init_thread(struct ssd_device
*dev
)
4963 atomic_set(&dev
->in_doneq
, 0);
4964 atomic_set(&dev
->in_sendq
, 0);
4966 spin_lock_init(&dev
->doneq_lock
);
4967 spin_lock_init(&dev
->sendq_lock
);
4969 ssd_blist_init(&dev
->doneq
);
4970 ssd_blist_init(&dev
->sendq
);
4972 init_waitqueue_head(&dev
->done_waitq
);
4973 init_waitqueue_head(&dev
->send_waitq
);
4975 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4976 if (IS_ERR(dev
->done_thread
)) {
4977 ret
= PTR_ERR(dev
->done_thread
);
4978 goto out_done_thread
;
4981 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4982 if (IS_ERR(dev
->send_thread
)) {
4983 ret
= PTR_ERR(dev
->send_thread
);
4984 goto out_send_thread
;
4990 kthread_stop(dev
->done_thread
);
4996 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4998 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5000 spin_lock(&dev
->dcmd_lock
);
5001 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
5002 spin_unlock(&dev
->dcmd_lock
);
5005 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
5007 struct ssd_dcmd
*dcmd
= NULL
;
5009 spin_lock(&dev
->dcmd_lock
);
5010 if (!list_empty(&dev
->dcmd_list
)) {
5011 dcmd
= list_entry(dev
->dcmd_list
.next
,
5012 struct ssd_dcmd
, list
);
5013 list_del_init(&dcmd
->list
);
5015 spin_unlock(&dev
->dcmd_lock
);
5020 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
5025 static int ssd_init_dcmd(struct ssd_device
*dev
)
5027 struct ssd_dcmd
*dcmd
;
5028 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
5031 spin_lock_init(&dev
->dcmd_lock
);
5032 INIT_LIST_HEAD(&dev
->dcmd_list
);
5033 init_waitqueue_head(&dev
->dcmd_wq
);
5035 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
5037 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
5038 goto out_alloc_dcmd
;
5040 memset(dev
->dcmd
, 0, dcmd_sz
);
5042 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
5044 INIT_LIST_HEAD(&dcmd
->list
);
5045 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
5054 static void ssd_put_dmsg(void *msg
)
5056 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
5057 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5059 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
5061 wake_up(&dev
->dcmd_wq
);
5064 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5066 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5070 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5073 dcmd
= ssd_get_dcmd(dev
);
5075 finish_wait(&dev
->dcmd_wq
, &wait
);
5081 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5083 DECLARE_COMPLETION(wait
);
5084 struct ssd_cmd
*cmd
;
5088 tag
= ssd_get_tag(dev
, 1);
5093 cmd
= &dev
->cmd
[tag
];
5095 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5096 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5098 cmd
->waiting
= &wait
;
5102 wait_for_completion(cmd
->waiting
);
5103 cmd
->waiting
= NULL
;
5105 if (cmd
->errors
== -ETIMEDOUT
) {
5107 } else if (cmd
->errors
) {
5112 *done
= cmd
->nr_log
;
5114 ssd_put_tag(dev
, cmd
->tag
);
5119 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5121 DECLARE_COMPLETION(wait
);
5122 struct ssd_cmd
*cmd
;
5126 tag
= ssd_barrier_get_tag(dev
);
5131 cmd
= &dev
->cmd
[tag
];
5133 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5134 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5136 cmd
->waiting
= &wait
;
5140 wait_for_completion(cmd
->waiting
);
5141 cmd
->waiting
= NULL
;
5143 if (cmd
->errors
== -ETIMEDOUT
) {
5145 } else if (cmd
->errors
) {
5150 *done
= cmd
->nr_log
;
5152 ssd_barrier_put_tag(dev
, cmd
->tag
);
5157 #ifdef SSD_OT_PROTECT
5158 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5165 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5169 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5172 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5173 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5175 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5176 if (val
== 0xffffffffffffffffull
) {
5180 cur
= (int)CUR_TEMP(val
);
5182 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5183 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5184 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5185 dev
->ot_delay
= SSD_OT_DELAY
;
5192 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5193 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5194 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5201 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5207 if (!dev
|| !status
) {
5211 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5212 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5213 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5214 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5215 if ((val
>> 22) & 0x1) {
5221 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5222 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5223 if ((val
>> 22) & 0x1) {
5229 *status
= !!dev
->ot_delay
;
5236 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5242 mutex_lock(&dev
->fw_mutex
);
5244 dev
->ot_protect
= !!protect
;
5246 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5247 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5248 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5249 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5250 if (dev
->ot_protect
) {
5255 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5258 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5259 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5260 if (dev
->ot_protect
) {
5265 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5269 mutex_unlock(&dev
->fw_mutex
);
5272 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5274 ssd_set_ot_protect(dev
, ot_protect
);
5276 #ifdef SSD_OT_PROTECT
5277 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5284 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5286 struct ssd_log_op_msg
*msg
;
5287 struct ssd_log_msg
*lmsg
;
5289 size_t length
= dev
->hw_info
.log_sz
;
5292 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5296 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5297 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5298 ret
= dma_mapping_error(buf_dma
);
5300 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5303 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5304 goto out_dma_mapping
;
5307 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5309 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5310 lmsg
= (struct ssd_log_msg
*)msg
;
5311 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5312 lmsg
->ctrl_idx
= ctrl_idx
;
5313 lmsg
->buf
= buf_dma
;
5315 msg
->fun
= SSD_FUNC_READ_LOG
;
5316 msg
->ctrl_idx
= ctrl_idx
;
5320 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5323 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5329 #define SSD_LOG_PRINT_BUF_SZ 256
5330 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5332 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5333 struct ssd_log_entry
*le
;
5335 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5341 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5342 if (log_desc
->event
== le
->event
) {
5352 if (log_desc
->level
< log_level
) {
5357 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5360 sn
= dev
->labelv3
.barcode
;
5363 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5365 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5366 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5369 switch (log_desc
->data
) {
5370 case SSD_LOG_DATA_NONE
:
5372 case SSD_LOG_DATA_LOC
:
5373 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5374 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5375 if (log_desc
->sblock
) {
5376 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5378 if (log_desc
->spage
) {
5379 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5382 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5383 if (log_desc
->sblock
) {
5384 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5386 if (log_desc
->spage
) {
5387 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5391 case SSD_LOG_DATA_HEX
:
5392 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5397 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5399 switch (log_desc
->level
) {
5400 case SSD_LOG_LEVEL_INFO
:
5401 hio_info("%s\n", print_buf
);
5403 case SSD_LOG_LEVEL_NOTICE
:
5404 hio_note("%s\n", print_buf
);
5406 case SSD_LOG_LEVEL_WARNING
:
5407 hio_warn("%s\n", print_buf
);
5409 case SSD_LOG_LEVEL_ERR
:
5410 hio_err("%s\n", print_buf
);
5411 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5414 hio_warn("%s\n", print_buf
);
5419 return log_desc
->level
;
5422 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5423 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5426 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5431 case SSD_LOG_OVER_TEMP
: {
5432 #ifdef SSD_OT_PROTECT
5433 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5434 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5435 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5436 dev
->ot_delay
= SSD_OT_DELAY
;
5443 case SSD_LOG_NORMAL_TEMP
: {
5444 #ifdef SSD_OT_PROTECT
5445 /* need to check all controller's temperature */
5446 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5451 case SSD_LOG_BATTERY_FAULT
: {
5454 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5455 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5456 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5460 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5461 ssd_switch_wmode(dev
, dev
->user_wmode
);
5466 case SSD_LOG_BATTERY_OK
: {
5467 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5468 ssd_switch_wmode(dev
, dev
->user_wmode
);
5473 case SSD_LOG_BOARD_VOLT_FAULT
: {
5474 ssd_mon_boardvolt(dev
);
5478 case SSD_LOG_CLEAR_LOG
: {
5480 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5484 case SSD_LOG_CAP_VOLT_FAULT
:
5485 case SSD_LOG_CAP_LEARN_FAULT
:
5486 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5487 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5488 ssd_switch_wmode(dev
, dev
->user_wmode
);
5497 /* ssd event call */
5498 if (dev
->event_call
) {
5499 dev
->event_call(dev
->gd
, event
, level
);
5502 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5503 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5510 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5516 mutex_lock(&dev
->internal_log_mutex
);
5518 size
= sizeof(struct ssd_log
);
5519 off
= dev
->internal_log
.nr_log
* size
;
5521 if (off
== dev
->rom_info
.log_sz
) {
5522 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5523 hio_warn("%s: internal log is full\n", dev
->name
);
5528 internal_log
= dev
->internal_log
.log
+ off
;
5529 memcpy(internal_log
, log
, size
);
5531 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5532 off
+= dev
->rom_info
.log_base
;
5534 ret
= ssd_spi_write(dev
, log
, off
, size
);
5540 dev
->internal_log
.nr_log
++;
5543 mutex_unlock(&dev
->internal_log_mutex
);
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
static unsigned short const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};

/* Fold one input byte into a reflected CRC-16 (poly 0x8005) accumulator. */
static unsigned short crc16_byte(unsigned short crc, const unsigned char data)
{
	return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
}

/**
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous CRC value
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	int i;

	for (i = 0; i < len; i++)
		crc = crc16_byte(crc, buffer[i]);
	return crc;
}
5602 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5609 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5612 memset(&log
, 0, sizeof(struct ssd_log
));
5614 do_gettimeofday(&tv
);
5615 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5616 log
.time
= tv
.tv_sec
;
5617 log
.le
.event
= event
;
5618 log
.le
.data
.val
= data
;
5620 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5621 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5622 level
= ssd_parse_log(dev
, &log
, 0);
5623 if (level
>= SSD_LOG_LEVEL
) {
5624 ret
= ssd_save_log(dev
, &log
);
5628 if (SSD_LOG_LEVEL_ERR
== level
) {
5633 dev
->smart
.log_info
.nr_log
++;
5634 dev
->smart
.log_info
.stat
[level
]++;
5637 ssd_handle_event(dev
, event
, level
);
5642 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5644 struct ssd_log_entry le
;
5647 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5655 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5659 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5664 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5665 queue_work(dev
->workq
, &dev
->log_work
);
5671 static int ssd_do_swlog(struct ssd_device
*dev
)
5673 struct ssd_log_entry le
;
5676 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5677 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5678 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5687 static int __ssd_clear_log(struct ssd_device
*dev
)
5689 uint32_t off
, length
;
5692 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5696 if (dev
->internal_log
.nr_log
== 0) {
5700 mutex_lock(&dev
->internal_log_mutex
);
5702 off
= dev
->rom_info
.log_base
;
5703 length
= dev
->rom_info
.log_sz
;
5705 ret
= ssd_spi_erase(dev
, off
, length
);
5707 hio_warn("%s: log erase: failed\n", dev
->name
);
5711 dev
->internal_log
.nr_log
= 0;
5714 mutex_unlock(&dev
->internal_log_mutex
);
5718 static int ssd_clear_log(struct ssd_device
*dev
)
5722 ret
= __ssd_clear_log(dev
);
5724 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5730 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5732 struct ssd_log_entry
*le
;
5739 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5744 do_gettimeofday(&tv
);
5746 log
.time
= tv
.tv_sec
;
5747 log
.ctrl_idx
= ctrl_idx
;
5749 le
= (ssd_log_entry_t
*)buf
;
5750 while (nr_log
> 0) {
5751 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5753 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5754 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5755 level
= ssd_parse_log(dev
, &log
, 1);
5756 if (level
>= SSD_LOG_LEVEL
) {
5757 ssd_save_log(dev
, &log
);
5761 if (SSD_LOG_LEVEL_ERR
== level
) {
5765 dev
->smart
.log_info
.nr_log
++;
5766 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5767 dev
->smart
.log_info
.stat
[level
]++;
5771 /* log to the volatile log info */
5772 dev
->log_info
.nr_log
++;
5773 dev
->log_info
.stat
[level
]++;
5777 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5778 if (le
->event
!= SSD_LOG_SEU_FAULT1
) {
5779 dev
->has_non_0x98_reg_access
= 1;
5782 /*dev->readonly = 1;
5783 set_disk_ro(dev->gd, 1);
5784 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5788 ssd_handle_event(dev
, le
->event
, level
);
5797 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5798 static void ssd_log_worker(void *data
)
5800 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5802 static void ssd_log_worker(struct work_struct
*work
)
5804 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5809 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5811 if (!dev
->log_buf
) {
5812 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5813 if (!dev
->log_buf
) {
5814 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5820 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5821 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5822 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5824 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5825 hio_warn("%s: do log fail\n", dev
->name
);
5831 ret
= ssd_do_swlog(dev
);
5833 hio_warn("%s: do swlog fail\n", dev
->name
);
5837 static void ssd_cleanup_log(struct ssd_device
*dev
)
5840 kfree(dev
->log_buf
);
5841 dev
->log_buf
= NULL
;
5844 sfifo_free(&dev
->log_fifo
);
5846 if (dev
->internal_log
.log
) {
5847 vfree(dev
->internal_log
.log
);
5848 dev
->internal_log
.nr_log
= 0;
5849 dev
->internal_log
.log
= NULL
;
5853 static int ssd_init_log(struct ssd_device
*dev
)
5855 struct ssd_log
*log
;
5860 mutex_init(&dev
->internal_log_mutex
);
5862 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5863 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5865 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5868 off
= dev
->rom_info
.log_base
;
5869 size
= dev
->rom_info
.log_sz
;
5871 dev
->internal_log
.nr_log
= 0;
5872 dev
->internal_log
.log
= vmalloc(size
);
5873 if (!dev
->internal_log
.log
) {
5878 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5880 goto out_alloc_log_fifo
;
5883 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5887 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5888 while (len
< size
) {
5889 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5894 if (log
->ctrl_idx
== 0xff) {
5898 if (log
->le
.event
== SSD_LOG_POWER_ON
) {
5899 if (dev
->internal_log
.nr_log
> dev
->last_poweron_id
) {
5900 dev
->last_poweron_id
= dev
->internal_log
.nr_log
;
5904 dev
->internal_log
.nr_log
++;
5906 len
+= sizeof(struct ssd_log
);
5907 off
+= sizeof(struct ssd_log
);
5913 sfifo_free(&dev
->log_fifo
);
5915 vfree(dev
->internal_log
.log
);
5916 dev
->internal_log
.log
= NULL
;
5917 dev
->internal_log
.nr_log
= 0;
5919 /* skip error if not in standard mode */
5920 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5927 static void ssd_stop_workq(struct ssd_device
*dev
)
5929 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5930 flush_workqueue(dev
->workq
);
5933 static void ssd_start_workq(struct ssd_device
*dev
)
5935 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5938 queue_work(dev
->workq
, &dev
->log_work
);
5941 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5943 flush_workqueue(dev
->workq
);
5944 destroy_workqueue(dev
->workq
);
5948 static int ssd_init_workq(struct ssd_device
*dev
)
5952 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5963 static int ssd_init_rom_info(struct ssd_device
*dev
)
5967 mutex_init(&dev
->spi_mutex
);
5968 mutex_init(&dev
->i2c_mutex
);
5970 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5971 /* fix bug: read data to clear status */
5972 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5974 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5975 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5976 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5978 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5979 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5980 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5982 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5983 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5984 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5986 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5988 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5989 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5990 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5991 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5992 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5993 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5994 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5996 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5997 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5998 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5999 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6001 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6002 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6003 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6004 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6006 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
6007 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
6008 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
6010 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
6011 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
6013 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
6014 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
6015 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6017 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6018 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6019 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
6020 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
6021 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
6024 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
6025 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
6026 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
6027 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
6029 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
6030 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6031 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6032 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6034 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6035 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6036 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6037 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6039 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6040 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6041 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
6043 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6044 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
6045 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
6046 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
6047 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
6050 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
6051 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
6054 return ssd_init_spi(dev
);
6058 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
6062 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6063 struct hd_struct
*part
;
6069 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
6073 do_gettimeofday(&tv
);
6074 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
6077 run_time
= tv
.tv_sec
- dev
->uptime
;
6080 /* avoid frequently update */
6081 if (run_time
>= 60) {
6086 smart
->io_stat
.run_time
+= run_time
;
6088 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6089 cpu
= part_stat_lock();
6090 part
= &dev
->gd
->part0
;
6091 part_round_stats(cpu
, part
);
6094 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
6095 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
6096 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
6097 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
6098 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
6100 disk_round_stats(dev
->gd
);
6103 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
6104 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
6105 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
6106 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
6109 disk_round_stats(dev
->gd
);
6112 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
6113 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
6114 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
6115 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
6118 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
6120 for (i
=0; i
<dev
->nr_queue
; i
++) {
6121 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
6122 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
6125 for (i
=0; i
<dev
->nr_queue
; i
++) {
6126 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
6127 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
6131 //dev->uptime = tv.tv_sec;
6136 static int __ssd_clear_smart(struct ssd_device
*dev
)
6140 uint32_t off
, length
;
6144 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6149 off
= dev
->rom_info
.smart_base
;
6150 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6152 ret
= ssd_spi_erase(dev
, off
, length
);
6154 hio_warn("%s: info erase: failed\n", dev
->name
);
6158 sversion
= dev
->smart
.version
;
6160 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6161 dev
->smart
.version
= sversion
+ 1;
6162 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6164 /* clear all tmp acc */
6165 for (i
=0; i
<dev
->nr_queue
; i
++) {
6166 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6167 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6170 atomic_set(&dev
->tocnt
, 0);
6172 /* clear tmp log info */
6173 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6175 do_gettimeofday(&tv
);
6176 dev
->uptime
= tv
.tv_sec
;
6179 //ssd_clear_alarm(dev);
6184 static int __ssd_clear_warning(struct ssd_device
*dev
)
6189 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6193 /* clear log_info warning */
6194 memset(&dev
->smart
.log_info
, 0, sizeof(dev
->smart
.log_info
));
6196 /* clear io_stat warning */
6197 dev
->smart
.io_stat
.nr_to
= 0;
6198 dev
->smart
.io_stat
.nr_rwerr
= 0;
6199 dev
->smart
.io_stat
.nr_ioerr
= 0;
6201 /* clear ecc_info warning */
6202 memset(&dev
->smart
.ecc_info
, 0, sizeof(dev
->smart
.ecc_info
));
6204 /* clear queued warnings */
6205 for (i
=0; i
<dev
->nr_queue
; i
++) {
6206 /* queued io_stat warning */
6207 dev
->queue
[i
].io_stat
.nr_to
= 0;
6208 dev
->queue
[i
].io_stat
.nr_rwerr
= 0;
6209 dev
->queue
[i
].io_stat
.nr_ioerr
= 0;
6211 /* queued ecc_info warning */
6212 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(dev
->queue
[i
].ecc_info
));
6215 /* write smart back to nor */
6216 for (i
= 0; i
< dev
->rom_info
.nr_smart
; i
++) {
6217 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6218 size
= dev
->rom_info
.smart_sz
;
6220 ret
= ssd_spi_erase(dev
, off
, size
);
6222 hio_warn("%s: warning erase: failed with code 1\n", dev
->name
);
6226 size
= sizeof(struct ssd_smart
);
6228 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6230 hio_warn("%s: warning erase: failed with code 2\n", dev
->name
);
6235 dev
->smart
.version
++;
6237 /* clear cmd timeout warning */
6238 atomic_set(&dev
->tocnt
, 0);
6240 /* clear tmp log info */
6241 memset(&dev
->log_info
, 0, sizeof(dev
->log_info
));
6247 static int ssd_clear_smart(struct ssd_device
*dev
)
6251 ret
= __ssd_clear_smart(dev
);
6253 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_SMART
, 0);
6259 static int ssd_clear_warning(struct ssd_device
*dev
)
6263 ret
= __ssd_clear_warning(dev
);
6265 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_WARNING
, 0);
6271 static int ssd_save_smart(struct ssd_device
*dev
)
6277 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6280 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6284 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6288 dev
->smart
.version
++;
6290 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6291 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6292 size
= dev
->rom_info
.smart_sz
;
6294 ret
= ssd_spi_erase(dev
, off
, size
);
6296 hio_warn("%s: info erase failed\n", dev
->name
);
6300 size
= sizeof(struct ssd_smart
);
6302 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6304 hio_warn("%s: info write failed\n", dev
->name
);
6315 static int ssd_init_smart(struct ssd_device
*dev
)
6317 struct ssd_smart
*smart
;
6319 uint32_t off
, size
, val
;
6322 int update_smart
= 0;
6324 do_gettimeofday(&tv
);
6325 dev
->uptime
= tv
.tv_sec
;
6327 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6331 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6337 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6340 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6341 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6343 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6344 size
= sizeof(struct ssd_smart
);
6346 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6348 hio_warn("%s: info read failed\n", dev
->name
);
6352 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6354 smart
[i
].version
= 0;
6358 if (smart
[i
].version
> dev
->smart
.version
) {
6359 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6363 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6364 /* first time power up */
6365 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6366 dev
->smart
.version
= 1;
6369 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
);
6371 dev
->last_poweron_id
= ~0;
6372 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
6373 if (dev
->smart
.io_stat
.nr_to
) {
6374 dev
->smart
.io_stat
.nr_to
= 0;
6379 /* check log info */
6381 struct ssd_log_info log_info
;
6382 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6384 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6386 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6389 switch (log
->le
.event
) {
6390 /* skip the volatile log info */
6391 case SSD_LOG_SEU_FAULT
:
6392 case SSD_LOG_SEU_FAULT1
:
6395 case SSD_LOG_TIMEOUT
:
6396 skip
= (dev
->last_poweron_id
>= log_info
.nr_log
);
6401 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6409 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6410 if (log_info
.stat
[i
] != dev
->smart
.log_info
.stat
[i
]) {
6412 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6419 ++dev
->smart
.version
;
6423 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6424 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6428 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6429 size
= dev
->rom_info
.smart_sz
;
6431 ret
= ssd_spi_erase(dev
, off
, size
);
6433 hio_warn("%s: info erase failed\n", dev
->name
);
6437 size
= sizeof(struct ssd_smart
);
6438 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6440 hio_warn("%s: info write failed\n", dev
->name
);
6447 /* sync smart with alarm led */
6448 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6449 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6456 /* skip error if not in standard mode */
6457 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6464 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6466 struct ssd_bm_manufacturer_data bm_md
= {0};
6467 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6475 mutex_lock(&dev
->bm_mutex
);
6477 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6478 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6483 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6484 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6489 if (bm_md
.firmware_ver
& 0xF000) {
6494 *ver
= bm_md
.firmware_ver
;
6497 mutex_unlock(&dev
->bm_mutex
);
6501 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6504 int i
= SSD_BM_RETRY_MAX
;
6508 ret
= __ssd_bm_get_version(dev
, &tmp
);
6522 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6524 struct ssd_bm_configuration_registers bm_cr
;
6525 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6529 mutex_lock(&dev
->bm_mutex
);
6531 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6532 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6537 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6538 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6543 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6548 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6551 mutex_unlock(&dev
->bm_mutex
);
6555 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6558 int i
= SSD_BM_RETRY_MAX
;
6562 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6576 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6578 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6579 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6582 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6591 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6594 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6597 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6607 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6610 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6613 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6623 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6625 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6626 struct ssd_bm bm_status
;
6627 uint8_t buf
[2] = {0, };
6632 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6634 while (bm_sbs
->desc
!= NULL
) {
6635 switch (bm_sbs
->size
) {
6637 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6639 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6645 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6647 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6650 //val = *(uint16_t *)buf;
6658 switch (bm_sbs
->unit
) {
6659 case SBS_UNIT_VALUE
:
6660 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6662 case SBS_UNIT_TEMPERATURE
:
6663 cval
= (uint16_t)(val
- 2731) / 10;
6664 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6666 case SBS_UNIT_VOLTAGE
:
6667 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6669 case SBS_UNIT_CURRENT
:
6670 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6673 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6675 case SBS_UNIT_PERCENT
:
6676 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6678 case SBS_UNIT_CAPACITANCE
:
6679 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6690 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6696 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6698 struct ssd_bm bm_status
= {0};
6703 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6708 /* capacitor voltage */
6709 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6714 for (i
=0; i
<nr_cap
; i
++) {
6715 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6716 *status
= SSD_BMSTATUS_WARNING
;
6722 if (bm_status
.sf_status
) {
6723 *status
= SSD_BMSTATUS_WARNING
;
6728 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6729 *status
= SSD_BMSTATUS_CHARGING
;
6731 *status
= SSD_BMSTATUS_OK
;
/* forward declaration: defined later in this file */
static void ssd_set_flush_timeout(struct ssd_device *dev, int mode);
6740 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6741 static void ssd_bm_worker(void *data
)
6743 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6745 static void ssd_bm_worker(struct work_struct
*work
)
6747 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6753 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6757 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6761 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6765 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6767 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6771 /* need cap learning ? */
6772 if (!(opstatus
& 0xF0)) {
6773 ret
= ssd_bm_enter_cap_learning(dev
);
6775 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6781 static void ssd_bm_routine_start(void *data
)
6783 struct ssd_device
*dev
;
6790 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6791 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6792 queue_work(dev
->workq
, &dev
->bm_work
);
6794 queue_work(dev
->workq
, &dev
->capmon_work
);
6800 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6807 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6812 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6817 /* make sure the lm80 voltage value is updated */
6818 msleep(SSD_LM80_CONV_INTERVAL
);
6820 /* check if full charged */
6823 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6825 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6826 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6830 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6831 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6836 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6840 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6843 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6845 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6846 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6850 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6857 /* enter cap learn */
6858 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6862 msleep(SSD_PL_CAP_LEARN_WAIT
);
6864 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6865 if (!((t
>> 1) & 0x1)) {
6870 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6876 if ((t
>> 4) & 0x1) {
6887 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6893 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6901 mutex_lock(&dev
->bm_mutex
);
6903 ssd_stop_workq(dev
);
6905 ret
= ssd_do_cap_learn(dev
, cap
);
6907 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6911 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6914 ssd_start_workq(dev
);
6915 mutex_unlock(&dev
->bm_mutex
);
6920 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6928 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6932 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6939 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6941 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6942 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6946 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6947 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6952 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6954 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6957 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6960 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6961 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6966 /* enable cap INx */
6967 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6969 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6970 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6976 /* skip error if not in standard mode */
6977 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6983 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6989 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6993 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6998 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
7002 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
7003 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
7011 static int ssd_init_pl_cap(struct ssd_device
*dev
)
7015 /* set here: user write mode */
7016 dev
->user_wmode
= wmode
;
7018 mutex_init(&dev
->bm_mutex
);
7020 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7022 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
7023 if ((val
>> 1) & 0x1) {
7024 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7027 ret
= ssd_check_pl_cap(dev
);
7029 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7037 static void __end_str(char *str
, int len
)
7041 for(i
=0; i
<len
; i
++) {
7042 if (*(str
+i
) == '\0')
7048 static int ssd_init_label(struct ssd_device
*dev
)
7054 /* label location */
7055 off
= dev
->rom_info
.label_base
;
7057 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7058 size
= sizeof(struct ssd_label
);
7061 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
7063 memset(&dev
->label
, 0, size
);
7067 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
7068 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
7069 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
7070 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
7071 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
7072 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
7074 size
= sizeof(struct ssd_labelv3
);
7077 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
7079 memset(&dev
->labelv3
, 0, size
);
7083 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7084 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7085 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
7086 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
7087 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7088 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7089 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
7090 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
7091 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
7095 /* skip error if not in standard mode */
7096 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7102 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
7104 struct ssd_device
*dev
;
7106 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
7110 dev
= bdev
->bd_disk
->private_data
;
7112 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7113 memset(label
, 0, sizeof(struct ssd_label
));
7114 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7115 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7116 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7117 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7119 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
7125 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
7127 uint16_t bm_ver
= 0;
7130 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7131 ret
= ssd_bm_get_version(dev
, &bm_ver
);
7137 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
7138 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
7139 ver
->bm_ver
= bm_ver
;
7140 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
7141 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
7148 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
7150 struct ssd_device
*dev
;
7153 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
7157 dev
= bdev
->bd_disk
->private_data
;
7159 mutex_lock(&dev
->fw_mutex
);
7160 ret
= __ssd_get_version(dev
, ver
);
7161 mutex_unlock(&dev
->fw_mutex
);
7166 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
7174 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7180 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
7181 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
7182 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
7183 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
7184 *temp
= (int)dev
->db_info
.data
.log
.extra
;
7189 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7190 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
7192 val
= ssd_reg_read(dev
->ctrlp
+ off
);
7193 if (val
== 0xffffffffffffffffull
) {
7197 cur
= (int)CUR_TEMP(val
);
7208 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
7210 struct ssd_device
*dev
;
7213 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
7217 dev
= bdev
->bd_disk
->private_data
;
7220 mutex_lock(&dev
->fw_mutex
);
7221 ret
= __ssd_get_temperature(dev
, temp
);
7222 mutex_unlock(&dev
->fw_mutex
);
7227 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
7229 struct ssd_device
*dev
;
7231 if (!bdev
|| !(bdev
->bd_disk
)) {
7235 dev
= bdev
->bd_disk
->private_data
;
7236 ssd_set_ot_protect(dev
, !!otprotect
);
7241 int ssd_bm_status(struct block_device
*bdev
, int *status
)
7243 struct ssd_device
*dev
;
7246 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7250 dev
= bdev
->bd_disk
->private_data
;
7252 mutex_lock(&dev
->fw_mutex
);
7253 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7254 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7255 *status
= SSD_BMSTATUS_WARNING
;
7257 *status
= SSD_BMSTATUS_OK
;
7259 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7260 ret
= __ssd_bm_status(dev
, status
);
7262 *status
= SSD_BMSTATUS_OK
;
7264 mutex_unlock(&dev
->fw_mutex
);
7269 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7271 struct ssd_device
*dev
;
7273 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7277 dev
= bdev
->bd_disk
->private_data
;
7279 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7280 paddr
->bus
= dev
->pdev
->bus
->number
;
7281 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7282 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7288 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7293 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7297 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7298 if (0xffffffffull
== acc
->threshold_l1
) {
7301 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7302 if (0xffffffffull
== acc
->threshold_l2
) {
7307 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7308 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7309 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7310 if (0xffffffffull
== acc
->val
) {
7313 if (val
> acc
->val
) {
7322 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7327 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7331 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7332 if (0xffffffffull
== acc
->threshold_l1
) {
7335 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7336 if (0xffffffffull
== acc
->threshold_l2
) {
7341 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7342 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7343 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7344 if (0xffffffffull
== acc
->val
) {
7348 if (val
> acc
->val
) {
7359 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7361 struct ssd_ram_op_msg
*msg
;
7363 size_t len
= length
;
7367 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7368 || !length
|| length
> dev
->hw_info
.ram_max_len
7369 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7373 len
/= dev
->hw_info
.ram_align
;
7374 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7376 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7377 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7378 ret
= dma_mapping_error(buf_dma
);
7380 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7383 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7384 goto out_dma_mapping
;
7387 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7389 msg
->fun
= SSD_FUNC_RAM_READ
;
7390 msg
->ctrl_idx
= ctrl_idx
;
7391 msg
->start
= (uint32_t)ofs_w
;
7395 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7398 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7404 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7406 struct ssd_ram_op_msg
*msg
;
7408 size_t len
= length
;
7412 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7413 || !length
|| length
> dev
->hw_info
.ram_max_len
7414 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7418 len
/= dev
->hw_info
.ram_align
;
7419 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7421 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7422 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7423 ret
= dma_mapping_error(buf_dma
);
7425 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7428 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7429 goto out_dma_mapping
;
7432 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7434 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7435 msg
->ctrl_idx
= ctrl_idx
;
7436 msg
->start
= (uint32_t)ofs_w
;
7440 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7443 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7450 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7457 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7458 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7463 len
= dev
->hw_info
.ram_max_len
;
7464 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7468 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7481 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7488 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7489 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7494 len
= dev
->hw_info
.ram_max_len
;
7495 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7499 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7514 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7516 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7517 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7519 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7523 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7527 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7533 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7535 struct ssd_nand_op_msg
*msg
;
7542 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7543 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7544 ret
= dma_mapping_error(buf_dma
);
7546 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7549 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7550 goto out_dma_mapping
;
7553 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7554 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7558 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7560 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7561 msg
->chip_no
= flash
;
7562 msg
->chip_ce
= chip
;
7563 msg
->ctrl_idx
= ctrl_idx
;
7566 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7569 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7576 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7577 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7579 struct ssd_nand_op_msg
*msg
;
7588 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7592 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7597 length
= page_count
* dev
->hw_info
.page_size
;
7599 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7600 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7601 ret
= dma_mapping_error(buf_dma
);
7603 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7606 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7607 goto out_dma_mapping
;
7610 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7611 flash
= (flash
<< 1) | chip
;
7615 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7617 msg
->fun
= SSD_FUNC_NAND_READ
;
7618 msg
->ctrl_idx
= ctrl_idx
;
7619 msg
->chip_no
= flash
;
7620 msg
->chip_ce
= chip
;
7621 msg
->page_no
= page
;
7622 msg
->page_count
= page_count
;
7625 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7628 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7635 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7636 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7638 struct ssd_nand_op_msg
*msg
;
7647 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7651 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7656 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7658 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7659 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7660 ret
= dma_mapping_error(buf_dma
);
7662 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7665 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7666 goto out_dma_mapping
;
7669 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7670 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7674 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7676 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7677 msg
->ctrl_idx
= ctrl_idx
;
7678 msg
->chip_no
= flash
;
7679 msg
->chip_ce
= chip
;
7680 msg
->page_no
= page
;
7681 msg
->page_count
= count
;
7684 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7687 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7694 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7695 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7697 struct ssd_nand_op_msg
*msg
;
7702 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7714 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7719 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7721 /* write data to ram */
7722 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7727 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7728 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7729 ret
= dma_mapping_error(buf_dma
);
7731 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7734 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7735 goto out_dma_mapping
;
7738 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7739 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7743 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7745 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7746 msg
->ctrl_idx
= ctrl_idx
;
7747 msg
->chip_no
= flash
;
7748 msg
->chip_ce
= chip
;
7750 msg
->page_no
= page
;
7751 msg
->page_count
= count
;
7754 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7757 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7763 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7765 struct ssd_nand_op_msg
*msg
;
7768 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7773 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7774 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7778 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7780 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7781 msg
->ctrl_idx
= ctrl_idx
;
7782 msg
->chip_no
= flash
;
7783 msg
->chip_ce
= chip
;
7784 msg
->page_no
= page
;
7786 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7792 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7794 struct ssd_nand_op_msg
*msg
;
7795 struct ssd_flush_msg
*fmsg
;
7798 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7803 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7805 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7806 fmsg
= (struct ssd_flush_msg
*)msg
;
7808 fmsg
->fun
= SSD_FUNC_FLUSH
;
7810 fmsg
->flash
= flash
;
7811 fmsg
->ctrl_idx
= ctrl_idx
;
7813 msg
->fun
= SSD_FUNC_FLUSH
;
7815 msg
->chip_no
= flash
;
7816 msg
->ctrl_idx
= ctrl_idx
;
7819 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7825 /* flash controller init state */
7826 static int __ssd_check_init_state(struct ssd_device
*dev
)
7828 uint32_t *init_state
= NULL
;
7829 int reg_base
, reg_sz
;
7830 int max_wait
= SSD_INIT_MAX_WAIT
;
7836 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7837 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7838 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7839 if (read_data == ~test_data) {
7840 //dev->hw_info.nr_ctrl++;
7841 dev->hw_info.nr_ctrl_map |= 1<<i;
7847 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7849 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7850 if (((read_data>>i) & 0x1) == 0) {
7855 if (dev->hw_info.nr_ctrl != j) {
7856 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
7862 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7863 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7864 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7865 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7871 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7872 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7873 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7874 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7879 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7880 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7881 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7882 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
7888 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7889 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7892 reg_base
= dev
->protocol_info
.init_state_reg
;
7893 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
7895 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7900 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7902 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7903 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7906 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7907 /* just check the last bit, no need to check all channel */
7908 ch_start
= dev
->hw_info
.max_ch
- 1;
7913 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7914 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7915 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7920 if (init_wait
<= max_wait
) {
7921 msleep(SSD_INIT_WAIT
);
7924 if (k
< dev
->hw_info
.nr_ch
) {
7925 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7926 dev
->name
, i
, j
, k
);
7928 hio_warn("%s: controller %d chip %d init failed\n",
7939 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
7945 static int ssd_check_init_state(struct ssd_device
*dev
)
7947 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7951 return __ssd_check_init_state(dev
);
7954 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7956 /* reset flash controller etc */
7957 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7960 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7964 mutex_lock(&dev
->fw_mutex
);
7966 if (type
== SSD_RST_NOINIT
) { //no init
7967 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7968 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7969 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7970 } else { // full reset
7971 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7972 mutex_unlock(&dev
->fw_mutex
);
7976 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7979 ssd_reset_resp_ptr(dev
);
7982 #ifdef SSD_OT_PROTECT
7989 ssd_set_flush_timeout(dev
, dev
->wmode
);
7991 mutex_unlock(&dev
->fw_mutex
);
7992 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7993 do_gettimeofday(&tv
);
7994 dev
->reset_time
= tv
.tv_sec
;
7996 return __ssd_check_init_state(dev
);
7999 static int ssd_save_md(struct ssd_device
*dev
)
8001 struct ssd_nand_op_msg
*msg
;
8004 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8007 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8011 if (!dev
->save_md
) {
8015 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8017 msg
->fun
= SSD_FUNC_FLUSH
;
8022 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8028 static int ssd_barrier_save_md(struct ssd_device
*dev
)
8030 struct ssd_nand_op_msg
*msg
;
8033 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8036 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8040 if (!dev
->save_md
) {
8044 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8046 msg
->fun
= SSD_FUNC_FLUSH
;
8051 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8057 static int ssd_flush(struct ssd_device
*dev
)
8059 struct ssd_nand_op_msg
*msg
;
8060 struct ssd_flush_msg
*fmsg
;
8063 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8066 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8068 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8069 fmsg
= (struct ssd_flush_msg
*)msg
;
8071 fmsg
->fun
= SSD_FUNC_FLUSH
;
8076 msg
->fun
= SSD_FUNC_FLUSH
;
8082 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8088 static int ssd_barrier_flush(struct ssd_device
*dev
)
8090 struct ssd_nand_op_msg
*msg
;
8091 struct ssd_flush_msg
*fmsg
;
8094 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8097 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8099 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8100 fmsg
= (struct ssd_flush_msg
*)msg
;
8102 fmsg
->fun
= SSD_FUNC_FLUSH
;
8107 msg
->fun
= SSD_FUNC_FLUSH
;
8113 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8119 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
8120 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
8121 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
8122 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
8127 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8132 case SSD_WMODE_BUFFER
:
8133 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8135 case SSD_WMODE_BUFFER_EX
:
8136 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
8137 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
8139 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8143 to
= SSD_WMODE_FUA_TIMEOUT
;
8149 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
8151 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
8154 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
8158 ret
= ssd_barrier_start(dev
);
8163 ret
= ssd_barrier_flush(dev
);
8165 goto out_barrier_end
;
8168 /* set contoller flush timeout */
8169 ssd_set_flush_timeout(dev
, m
);
8175 ssd_barrier_end(dev
);
8180 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
8186 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8190 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8191 default_wmode
= SSD_WMODE_BUFFER
;
8193 default_wmode
= SSD_WMODE_BUFFER_EX
;
8196 if (SSD_WMODE_AUTO
== m
) {
8197 /* battery fault ? */
8198 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8199 next_wmode
= SSD_WMODE_FUA
;
8201 next_wmode
= default_wmode
;
8203 } else if (SSD_WMODE_DEFAULT
== m
) {
8204 next_wmode
= default_wmode
;
8209 if (next_wmode
!= dev
->wmode
) {
8210 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8211 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
8213 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8220 static int ssd_init_wmode(struct ssd_device
*dev
)
8225 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8226 default_wmode
= SSD_WMODE_BUFFER
;
8228 default_wmode
= SSD_WMODE_BUFFER_EX
;
8232 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
8233 /* battery fault ? */
8234 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8235 dev
->wmode
= SSD_WMODE_FUA
;
8237 dev
->wmode
= default_wmode
;
8239 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
8240 dev
->wmode
= default_wmode
;
8242 dev
->wmode
= dev
->user_wmode
;
8244 ssd_set_flush_timeout(dev
, dev
->wmode
);
8249 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8253 /* not support old fw*/
8254 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8259 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8264 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8266 dev
->user_wmode
= m
;
8268 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
8277 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8279 struct ssd_device
*dev
;
8281 if (!bdev
|| !(bdev
->bd_disk
)) {
8285 dev
= bdev
->bd_disk
->private_data
;
8287 return __ssd_set_wmode(dev
, m
);
8290 static int ssd_do_reset(struct ssd_device
*dev
)
8294 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8298 ssd_stop_workq(dev
);
8300 ret
= ssd_barrier_start(dev
);
8305 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8307 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8310 //ret = __ssd_reset(dev, SSD_RST_FULL);
8311 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8314 goto out_barrier_end
;
8318 ssd_barrier_end(dev
);
8320 ssd_start_workq(dev
);
8321 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8325 static int ssd_full_reset(struct ssd_device
*dev
)
8329 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8333 ssd_stop_workq(dev
);
8335 ret
= ssd_barrier_start(dev
);
8340 ret
= ssd_barrier_flush(dev
);
8342 goto out_barrier_end
;
8345 ret
= ssd_barrier_save_md(dev
);
8347 goto out_barrier_end
;
8350 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8352 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8355 //ret = __ssd_reset(dev, SSD_RST_FULL);
8356 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8359 goto out_barrier_end
;
8363 ssd_barrier_end(dev
);
8365 ssd_start_workq(dev
);
8366 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8370 int ssd_reset(struct block_device
*bdev
)
8373 struct ssd_device
*dev
;
8375 if (!bdev
|| !(bdev
->bd_disk
)) {
8379 dev
= bdev
->bd_disk
->private_data
;
8381 ret
= ssd_full_reset(dev
);
8383 if (!dev
->has_non_0x98_reg_access
) {
8384 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, 0);
8391 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
8392 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8393 sector_t
*error_sector
)
8395 struct ssd_device
*dev
= q
->queuedata
;
8397 return ssd_flush(dev
);
8401 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8403 struct ssd_device
*dev
= q
->queuedata
;
8404 #ifdef SSD_QUEUE_PBIO
8408 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8409 ssd_bio_endio(bio
, -ENODEV
);
8413 #ifdef SSD_DEBUG_ERR
8414 if (atomic_read(&dev
->tocnt
)) {
8415 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8416 ssd_bio_endio(bio
, -EIO
);
8421 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8422 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8426 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8427 ssd_bio_endio(bio
, -EROFS
);
8431 #ifdef SSD_QUEUE_PBIO
8432 if (0 == atomic_read(&dev
->in_sendq
)) {
8433 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8437 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8438 ssd_queue_bio(dev
, bio
);
8441 __ssd_submit_pbio(dev
, bio
, 1);
8448 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8449 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8450 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8451 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8453 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8456 struct ssd_device
*dev
= q
->queuedata
;
8459 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8460 ssd_bio_endio(bio
, -ENODEV
);
8464 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
8465 blk_queue_split(q
, &bio
);
8466 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
8467 blk_queue_split(q
, &bio
, q
->bio_split
);
8470 #ifdef SSD_DEBUG_ERR
8471 if (atomic_read(&dev
->tocnt
)) {
8472 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8473 ssd_bio_endio(bio
, -EIO
);
8478 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8479 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8483 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8484 if (unlikely(ssd_bio_has_flush(bio
) && !bio_sectors(bio
))) {
8485 ssd_bio_endio(bio
, 0);
8489 if (0 == atomic_read(&dev
->in_sendq
)) {
8490 ret
= ssd_submit_bio(dev
, bio
, 0);
8494 ssd_queue_bio(dev
, bio
);
8498 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8499 return BLK_QC_T_NONE
;
8500 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8507 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
8508 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8510 struct ssd_device
*dev
;
8516 dev
= bdev
->bd_disk
->private_data
;
8523 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8528 static int ssd_init_queue(struct ssd_device
*dev
);
8529 static void ssd_cleanup_queue(struct ssd_device
*dev
);
8530 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8531 static int ssd_init_blkdev(struct ssd_device
*dev
);
8532 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8534 void __user
*argp
= (void __user
*)arg
;
8535 void __user
*buf
= NULL
;
8540 case SSD_CMD_GET_PROTOCOL_INFO
:
8541 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8542 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8548 case SSD_CMD_GET_HW_INFO
:
8549 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8550 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8556 case SSD_CMD_GET_ROM_INFO
:
8557 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8558 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8564 case SSD_CMD_GET_SMART
: {
8565 struct ssd_smart smart
;
8568 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8570 mutex_lock(&dev
->gd_mutex
);
8571 ssd_update_smart(dev
, &smart
);
8572 mutex_unlock(&dev
->gd_mutex
);
8574 /* combine the volatile log info */
8575 if (dev
->log_info
.nr_log
) {
8576 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8577 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8581 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8582 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8590 case SSD_CMD_GET_IDX
:
8591 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8592 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8598 case SSD_CMD_GET_AMOUNT
: {
8599 int nr_ssd
= atomic_read(&ssd_nr
);
8600 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8601 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8608 case SSD_CMD_GET_TO_INFO
: {
8609 int tocnt
= atomic_read(&dev
->tocnt
);
8611 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8612 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8619 case SSD_CMD_GET_DRV_VER
: {
8620 char ver
[] = DRIVER_VERSION
;
8621 int len
= sizeof(ver
);
8623 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8624 len
= (DRIVER_VERSION_LEN
- 1);
8626 if (copy_to_user(argp
, ver
, len
)) {
8627 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8634 case SSD_CMD_GET_BBACC_INFO
: {
8635 struct ssd_acc_info acc
;
8637 mutex_lock(&dev
->fw_mutex
);
8638 ret
= ssd_bb_acc(dev
, &acc
);
8639 mutex_unlock(&dev
->fw_mutex
);
8644 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8645 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8652 case SSD_CMD_GET_ECACC_INFO
: {
8653 struct ssd_acc_info acc
;
8655 mutex_lock(&dev
->fw_mutex
);
8656 ret
= ssd_ec_acc(dev
, &acc
);
8657 mutex_unlock(&dev
->fw_mutex
);
8662 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8663 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8670 case SSD_CMD_GET_HW_INFO_EXT
:
8671 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8672 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8678 case SSD_CMD_REG_READ
: {
8679 struct ssd_reg_op_info reg_info
;
8681 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8682 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8687 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8692 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8693 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8694 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8702 case SSD_CMD_REG_WRITE
: {
8703 struct ssd_reg_op_info reg_info
;
8705 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8706 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8711 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8716 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8721 case SSD_CMD_SPI_READ
: {
8722 struct ssd_spi_op_info spi_info
;
8725 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8726 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8732 size
= spi_info
.len
;
8735 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8740 kbuf
= kmalloc(size
, GFP_KERNEL
);
8746 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8752 if (copy_to_user(buf
, kbuf
, size
)) {
8753 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8764 case SSD_CMD_SPI_WRITE
: {
8765 struct ssd_spi_op_info spi_info
;
8768 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8769 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8775 size
= spi_info
.len
;
8778 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8783 kbuf
= kmalloc(size
, GFP_KERNEL
);
8789 if (copy_from_user(kbuf
, buf
, size
)) {
8790 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8796 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8807 case SSD_CMD_SPI_ERASE
: {
8808 struct ssd_spi_op_info spi_info
;
8811 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8812 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8819 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8824 ret
= ssd_spi_block_erase(dev
, off
);
8832 case SSD_CMD_I2C_READ
: {
8833 struct ssd_i2c_op_info i2c_info
;
8837 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8838 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8843 saddr
= i2c_info
.saddr
;
8844 rsize
= i2c_info
.rsize
;
8845 buf
= i2c_info
.rbuf
;
8847 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8852 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8858 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8864 if (copy_to_user(buf
, kbuf
, rsize
)) {
8865 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8876 case SSD_CMD_I2C_WRITE
: {
8877 struct ssd_i2c_op_info i2c_info
;
8881 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8882 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8887 saddr
= i2c_info
.saddr
;
8888 wsize
= i2c_info
.wsize
;
8889 buf
= i2c_info
.wbuf
;
8891 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8896 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8902 if (copy_from_user(kbuf
, buf
, wsize
)) {
8903 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8909 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8920 case SSD_CMD_I2C_WRITE_READ
: {
8921 struct ssd_i2c_op_info i2c_info
;
8927 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8928 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8933 saddr
= i2c_info
.saddr
;
8934 wsize
= i2c_info
.wsize
;
8935 rsize
= i2c_info
.rsize
;
8936 buf
= i2c_info
.wbuf
;
8938 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8943 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8948 size
= wsize
+ rsize
;
8950 kbuf
= kmalloc(size
, GFP_KERNEL
);
8956 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8957 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8963 buf
= i2c_info
.rbuf
;
8965 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8971 if (copy_to_user(buf
, kbuf
, rsize
)) {
8972 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8983 case SSD_CMD_SMBUS_SEND_BYTE
: {
8984 struct ssd_smbus_op_info smbus_info
;
8985 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8989 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8990 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8995 saddr
= smbus_info
.saddr
;
8996 buf
= smbus_info
.buf
;
8999 if (copy_from_user(smb_data
, buf
, size
)) {
9000 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9005 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
9013 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
9014 struct ssd_smbus_op_info smbus_info
;
9015 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9019 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9020 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9025 saddr
= smbus_info
.saddr
;
9026 buf
= smbus_info
.buf
;
9029 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
9034 if (copy_to_user(buf
, smb_data
, size
)) {
9035 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9043 case SSD_CMD_SMBUS_WRITE_BYTE
: {
9044 struct ssd_smbus_op_info smbus_info
;
9045 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9050 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9051 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9056 saddr
= smbus_info
.saddr
;
9057 command
= smbus_info
.cmd
;
9058 buf
= smbus_info
.buf
;
9061 if (copy_from_user(smb_data
, buf
, size
)) {
9062 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9067 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
9075 case SSD_CMD_SMBUS_READ_BYTE
: {
9076 struct ssd_smbus_op_info smbus_info
;
9077 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9082 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9083 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9088 saddr
= smbus_info
.saddr
;
9089 command
= smbus_info
.cmd
;
9090 buf
= smbus_info
.buf
;
9093 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
9098 if (copy_to_user(buf
, smb_data
, size
)) {
9099 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9107 case SSD_CMD_SMBUS_WRITE_WORD
: {
9108 struct ssd_smbus_op_info smbus_info
;
9109 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9114 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9115 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9120 saddr
= smbus_info
.saddr
;
9121 command
= smbus_info
.cmd
;
9122 buf
= smbus_info
.buf
;
9125 if (copy_from_user(smb_data
, buf
, size
)) {
9126 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9131 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
9139 case SSD_CMD_SMBUS_READ_WORD
: {
9140 struct ssd_smbus_op_info smbus_info
;
9141 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9146 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9147 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9152 saddr
= smbus_info
.saddr
;
9153 command
= smbus_info
.cmd
;
9154 buf
= smbus_info
.buf
;
9157 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
9162 if (copy_to_user(buf
, smb_data
, size
)) {
9163 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9171 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
9172 struct ssd_smbus_op_info smbus_info
;
9173 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9178 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9179 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9184 saddr
= smbus_info
.saddr
;
9185 command
= smbus_info
.cmd
;
9186 buf
= smbus_info
.buf
;
9187 size
= smbus_info
.size
;
9189 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9194 if (copy_from_user(smb_data
, buf
, size
)) {
9195 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9200 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9208 case SSD_CMD_SMBUS_READ_BLOCK
: {
9209 struct ssd_smbus_op_info smbus_info
;
9210 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9215 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9216 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9221 saddr
= smbus_info
.saddr
;
9222 command
= smbus_info
.cmd
;
9223 buf
= smbus_info
.buf
;
9224 size
= smbus_info
.size
;
9226 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9231 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9236 if (copy_to_user(buf
, smb_data
, size
)) {
9237 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9245 case SSD_CMD_BM_GET_VER
: {
9248 ret
= ssd_bm_get_version(dev
, &ver
);
9253 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9254 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9262 case SSD_CMD_BM_GET_NR_CAP
: {
9265 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9270 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9271 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9279 case SSD_CMD_BM_CAP_LEARNING
: {
9280 ret
= ssd_bm_enter_cap_learning(dev
);
9289 case SSD_CMD_CAP_LEARN
: {
9292 ret
= ssd_cap_learn(dev
, &cap
);
9297 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9298 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9306 case SSD_CMD_GET_CAP_STATUS
: {
9309 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9313 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9314 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9322 case SSD_CMD_RAM_READ
: {
9323 struct ssd_ram_op_info ram_info
;
9326 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9329 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9330 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9335 ofs
= ram_info
.start
;
9336 length
= ram_info
.length
;
9338 ctrl_idx
= ram_info
.ctrl_idx
;
9340 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9345 kbuf
= kmalloc(len
, GFP_KERNEL
);
9351 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9352 if ((length
- rlen
) < len
) {
9353 len
= length
- rlen
;
9356 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9361 if (copy_to_user(buf
, kbuf
, len
)) {
9372 case SSD_CMD_RAM_WRITE
: {
9373 struct ssd_ram_op_info ram_info
;
9376 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9379 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9380 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9384 ofs
= ram_info
.start
;
9385 length
= ram_info
.length
;
9387 ctrl_idx
= ram_info
.ctrl_idx
;
9389 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9394 kbuf
= kmalloc(len
, GFP_KERNEL
);
9400 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9401 if ((length
- wlen
) < len
) {
9402 len
= length
- wlen
;
9405 if (copy_from_user(kbuf
, buf
, len
)) {
9410 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9421 case SSD_CMD_NAND_READ_ID
: {
9422 struct ssd_flash_op_info flash_info
;
9423 int chip_no
, chip_ce
, length
, ctrl_idx
;
9425 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9426 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9431 chip_no
= flash_info
.flash
;
9432 chip_ce
= flash_info
.chip
;
9433 ctrl_idx
= flash_info
.ctrl_idx
;
9434 buf
= flash_info
.buf
;
9435 length
= dev
->hw_info
.id_size
;
9437 //kbuf = kmalloc(length, GFP_KERNEL);
9438 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9443 memset(kbuf
, 0, length
);
9445 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9451 if (copy_to_user(buf
, kbuf
, length
)) {
9462 case SSD_CMD_NAND_READ
: { //with oob
9463 struct ssd_flash_op_info flash_info
;
9465 int flash
, chip
, page
, ctrl_idx
;
9468 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9469 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9474 flash
= flash_info
.flash
;
9475 chip
= flash_info
.chip
;
9476 page
= flash_info
.page
;
9477 buf
= flash_info
.buf
;
9478 ctrl_idx
= flash_info
.ctrl_idx
;
9480 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9482 kbuf
= kmalloc(length
, GFP_KERNEL
);
9488 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9489 if (ret
&& (-EIO
!= ret
)) {
9494 if (copy_to_user(buf
, kbuf
, length
)) {
9506 case SSD_CMD_NAND_WRITE
: {
9507 struct ssd_flash_op_info flash_info
;
9508 int flash
, chip
, page
, ctrl_idx
;
9511 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9512 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9517 flash
= flash_info
.flash
;
9518 chip
= flash_info
.chip
;
9519 page
= flash_info
.page
;
9520 buf
= flash_info
.buf
;
9521 ctrl_idx
= flash_info
.ctrl_idx
;
9523 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9525 kbuf
= kmalloc(length
, GFP_KERNEL
);
9531 if (copy_from_user(kbuf
, buf
, length
)) {
9537 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9547 case SSD_CMD_NAND_ERASE
: {
9548 struct ssd_flash_op_info flash_info
;
9549 int flash
, chip
, page
, ctrl_idx
;
9551 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9552 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9557 flash
= flash_info
.flash
;
9558 chip
= flash_info
.chip
;
9559 page
= flash_info
.page
;
9560 ctrl_idx
= flash_info
.ctrl_idx
;
9562 if ((page
% dev
->hw_info
.page_count
) != 0) {
9567 //hio_warn("erase fs = %llx\n", ofs);
9568 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9576 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9577 struct ssd_flash_op_info flash_info
;
9579 int flash
, chip
, page
, ctrl_idx
;
9581 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9582 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9587 flash
= flash_info
.flash
;
9588 chip
= flash_info
.chip
;
9589 page
= flash_info
.page
;
9590 buf
= flash_info
.buf
;
9591 ctrl_idx
= flash_info
.ctrl_idx
;
9593 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9595 kbuf
= kmalloc(length
, GFP_KERNEL
);
9601 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9602 if (-EIO
== ret
) { //ingore EIO
9610 if (copy_to_user(buf
, kbuf
, length
)) {
9620 case SSD_CMD_UPDATE_BBT
: {
9621 struct ssd_flash_op_info flash_info
;
9622 int ctrl_idx
, flash
;
9624 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9625 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9630 ctrl_idx
= flash_info
.ctrl_idx
;
9631 flash
= flash_info
.flash
;
9632 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9640 case SSD_CMD_CLEAR_ALARM
:
9641 ssd_clear_alarm(dev
);
9644 case SSD_CMD_SET_ALARM
:
9649 ret
= ssd_do_reset(dev
);
9652 case SSD_CMD_RELOAD_FW
:
9654 dev
->has_non_0x98_reg_access
= 1;
9655 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9656 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9657 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9658 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9663 case SSD_CMD_UNLOAD_DEV
: {
9664 if (atomic_read(&dev
->refcnt
)) {
9670 ssd_save_smart(dev
);
9672 ret
= ssd_flush(dev
);
9677 /* cleanup the block device */
9678 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9679 mutex_lock(&dev
->gd_mutex
);
9680 ssd_cleanup_blkdev(dev
);
9681 ssd_cleanup_queue(dev
);
9682 mutex_unlock(&dev
->gd_mutex
);
9688 case SSD_CMD_LOAD_DEV
: {
9690 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9695 ret
= ssd_init_smart(dev
);
9697 hio_warn("%s: init info: failed\n", dev
->name
);
9701 ret
= ssd_init_queue(dev
);
9703 hio_warn("%s: init queue failed\n", dev
->name
);
9706 ret
= ssd_init_blkdev(dev
);
9708 hio_warn("%s: register block device: failed\n", dev
->name
);
9711 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9716 case SSD_CMD_UPDATE_VP
: {
9718 uint32_t new_vp
, new_vp1
= 0;
9720 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9725 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9726 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9731 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9736 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9737 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9739 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9740 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9741 new_vp1
= val
& 0x3FF;
9743 new_vp1
= val
& 0x7FFF;
9746 if (new_vp1
== new_vp
) {
9751 /*if (new_vp == dev->hw_info.valid_pages) {
9756 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9758 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9763 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9764 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9765 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9771 dev
->hw_info
.valid_pages
= new_vp
;
9772 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9773 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9774 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9779 case SSD_CMD_FULL_RESET
: {
9780 ret
= ssd_full_reset(dev
);
9784 case SSD_CMD_GET_NR_LOG
: {
9785 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9792 case SSD_CMD_GET_LOG
: {
9793 uint32_t length
= dev
->rom_info
.log_sz
;
9797 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9805 case SSD_CMD_LOG_LEVEL
: {
9807 if (copy_from_user(&level
, argp
, sizeof(int))) {
9808 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9813 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9814 level
= SSD_LOG_LEVEL_ERR
;
9817 //just for showing log, no need to protect
9822 case SSD_CMD_OT_PROTECT
: {
9825 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9826 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9831 ssd_set_ot_protect(dev
, !!protect
);
9835 case SSD_CMD_GET_OT_STATUS
: {
9836 int status
= ssd_get_ot_status(dev
, &status
);
9838 if (copy_to_user(argp
, &status
, sizeof(int))) {
9839 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9846 case SSD_CMD_CLEAR_LOG
: {
9847 ret
= ssd_clear_log(dev
);
9851 case SSD_CMD_CLEAR_SMART
: {
9852 ret
= ssd_clear_smart(dev
);
9856 case SSD_CMD_CLEAR_WARNING
: {
9857 ret
= ssd_clear_warning(dev
);
9861 case SSD_CMD_SW_LOG
: {
9862 struct ssd_sw_log_info sw_log
;
9864 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9865 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9870 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9874 case SSD_CMD_GET_LABEL
: {
9876 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9881 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9882 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9889 case SSD_CMD_GET_VERSION
: {
9890 struct ssd_version_info ver
;
9892 mutex_lock(&dev
->fw_mutex
);
9893 ret
= __ssd_get_version(dev
, &ver
);
9894 mutex_unlock(&dev
->fw_mutex
);
9899 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9900 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9907 case SSD_CMD_GET_TEMPERATURE
: {
9910 mutex_lock(&dev
->fw_mutex
);
9911 ret
= __ssd_get_temperature(dev
, &temp
);
9912 mutex_unlock(&dev
->fw_mutex
);
9917 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9918 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9925 case SSD_CMD_GET_BMSTATUS
: {
9928 mutex_lock(&dev
->fw_mutex
);
9929 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9930 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9931 status
= SSD_BMSTATUS_WARNING
;
9933 status
= SSD_BMSTATUS_OK
;
9935 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9936 ret
= __ssd_bm_status(dev
, &status
);
9938 status
= SSD_BMSTATUS_OK
;
9940 mutex_unlock(&dev
->fw_mutex
);
9945 if (copy_to_user(argp
, &status
, sizeof(int))) {
9946 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9953 case SSD_CMD_GET_LABEL2
: {
9957 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9958 label
= &dev
->label
;
9959 length
= sizeof(struct ssd_label
);
9961 label
= &dev
->labelv3
;
9962 length
= sizeof(struct ssd_labelv3
);
9965 if (copy_to_user(argp
, label
, length
)) {
9973 ret
= ssd_flush(dev
);
9975 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9981 case SSD_CMD_SAVE_MD
: {
9984 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9985 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9990 dev
->save_md
= !!save_md
;
9994 case SSD_CMD_SET_WMODE
: {
9997 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9998 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10003 ret
= __ssd_set_wmode(dev
, new_wmode
);
10011 case SSD_CMD_GET_WMODE
: {
10012 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
10013 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10021 case SSD_CMD_GET_USER_WMODE
: {
10022 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
10023 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10031 case SSD_CMD_DEBUG
: {
10032 struct ssd_debug_info db_info
;
10039 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
10040 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10045 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
10051 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
10052 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
10057 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
10059 #ifdef SSD_OT_PROTECT
10061 if (db_info
.type
== SSD_DEBUG_NONE
) {
10062 ssd_check_temperature(dev
, SSD_OT_TEMP
);
10063 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
10064 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
10065 dev
->ot_delay
= SSD_OT_DELAY
;
10066 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
10073 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
10074 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
10075 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
10076 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
10080 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
10081 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
10087 case SSD_CMD_DRV_PARAM_INFO
: {
10088 struct ssd_drv_param_info drv_param
;
10090 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
10092 drv_param
.mode
= mode
;
10093 drv_param
.status_mask
= status_mask
;
10094 drv_param
.int_mode
= int_mode
;
10095 drv_param
.threaded_irq
= threaded_irq
;
10096 drv_param
.log_level
= log_level
;
10097 drv_param
.wmode
= wmode
;
10098 drv_param
.ot_protect
= ot_protect
;
10099 drv_param
.finject
= finject
;
10101 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
10102 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
/*
 * Block-device ioctl entry point. Two signatures are provided, selected by
 * kernel version: the pre-2.6.28 (inode, file) form and the modern
 * (block_device, fmode_t) form. Visible behavior: recover the ssd_device
 * from the gendisk private_data, answer HDIO_GETGEO locally from hw_info,
 * issue a flush via ssd_flush(), and forward everything else to
 * ssd_ioctl_common().
 *
 * NOTE(review): this extraction is lossy — the switch statement, error
 * returns and closing braces between the visible fragments are missing.
 */
10118 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10119 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
10120 unsigned int cmd
, unsigned long arg
)
10122 struct ssd_device
*dev
;
10123 void __user
*argp
= (void __user
*)arg
;
/* legacy path: disk reached through the inode's block device */
10129 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10134 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
10135 unsigned int cmd
, unsigned long arg
)
10137 struct ssd_device
*dev
;
10138 void __user
*argp
= (void __user
*)arg
;
10145 dev
= bdev
->bd_disk
->private_data
;
/* HDIO_GETGEO: synthesize a geometry from the device capacity */
10152 case HDIO_GETGEO
: {
10153 struct hd_geometry geo
;
10154 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
10157 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10158 geo
.start
= get_start_sect(inode
->i_bdev
);
10160 geo
.start
= get_start_sect(bdev
);
10162 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
/* flush path — failure only logged here; presumably also returned to caller (TODO confirm, lines missing) */
10171 ret
= ssd_flush(dev
);
10173 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
/* all other commands share the char-device ioctl implementation */
10181 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * kref release callback: runs when the last reference to an ssd_device is
 * dropped (see ssd_put). Recovers the device from the embedded kref and
 * returns its minor index via ssd_put_index().
 * NOTE(review): trailing lines (likely the final kfree of dev) are missing
 * from this extraction — confirm against the full source.
 */
10192 static void ssd_free_dev(struct kref
*kref
)
10194 struct ssd_device
*dev
;
10200 dev
= container_of(kref
, struct ssd_device
, kref
);
10204 ssd_put_index(dev
->slave
, dev
->idx
);
/*
 * Drop one reference to the device; ssd_free_dev() is invoked by the kref
 * core when the count reaches zero.
 */
10209 static void ssd_put(struct ssd_device
*dev
)
10211 kref_put(&dev
->kref
, ssd_free_dev
);
/*
 * Take one reference on the device. Returns int — presumably 0 on the
 * missing return line (TODO confirm; extraction dropped it).
 */
10214 static int ssd_get(struct ssd_device
*dev
)
10216 kref_get(&dev
->kref
);
/*
 * Block-device open. Version-dependent signature (inode/file vs
 * block_device/fmode_t). Visible behavior: fetch the ssd_device from the
 * gendisk private_data and bump the open count (dev->refcnt).
 * The try_module_get call is deliberately commented out in the original.
 */
10221 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10222 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10224 struct ssd_device
*dev
;
10230 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10235 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10237 struct ssd_device
*dev
;
10243 dev
= bdev
->bd_disk
->private_data
;
10249 /*if (!try_module_get(dev->owner))
/* track opens; paired with atomic_dec in ssd_block_release */
10255 atomic_inc(&dev
->refcnt
);
/*
 * Block-device release. Three version-dependent signatures: legacy
 * (inode, file), 2.6.28..3.9 int (gendisk, fmode_t), and the modern void
 * variant. All recover the ssd_device and decrement the open count taken
 * in ssd_block_open. module_put is intentionally commented out, matching
 * the disabled try_module_get in open.
 */
10260 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10261 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10263 struct ssd_device
*dev
;
10269 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10273 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10274 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10276 struct ssd_device
*dev
;
10282 dev
= disk
->private_data
;
10287 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10289 struct ssd_device
*dev
;
10295 dev
= disk
->private_data
;
10301 atomic_dec(&dev
->refcnt
);
10306 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10311 static struct block_device_operations ssd_fops
= {
10312 .owner
= THIS_MODULE
,
10313 .open
= ssd_block_open
,
10314 .release
= ssd_block_release
,
10315 .ioctl
= ssd_block_ioctl
,
10316 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10317 .getgeo
= ssd_block_getgeo
,
/*
 * Configure TRIM/discard support on the request queue. Compiled in only
 * when SSD_TRIM is defined and the kernel is >= 2.6.32; protocol versions
 * at or below V3 are excluded (early return implied by the visible guard).
 * Sets QUEUE_FLAG_DISCARD, 4 KiB discard alignment/granularity, and a
 * max_discard_sectors limit that is larger (sg_max_sec * cmd_max_sg) for
 * protocol >= V3.2.4. discard_zeroes_data exists only before 4.12.
 */
10321 static void ssd_init_trim(ssd_device_t
*dev
)
10323 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10324 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10327 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10329 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10330 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
10331 dev
->rq
->limits
.discard_zeroes_data
= 1;
10333 dev
->rq
->limits
.discard_alignment
= 4096;
10334 dev
->rq
->limits
.discard_granularity
= 4096;
10336 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10337 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10339 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
/* Tear down the request queue created by ssd_init_queue(). */
10344 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10348 blk_cleanup_queue(dev
->rq
);
/*
 * Allocate and configure the bio-based request queue. Installs
 * ssd_make_request, applies segment/sector limits from hw_info (using
 * the API names appropriate to the kernel version), fixes the logical
 * block size at 512 bytes, limits bounce buffering to highmem, marks the
 * queue non-rotational on >= 2.6.28, and enables TRIM via ssd_init_trim().
 * Error path: goto out_init_queue (target outside the visible fragment).
 */
10352 static int ssd_init_queue(struct ssd_device
*dev
)
10354 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10355 if (dev
->rq
== NULL
) {
10356 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10357 goto out_init_queue
;
10360 /* must be first */
10361 blk_queue_make_request(dev
->rq
, ssd_make_request
);
/* pre-2.6.34 (non-RHEL6) kernels use the older limit-setting API names */
10363 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10364 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10365 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10366 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10368 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10369 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10372 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10373 blk_queue_hardsect_size(dev
->rq
, 512);
10375 blk_queue_logical_block_size(dev
->rq
, 512);
10377 /* not work for make_request based drivers(bio) */
10378 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10380 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10382 dev
->rq
->queuedata
= dev
;
10384 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10385 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10388 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10389 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10392 ssd_init_trim(dev
);
/* Unregister the gendisk created by ssd_init_blkdev(). */
10400 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10402 del_gendisk(dev
->gd
);
/*
 * Create and register the gendisk: allocate ssd_minors minors, fill in
 * major/first_minor/fops/queue/private_data, name the disk after
 * dev->name, set the capacity in 512-byte sectors from hw_info.size, and
 * register it (device_add_disk on >= 4.8, driverfs_dev + add_disk —
 * registration line not visible in this extraction — on older kernels).
 */
10405 static int ssd_init_blkdev(struct ssd_device
*dev
)
10411 dev
->gd
= alloc_disk(ssd_minors
);
10413 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10416 dev
->gd
->major
= dev
->major
;
10417 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10418 dev
->gd
->fops
= &ssd_fops
;
10419 dev
->gd
->queue
= dev
->rq
;
10420 dev
->gd
->private_data
= dev
;
10422 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
/* hw_info.size is bytes; capacity is in 512-byte sectors */
10424 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10426 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
10427 device_add_disk(&dev
->pdev
->dev
, dev
->gd
);
10429 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
/*
 * Character-device ioctl. Legacy (inode, file) form on <= 2.6.10,
 * unlocked_ioctl form otherwise. The device was stashed in
 * file->private_data by ssd_open; all work is delegated to
 * ssd_ioctl_common().
 */
10439 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10440 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10441 unsigned int cmd
, unsigned long arg
)
10443 static long ssd_ioctl(struct file
*file
,
10444 unsigned int cmd
, unsigned long arg
)
10447 struct ssd_device
*dev
;
10453 dev
= file
->private_data
;
10458 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * Character-device open: validate inode/file, map the minor number to a
 * device by scanning the global ssd_list, and stash the match in
 * file->private_data for later ioctl/release calls.
 * NOTE(review): the not-found error path and return values are missing
 * from this extraction.
 */
10461 static int ssd_open(struct inode
*inode
, struct file
*file
)
10463 struct ssd_device
*dev
= NULL
;
10464 struct ssd_device
*n
= NULL
;
10468 if (!inode
|| !file
) {
10472 idx
= iminor(inode
);
10474 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10475 if (dev
->idx
== idx
) {
10485 file
->private_data
= dev
;
/*
 * Character-device release: clear the device pointer stored by ssd_open.
 */
10492 static int ssd_release(struct inode
*inode
, struct file
*file
)
10494 struct ssd_device
*dev
;
10500 dev
= file
->private_data
;
10507 file
->private_data
= NULL
;
/*
 * Re-program the controller's DMA base registers after a reset/reload:
 * reset the response pointer state, rewrite the message base (protocol
 * >= V3 only), then the response FIFO and response pointer bases.
 */
10512 static int ssd_reload_ssd_ptr(struct ssd_device
*dev
)
10514 ssd_reset_resp_ptr(dev
);
10516 //update base reg address
10517 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10519 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
10522 //update response base reg address
10523 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
10524 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
/*
 * file_operations for the control character device ("c<name>"): release
 * plus ioctl wired to the version-appropriate member (.ioctl on
 * <= 2.6.10, .unlocked_ioctl otherwise). The .open member is outside the
 * visible fragment.
 */
10529 static struct file_operations ssd_cfops
= {
10530 .owner
= THIS_MODULE
,
10532 .release
= ssd_release
,
10533 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10534 .ioctl
= ssd_ioctl
,
10536 .unlocked_ioctl
= ssd_ioctl
,
/*
 * Remove the control character device node, using whichever class/devfs
 * API the running kernel provides (class_simple / class_device /
 * device_destroy generations). Mirrors ssd_init_chardev.
 */
10540 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10546 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10547 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10548 devfs_remove("c%s", dev
->name
);
10549 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10550 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10551 devfs_remove("c%s", dev
->name
);
10552 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10553 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10554 devfs_remove("c%s", dev
->name
);
10555 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10556 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10558 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
/*
 * Create the control character device node "c<name>" at
 * MKDEV(dev->cmajor, dev->idx). One branch per device-model generation:
 * devfs_mk_cdev + class_simple_device_add (<= 2.6.12), devfs +
 * class_device_create (<= 2.6.17), class_device_create (<= 2.6.24),
 * device_create without drvdata (<= 2.6.26), device_create_drvdata
 * (<= 2.6.27), and modern device_create otherwise.
 * Paired with ssd_cleanup_chardev.
 */
10562 static int ssd_init_chardev(struct ssd_device
*dev
)
10570 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10571 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10575 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10577 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10578 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10582 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10584 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10585 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10589 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10591 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10592 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10593 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10594 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10595 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10596 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10598 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
/*
 * Sanity-check the bridge: write a test pattern to SSD_BRIDGE_TEST_REG
 * and verify the hardware reads back the bitwise complement. A mismatch
 * is treated as a bridge fault (diagnostic log line is commented out).
 */
10604 static int ssd_check_hw(struct ssd_device
*dev
)
10606 uint32_t test_data
= 0x55AA5AA5;
10607 uint32_t read_data
;
10609 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10610 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
/* hardware is expected to return the complement of what was written */
10611 if (read_data
!= ~(test_data
)) {
10612 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
/*
 * Wait for controller firmware readiness (protocol >= V3.1.3 only).
 * Polls SSD_HW_STATUS_REG up to SSD_CONTROLLER_WAIT times, sleeping
 * SSD_INIT_WAIT between polls, until bit 0 (fw loaded) and bit 8
 * (controller state OK) are both set; logs which condition failed.
 * Finally samples SSD_RELOAD_FW_REG and latches dev->reload_fw.
 */
10619 static int ssd_check_fw(struct ssd_device
*dev
)
10624 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10628 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10629 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10630 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10634 msleep(SSD_INIT_WAIT
);
10637 if (!(val
& 0x1)) {
10638 /* controller fw status */
10639 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10641 } else if (!((val
>> 8) & 0x1)) {
10642 /* controller state */
10643 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
/* remember that a firmware reload is pending/possible */
10647 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10649 dev
->reload_fw
= 1;
/*
 * Read and validate the bridge firmware version (low 12 bits of
 * SSD_BRIDGE_VER_REG); versions below SSD_FW_MIN are rejected. Then run
 * ssd_check_fw(); its failure is tolerated when the driver is not in
 * standard mode (mode is a module-level parameter).
 */
10655 static int ssd_init_fw_info(struct ssd_device
*dev
)
10660 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10661 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10662 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10663 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10666 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10668 ret
= ssd_check_fw(dev
);
10674 /* skip error if not in standard mode */
10675 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * Clock-health monitor (protocol >= V3.1.3). Reads SSD_HW_STATUS_REG and
 * inspects status bits: bit 4 = 166 MHz present, and for protocol >= V3.2
 * bit 5 = 166 MHz skew, bit 6 = 156.25 MHz present, bit 7 = 156.25 MHz
 * skew. Each fault is reported once — test_and_set_bit on dev->hwmon
 * suppresses duplicate warnings — and logged via SSD_LOG_CLK_FAULT.
 */
10681 static int ssd_check_clock(struct ssd_device
*dev
)
10686 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10690 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10693 if (!((val
>> 4 ) & 0x1)) {
10694 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10695 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10696 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10701 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10702 if (!((val
>> 5 ) & 0x1)) {
10703 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10704 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10705 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10709 if (!((val
>> 6 ) & 0x1)) {
10710 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10711 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10712 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10716 if (!((val
>> 7 ) & 0x1)) {
10717 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10718 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10719 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
/*
 * FPGA supply-voltage monitor (protocol >= V3.2). For each controller,
 * reads the 1.0V and 1.8V ADC registers (SSD_FPGA_1V0_REG0 /
 * SSD_FPGA_1V8_REG0, one register zone per controller) and checks both
 * the max and min ADC samples against the per-rail limits. Out-of-range
 * readings are reported once per controller/rail (test_and_set_bit on
 * dev->hwmon deduplicates), warned in millivolts via SSD_FPGA_VOLT(), and
 * logged with SSD_LOG_VOLT_FAULT. Rails already flagged are skipped.
 */
10728 static int ssd_check_volt(struct ssd_device
*dev
)
10735 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10739 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
/* 1.0V rail, skipped once a fault has been latched for this controller */
10741 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10742 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10743 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10744 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10745 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10746 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10747 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10751 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10752 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10753 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10754 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10755 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
/* 1.8V rail, same max/min check with its own ADC limits */
10761 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10762 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10763 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10764 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10765 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10766 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10767 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10771 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10772 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10773 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10774 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10775 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
/*
 * Verify controller reset state (protocol >= V3.1.3). Bit 8 of
 * SSD_HW_STATUS_REG must indicate a good controller state; on protocol
 * >= V3.2, bit 9 set means the controllers reset asynchronously, which
 * is warned and recorded via SSD_LOG_CTRL_RST_SYNC.
 */
10784 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10788 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10792 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10793 if (!((val
>> 8) & 0x1)) {
10794 /* controller state */
10795 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
/* async-reset detection exists only on protocol >= V3.2 */
10799 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10803 if (((val
>> 9 ) & 0x1)) {
10804 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10805 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
/*
 * Second-phase hardware check: on protocol >= V3.1.3, run the clock
 * check; its failure is tolerated unless the driver runs in standard
 * mode (module parameter "mode").
 */
10812 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10816 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10821 ret
= ssd_check_clock(dev
);
10827 /* skip error if not in standard mode */
10828 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * First-phase controller check: on protocol >= V3.1.3, verify reset
 * synchronization; failure is tolerated outside standard mode. Parallels
 * ssd_check_hw_bh in structure.
 */
10834 static int ssd_check_controller(struct ssd_device
*dev
)
10838 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10843 ret
= ssd_check_reset_sync(dev
);
10849 /* skip error if not in standard mode */
10850 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * Second-phase controller check (skipped outside standard mode):
 *  1. confirm controller 0 ready via SSD_READY_REG;
 *  2. per-controller loopback test — write a pattern to each
 *     SSD_CTRL_TEST_REGn and expect its complement back;
 *  3. voltage check via ssd_check_volt();
 *  4. protocol > V3: poll each controller's RAM init-done bit (bit 1 of
 *     its SSD_PV3_RAM_STATUS register), retrying with msleep up to
 *     SSD_RAM_INIT_MAX_WAIT (goto check_ram_status — label outside the
 *     visible fragment) before declaring DDR init failure;
 *  5. wait for channel-info init: poll SSD_CH_INFO_REG until bit 31
 *     clears, up to SSD_CH_INFO_MAX_WAIT iterations.
 */
10856 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10858 uint32_t test_data
= 0x55AA5AA5;
10860 int reg_base
, reg_sz
;
10865 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10870 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10872 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10876 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10877 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10878 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10879 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10880 if (val
!= ~(test_data
)) {
10881 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10887 ret
= ssd_check_volt(dev
);
10893 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10894 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10895 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10897 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10899 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10901 if (!((val
>> 1) & 0x1)) {
10903 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10904 msleep(SSD_INIT_WAIT
);
10905 goto check_ram_status
;
10907 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10908 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10913 reg_base
+= reg_sz
;
10918 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10919 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10920 if (!((val
>> 31) & 0x1)) {
10924 msleep(SSD_INIT_WAIT
);
10926 if ((val
>> 31) & 0x1) {
10927 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10934 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10938 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10939 if (val
== (uint32_t)-1) {
10940 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10943 dev
->protocol_info
.ver
= val
;
10945 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10946 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10947 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10949 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10950 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10952 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10953 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10955 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10956 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10962 static int ssd_init_hw_info(struct ssd_device
*dev
)
10970 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10971 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10972 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10974 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10975 hio_warn("%s: response info error\n", dev
->name
);
10980 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10981 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10982 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10983 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10984 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10986 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10987 hio_warn("%s: cmd info error\n", dev
->name
);
10993 if (ssd_check_hw_bh(dev
)) {
10994 hio_warn("%s: check hardware status failed\n", dev
->name
);
10999 if (ssd_check_controller(dev
)) {
11000 hio_warn("%s: check controller state failed\n", dev
->name
);
11005 /* nr controller : read again*/
11006 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
11007 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
11009 /* nr ctrl configured */
11010 nr_ctrl
= (val
>> 20) & 0xF;
11011 if (0 == dev
->hw_info
.nr_ctrl
) {
11012 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
11015 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
11016 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
11017 if (mode
<= SSD_DRV_MODE_STANDARD
) {
11023 if (ssd_check_controller_bh(dev
)) {
11024 hio_warn("%s: check controller failed\n", dev
->name
);
11029 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11030 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
11031 if ((val
& 0xF) != 0xF) {
11032 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
11035 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
11036 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
11042 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11043 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11044 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
11045 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
11046 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
11048 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11049 dev
->hw_info
.max_ch
= 1;
11050 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
11052 /* set max channel 32 */
11053 dev
->hw_info
.max_ch
= 32;
11056 if (0 == dev
->hw_info
.nr_chip
) {
11058 dev
->hw_info
.nr_chip
= 1;
11062 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
11063 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
11065 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
11066 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
11073 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11074 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
11075 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
11076 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
11077 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
11078 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11079 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
11081 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
11086 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
11088 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
11089 hio_warn("%s: ram info error\n", dev
->name
);
11094 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11095 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
11097 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
11098 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
11100 if (0 == dev
->hw_info
.log_sz
) {
11101 hio_warn("%s: log size error\n", dev
->name
);
11106 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
11107 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
11108 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11109 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11110 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
11111 hio_warn("%s: bbt info error\n", dev
->name
);
11117 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
11118 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
11119 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11120 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11122 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
11124 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
11125 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
11126 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
11127 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
11128 hio_warn("%s: md info error\n", dev
->name
);
11134 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11135 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
11137 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
11138 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
11143 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11144 if (dev
->hw_info
.nr_ctrl
> 1) {
11145 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
11146 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
11147 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
11150 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
11151 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
11152 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
11154 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
11155 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
11156 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
11158 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
11159 dev
->hw_info
.bbf_pages
= val
& 0xFF;
11160 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
11162 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
11163 hio_warn("%s: flash info error\n", dev
->name
);
11169 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
11171 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
11172 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11173 dev
->hw_info
.valid_pages
= val
& 0x3FF;
11174 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
11176 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
11177 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
11179 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
11180 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
11181 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
11186 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
11187 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
11188 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
11189 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11190 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
11192 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
11193 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
11200 if (mode
< SSD_DRV_MODE_DEBUG
) {
11201 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
11202 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
11203 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
11206 /* extend hardware info */
11207 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11208 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
11210 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11211 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11212 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11215 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11216 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11217 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11220 /* power loss protect */
11221 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11222 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11223 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11225 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11229 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11230 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11233 /* skip error if not in standard mode */
11234 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11240 static void ssd_cleanup_response(struct ssd_device
*dev
)
11242 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11243 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11245 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11246 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11249 static int ssd_init_response(struct ssd_device
*dev
)
11251 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11252 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11254 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11255 if (!dev
->resp_msg_base
) {
11256 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11257 goto out_alloc_resp_msg
;
11259 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11261 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11262 if (!dev
->resp_ptr_base
){
11263 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11264 goto out_alloc_resp_ptr
;
11266 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11267 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11269 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11270 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11274 out_alloc_resp_ptr
:
11275 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11276 out_alloc_resp_msg
:
11280 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11282 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11285 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11286 kfree(dev
->cmd
[i
].sgl
);
11289 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11293 static int ssd_init_cmd(struct ssd_device
*dev
)
11295 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11296 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11297 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11300 spin_lock_init(&dev
->cmd_lock
);
11302 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11303 if (!dev
->msg_base
) {
11304 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11305 goto out_alloc_msg
;
11308 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11310 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11311 goto out_alloc_cmd
;
11313 memset(dev
->cmd
, 0, cmd_sz
);
11315 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11316 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11317 if (!dev
->cmd
[i
].sgl
) {
11318 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11319 goto out_alloc_sgl
;
11322 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11323 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11325 dev
->cmd
[i
].dev
= dev
;
11326 dev
->cmd
[i
].tag
= i
;
11327 dev
->cmd
[i
].flag
= 0;
11329 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11332 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11333 dev
->scmd
= ssd_dispatch_cmd
;
11335 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11337 dev
->scmd
= ssd_send_cmd_db
;
11339 dev
->scmd
= ssd_send_cmd
;
11346 for (i
--; i
>=0; i
--) {
11347 kfree(dev
->cmd
[i
].sgl
);
11351 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11356 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11357 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11359 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11361 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11365 return IRQ_WAKE_THREAD
;
11368 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11370 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11371 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11372 struct ssd_cmd
*cmd
;
11373 union ssd_response_msq __msg
;
11374 union ssd_response_msq
*msg
= &__msg
;
11376 uint32_t resp_idx
= queue
->resp_idx
;
11377 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11378 uint32_t end_resp_idx
;
11380 if (unlikely(resp_idx
== new_resp_idx
)) {
11384 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11387 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11390 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11391 msg
->u64_msg
= *u64_msg
;
11393 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11394 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11397 /* clear the resp msg */
11398 *u64_msg
= (uint64_t)(-1);
11400 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11401 /*if (unlikely(!cmd->bio)) {
11402 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11403 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11407 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11408 cmd
->errors
= -EIO
;
11412 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11416 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11417 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11418 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11419 queue_work(dev
->workq
, &dev
->log_work
);
11423 if (unlikely(msg
->resp_msg
.status
)) {
11424 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11425 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11426 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11429 ssd_set_alarm(dev
);
11430 queue
->io_stat
.nr_rwerr
++;
11431 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11433 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11434 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11436 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11438 queue
->io_stat
.nr_ioerr
++;
11441 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11442 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11443 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11445 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11447 }while (resp_idx
!= end_resp_idx
);
11449 queue
->resp_idx
= new_resp_idx
;
11451 return IRQ_HANDLED
;
11455 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11456 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11458 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11461 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11462 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11463 struct ssd_cmd
*cmd
;
11464 union ssd_response_msq __msg
;
11465 union ssd_response_msq
*msg
= &__msg
;
11467 uint32_t resp_idx
= queue
->resp_idx
;
11468 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11469 uint32_t end_resp_idx
;
11471 if (unlikely(resp_idx
== new_resp_idx
)) {
11475 #if (defined SSD_ESCAPE_IRQ)
11476 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11477 dev
->irq_cpu
= smp_processor_id();
11481 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11484 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11487 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11488 msg
->u64_msg
= *u64_msg
;
11490 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11491 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11494 /* clear the resp msg */
11495 *u64_msg
= (uint64_t)(-1);
11497 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11498 /*if (unlikely(!cmd->bio)) {
11499 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11500 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11504 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11505 cmd
->errors
= -EIO
;
11509 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11513 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11514 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11515 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11516 queue_work(dev
->workq
, &dev
->log_work
);
11520 if (unlikely(msg
->resp_msg
.status
)) {
11521 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11522 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11523 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11526 ssd_set_alarm(dev
);
11527 queue
->io_stat
.nr_rwerr
++;
11528 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11530 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11531 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11533 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11535 queue
->io_stat
.nr_ioerr
++;
11538 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11539 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11540 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11542 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11544 }while (resp_idx
!= end_resp_idx
);
11546 queue
->resp_idx
= new_resp_idx
;
11548 return IRQ_HANDLED
;
11551 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11552 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11554 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11558 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11559 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11561 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11562 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11564 ret
= ssd_interrupt(irq
, dev_id
);
11568 if (IRQ_HANDLED
== ret
) {
11569 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11575 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11579 for (i
=0; i
<dev
->nr_queue
; i
++) {
11580 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11584 static void ssd_free_irq(struct ssd_device
*dev
)
11588 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11589 if (SSD_INT_MSIX
== dev
->int_mode
) {
11590 for (i
=0; i
<dev
->nr_queue
; i
++) {
11591 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11596 for (i
=0; i
<dev
->nr_queue
; i
++) {
11597 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11598 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11600 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11604 if (SSD_INT_MSIX
== dev
->int_mode
) {
11605 pci_disable_msix(dev
->pdev
);
11606 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11607 pci_disable_msi(dev
->pdev
);
11612 static int ssd_init_irq(struct ssd_device
*dev
)
11614 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11615 const struct cpumask
*cpu_mask
= NULL
;
11616 static int cpu_affinity
= 0;
11618 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11619 const struct cpumask
*mask
= NULL
;
11620 static int cpu
= 0;
11624 unsigned long flags
= 0;
11627 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11629 #ifdef SSD_ESCAPE_IRQ
11633 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11634 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11635 dev
->nr_queue
= SSD_MSIX_VEC
;
11637 for (i
=0; i
<dev
->nr_queue
; i
++) {
11638 dev
->entry
[i
].entry
= i
;
11641 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11644 } else if (ret
> 0) {
11645 dev
->nr_queue
= ret
;
11647 hio_warn("%s: can not enable msix\n", dev
->name
);
11649 ssd_set_alarm(dev
);
11654 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11655 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11656 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11657 cpu
= cpumask_first(mask
);
11659 for (i
=0; i
<dev
->nr_queue
; i
++) {
11660 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11661 cpu
= cpumask_next(cpu
, mask
);
11662 if (cpu
>= nr_cpu_ids
) {
11663 cpu
= cpumask_first(mask
);
11668 dev
->int_mode
= SSD_INT_MSIX
;
11669 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11670 ret
= pci_enable_msi(dev
->pdev
);
11672 hio_warn("%s: can not enable msi\n", dev
->name
);
11674 ssd_set_alarm(dev
);
11679 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11681 dev
->int_mode
= SSD_INT_MSI
;
11684 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11686 dev
->int_mode
= SSD_INT_LEGACY
;
11689 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11690 dev
->nr_queue
= SSD_MSIX_VEC
;
11692 dev
->nr_queue
= pci_alloc_irq_vectors(dev
->pdev
, 1, dev
->nr_queue
, PCI_IRQ_MSIX
| PCI_IRQ_AFFINITY
);
11693 if (dev
->nr_queue
<= 0) {
11695 hio_warn("%s: can not enable msix\n", dev
->name
);
11696 ssd_set_alarm(dev
);
11700 dev
->int_mode
= SSD_INT_MSIX
;
11701 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11703 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_MSI
| PCI_IRQ_AFFINITY
);
11706 hio_warn("%s: can not enable msi\n", dev
->name
);
11708 ssd_set_alarm(dev
);
11713 dev
->int_mode
= SSD_INT_MSI
;
11715 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_LEGACY
);
11719 hio_warn("%s: can not enable msi\n", dev
->name
);
11721 ssd_set_alarm(dev
);
11726 dev
->int_mode
= SSD_INT_LEGACY
;
11730 for (i
=0; i
<dev
->nr_queue
; i
++) {
11731 if (dev
->nr_queue
> 1) {
11732 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11734 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11737 dev
->queue
[i
].dev
= dev
;
11738 dev
->queue
[i
].idx
= i
;
11740 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11741 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11743 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11744 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11745 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11746 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11748 dev
->queue
[i
].cmd
= dev
->cmd
;
11751 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11752 flags
= IRQF_SHARED
;
11757 for (i
=0; i
<dev
->nr_queue
; i
++) {
11758 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
11759 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11760 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11762 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11764 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11765 if (threaded_irq
) {
11766 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11767 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11768 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11770 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11773 if (threaded_irq
) {
11774 ret
= request_threaded_irq(pci_irq_vector(dev
->pdev
, i
), ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11775 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11776 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11778 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11782 hio_warn("%s: request irq failed\n", dev
->name
);
11784 ssd_set_alarm(dev
);
11785 goto out_request_irq
;
11788 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11789 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11790 if (SSD_INT_MSIX
== dev
->int_mode
) {
11791 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11792 cpu_affinity
= cpumask_first(cpu_mask
);
11795 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11796 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11797 if (cpu_affinity
>= nr_cpu_ids
) {
11798 cpu_affinity
= cpumask_first(cpu_mask
);
11807 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11808 if (SSD_INT_MSIX
== dev
->int_mode
) {
11809 for (j
=0; j
<dev
->nr_queue
; j
++) {
11810 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11815 for (i
--; i
>=0; i
--) {
11816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11817 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11819 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11823 if (SSD_INT_MSIX
== dev
->int_mode
) {
11824 pci_disable_msix(dev
->pdev
);
11825 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11826 pci_disable_msi(dev
->pdev
);
11833 static void ssd_initial_log(struct ssd_device
*dev
)
11836 uint32_t speed
, width
;
11838 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11842 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11844 // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart'
11845 //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
11848 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11850 width
= (val
>> 4)& 0x3F;
11851 if (0x1 == speed
) {
11852 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11853 } else if (0x2 == speed
) {
11854 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11856 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11858 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11863 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11864 static void ssd_hwmon_worker(void *data
)
11866 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11868 static void ssd_hwmon_worker(struct work_struct
*work
)
11870 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11873 if (ssd_check_hw(dev
)) {
11874 //hio_err("%s: check hardware failed\n", dev->name);
11878 ssd_check_clock(dev
);
11879 ssd_check_volt(dev
);
11881 ssd_mon_boardvolt(dev
);
11884 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11885 static void ssd_tempmon_worker(void *data
)
11887 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11889 static void ssd_tempmon_worker(struct work_struct
*work
)
11891 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11894 if (ssd_check_hw(dev
)) {
11895 //hio_err("%s: check hardware failed\n", dev->name);
11903 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11904 static void ssd_capmon_worker(void *data
)
11906 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11908 static void ssd_capmon_worker(struct work_struct
*work
)
11910 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11913 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11916 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11920 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11924 /* fault before? */
11925 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11926 ret
= ssd_check_pl_cap_fast(dev
);
11933 ret
= ssd_do_cap_learn(dev
, &cap
);
11935 hio_err("%s: cap learn failed\n", dev
->name
);
11936 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11940 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11942 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11943 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11946 //use the fw event id?
11947 if (cap
< cap_threshold
) {
11948 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11949 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11951 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11952 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11953 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11958 static void ssd_routine_start(void *data
)
11960 struct ssd_device
*dev
;
11967 dev
->routine_tick
++;
11969 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11970 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11971 queue_work(dev
->workq
, &dev
->log_work
);
11974 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11975 queue_work(dev
->workq
, &dev
->hwmon_work
);
11978 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11979 queue_work(dev
->workq
, &dev
->capmon_work
);
11982 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11983 /* CAP fault? check again */
11984 queue_work(dev
->workq
, &dev
->capmon_work
);
11987 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11988 queue_work(dev
->workq
, &dev
->tempmon_work
);
11991 /* schedule routine */
11992 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11995 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11997 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
12000 (void)ssd_del_timer(&dev
->routine_timer
);
12002 (void)ssd_del_timer(&dev
->bm_timer
);
12005 static int ssd_init_routine(struct ssd_device
*dev
)
12007 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
12010 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12011 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
12012 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
12013 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
12014 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
12016 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
12017 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
12018 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
12019 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
12023 ssd_initial_log(dev
);
12025 /* schedule bm routine */
12026 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
12028 /* schedule routine */
12029 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
12035 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12038 ssd_remove_one (struct pci_dev
*pdev
)
12040 struct ssd_device
*dev
;
12046 dev
= pci_get_drvdata(pdev
);
12051 list_del_init(&dev
->list
);
12053 ssd_unregister_sysfs(dev
);
12055 /* offline firstly */
12056 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12058 /* clean work queue first */
12060 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12061 ssd_cleanup_workq(dev
);
12065 (void)ssd_flush(dev
);
12066 (void)ssd_save_md(dev
);
12070 ssd_save_smart(dev
);
12073 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
12074 ssd_cleanup_blkdev(dev
);
12078 ssd_cleanup_chardev(dev
);
12081 /* clean routine */
12083 ssd_cleanup_routine(dev
);
12086 ssd_cleanup_queue(dev
);
12088 ssd_cleanup_tag(dev
);
12089 ssd_cleanup_thread(dev
);
12093 ssd_cleanup_dcmd(dev
);
12094 ssd_cleanup_cmd(dev
);
12095 ssd_cleanup_response(dev
);
12098 ssd_cleanup_log(dev
);
12101 if (dev
->reload_fw
) { //reload fw
12102 dev
->has_non_0x98_reg_access
= 1;
12103 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12106 /* unmap physical adress */
12107 #ifdef LINUX_SUSE_OS
12108 iounmap(dev
->ctrlp
);
12110 pci_iounmap(pdev
, dev
->ctrlp
);
12113 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12115 pci_disable_device(pdev
);
12117 pci_set_drvdata(pdev
, NULL
);
12123 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12126 ssd_init_one(struct pci_dev
*pdev
,
12127 const struct pci_device_id
*ent
)
12129 struct ssd_device
*dev
;
12133 if (!pdev
|| !ent
) {
12138 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
12141 goto out_alloc_dev
;
12143 memset(dev
, 0, sizeof(struct ssd_device
));
12145 dev
->owner
= THIS_MODULE
;
12147 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
12151 dev
->idx
= ssd_get_index(dev
->slave
);
12152 if (dev
->idx
< 0) {
12154 goto out_get_index
;
12158 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
12159 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
12161 dev
->major
= ssd_major
;
12162 dev
->cmajor
= ssd_cmajor
;
12164 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
12165 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
12166 dev
->major
= ssd_major_sl
;
12170 do_gettimeofday(&tv
);
12171 dev
->reset_time
= tv
.tv_sec
;
12173 atomic_set(&(dev
->refcnt
), 0);
12174 atomic_set(&(dev
->tocnt
), 0);
12176 mutex_init(&dev
->fw_mutex
);
12179 mutex_init(&dev
->gd_mutex
);
12180 dev
->has_non_0x98_reg_access
= 0;
12182 //init in_flight lock
12183 spin_lock_init(&dev
->in_flight_lock
);
12186 pci_set_drvdata(pdev
, dev
);
12188 kref_init(&dev
->kref
);
12190 ret
= pci_enable_device(pdev
);
12192 hio_warn("%s: can not enable device\n", dev
->name
);
12193 goto out_enable_device
;
12196 pci_set_master(pdev
);
12198 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12199 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12201 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12204 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12205 goto out_set_dma_mask
;
12208 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12209 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12211 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12214 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12215 goto out_set_dma_mask
;
12218 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12219 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12221 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12222 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12224 goto out_request_mem_region
;
12227 /* 2.6.9 kernel bug */
12228 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12230 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12232 goto out_pci_iomap
;
12235 ret
= ssd_check_hw(dev
);
12237 hio_err("%s: check hardware failed\n", dev
->name
);
12241 ret
= ssd_init_protocol_info(dev
);
12243 hio_err("%s: init protocol info failed\n", dev
->name
);
12244 goto out_init_protocol_info
;
12248 ssd_clear_alarm(dev
);
12250 ret
= ssd_init_fw_info(dev
);
12252 hio_err("%s: init firmware info failed\n", dev
->name
);
12254 ssd_set_alarm(dev
);
12255 goto out_init_fw_info
;
12263 ret
= ssd_init_rom_info(dev
);
12265 hio_err("%s: init rom info failed\n", dev
->name
);
12267 ssd_set_alarm(dev
);
12268 goto out_init_rom_info
;
12271 ret
= ssd_init_label(dev
);
12273 hio_err("%s: init label failed\n", dev
->name
);
12275 ssd_set_alarm(dev
);
12276 goto out_init_label
;
12279 ret
= ssd_init_workq(dev
);
12281 hio_warn("%s: init workq failed\n", dev
->name
);
12282 goto out_init_workq
;
12284 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12286 ret
= ssd_init_log(dev
);
12288 hio_err("%s: init log failed\n", dev
->name
);
12290 ssd_set_alarm(dev
);
12294 ret
= ssd_init_smart(dev
);
12296 hio_err("%s: init info failed\n", dev
->name
);
12298 ssd_set_alarm(dev
);
12299 goto out_init_smart
;
12303 ret
= ssd_init_hw_info(dev
);
12305 hio_err("%s: init hardware info failed\n", dev
->name
);
12307 ssd_set_alarm(dev
);
12308 goto out_init_hw_info
;
12316 ret
= ssd_init_sensor(dev
);
12318 hio_err("%s: init sensor failed\n", dev
->name
);
12320 ssd_set_alarm(dev
);
12321 goto out_init_sensor
;
12324 ret
= ssd_init_pl_cap(dev
);
12326 hio_err("%s: int pl_cap failed\n", dev
->name
);
12328 ssd_set_alarm(dev
);
12329 goto out_init_pl_cap
;
12333 ret
= ssd_check_init_state(dev
);
12335 hio_err("%s: check init state failed\n", dev
->name
);
12337 ssd_set_alarm(dev
);
12338 goto out_check_init_state
;
12341 ret
= ssd_init_response(dev
);
12343 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12344 goto out_init_response
;
12347 ret
= ssd_init_cmd(dev
);
12349 hio_warn("%s: init msg failed\n", dev
->name
);
12353 ret
= ssd_init_dcmd(dev
);
12355 hio_warn("%s: init cmd failed\n", dev
->name
);
12356 goto out_init_dcmd
;
12359 ret
= ssd_init_irq(dev
);
12361 hio_warn("%s: init irq failed\n", dev
->name
);
12365 ret
= ssd_init_thread(dev
);
12367 hio_warn("%s: init thread failed\n", dev
->name
);
12368 goto out_init_thread
;
12371 ret
= ssd_init_tag(dev
);
12373 hio_warn("%s: init tags failed\n", dev
->name
);
12374 goto out_init_tags
;
12378 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12380 ret
= ssd_init_queue(dev
);
12382 hio_warn("%s: init queue failed\n", dev
->name
);
12383 goto out_init_queue
;
12391 ret
= ssd_init_ot_protect(dev
);
12393 hio_err("%s: int ot_protect failed\n", dev
->name
);
12395 ssd_set_alarm(dev
);
12396 goto out_int_ot_protect
;
12399 ret
= ssd_init_wmode(dev
);
12401 hio_warn("%s: init write mode\n", dev
->name
);
12402 goto out_init_wmode
;
12405 /* init routine after hw is ready */
12406 ret
= ssd_init_routine(dev
);
12408 hio_warn("%s: init routine\n", dev
->name
);
12409 goto out_init_routine
;
12412 ret
= ssd_init_chardev(dev
);
12414 hio_warn("%s: register char device failed\n", dev
->name
);
12415 goto out_init_chardev
;
12419 ret
= ssd_init_blkdev(dev
);
12421 hio_warn("%s: register block device failed\n", dev
->name
);
12422 goto out_init_blkdev
;
12424 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12426 ret
= ssd_register_sysfs(dev
);
12428 hio_warn("%s: register sysfs failed\n", dev
->name
);
12429 goto out_register_sysfs
;
12434 list_add_tail(&dev
->list
, &ssd_list
);
12438 out_register_sysfs
:
12439 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12440 ssd_cleanup_blkdev(dev
);
12444 ssd_cleanup_chardev(dev
);
12449 ssd_cleanup_routine(dev
);
12453 out_int_ot_protect
:
12454 ssd_cleanup_queue(dev
);
12456 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12457 ssd_cleanup_tag(dev
);
12459 ssd_cleanup_thread(dev
);
12463 ssd_cleanup_dcmd(dev
);
12465 ssd_cleanup_cmd(dev
);
12467 ssd_cleanup_response(dev
);
12469 out_check_init_state
:
12476 ssd_cleanup_log(dev
);
12481 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12482 ssd_cleanup_workq(dev
);
12488 out_init_protocol_info
:
12490 #ifdef LINUX_SUSE_OS
12491 iounmap(dev
->ctrlp
);
12493 pci_iounmap(pdev
, dev
->ctrlp
);
12496 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12497 out_request_mem_region
:
12499 pci_disable_device(pdev
);
12501 pci_set_drvdata(pdev
, NULL
);
12509 static void ssd_cleanup_tasklet(void)
12512 for_each_online_cpu(i
) {
12513 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
12517 static int ssd_init_tasklet(void)
12521 for_each_online_cpu(i
) {
12522 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12525 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12527 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
12534 static struct pci_device_id ssd_pci_tbl
[] = {
12535 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12536 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12537 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12538 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12539 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12543 /*driver power management handler for pm_ops*/
12544 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12545 static int ssd_hio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
12548 static int ssd_hio_suspend(struct device
*ddev
)
12550 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12552 struct ssd_device
*dev
;
12559 dev
= pci_get_drvdata(pdev
);
12564 hio_warn("%s: suspend disk start.\n", dev
->name
);
12565 ssd_unregister_sysfs(dev
);
12567 /* offline firstly */
12568 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12570 /* clean work queue first */
12572 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12573 ssd_cleanup_workq(dev
);
12577 (void)ssd_flush(dev
);
12578 (void)ssd_save_md(dev
);
12582 ssd_save_smart(dev
);
12585 /* clean routine */
12587 ssd_cleanup_routine(dev
);
12590 ssd_cleanup_thread(dev
);
12595 ssd_cleanup_log(dev
);
12598 if (dev
->reload_fw
) { //reload fw
12599 dev
->has_non_0x98_reg_access
= 1;
12600 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12603 /* unmap physical adress */
12605 #ifdef LINUX_SUSE_OS
12606 iounmap(dev
->ctrlp
);
12608 pci_iounmap(pdev
, dev
->ctrlp
);
12613 if (dev
->mmio_base
) {
12614 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12615 dev
->mmio_base
= 0;
12618 pci_disable_device(pdev
);
12620 hio_warn("%s: suspend disk finish.\n", dev
->name
);
12626 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12627 static int ssd_hio_resume(struct pci_dev
*pdev
)
12630 static int ssd_hio_resume(struct device
*ddev
)
12632 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12634 struct ssd_device
*dev
= NULL
;
12642 dev
= pci_get_drvdata(pdev
);
12645 goto out_alloc_dev
;
12648 hio_warn("%s: resume disk start.\n", dev
->name
);
12649 ret
= pci_enable_device(pdev
);
12651 hio_warn("%s: can not enable device\n", dev
->name
);
12652 goto out_enable_device
;
12655 pci_set_master(pdev
);
12657 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12658 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12660 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12663 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12664 goto out_set_dma_mask
;
12667 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12668 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12670 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12673 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12674 goto out_set_dma_mask
;
12677 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12678 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12680 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12681 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12683 goto out_request_mem_region
;
12686 /* 2.6.9 kernel bug */
12687 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12689 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12691 goto out_pci_iomap
;
12694 ret
= ssd_check_hw(dev
);
12696 hio_err("%s: check hardware failed\n", dev
->name
);
12701 ssd_clear_alarm(dev
);
12703 ret
= ssd_init_fw_info(dev
);
12705 hio_err("%s: init firmware info failed\n", dev
->name
);
12707 ssd_set_alarm(dev
);
12708 goto out_init_fw_info
;
12716 ret
= ssd_init_rom_info(dev
);
12718 hio_err("%s: init rom info failed\n", dev
->name
);
12720 ssd_set_alarm(dev
);
12721 goto out_init_rom_info
;
12724 ret
= ssd_init_label(dev
);
12726 hio_err("%s: init label failed\n", dev
->name
);
12728 ssd_set_alarm(dev
);
12729 goto out_init_label
;
12732 ret
= ssd_init_workq(dev
);
12734 hio_warn("%s: init workq failed\n", dev
->name
);
12735 goto out_init_workq
;
12737 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12739 ret
= ssd_init_log(dev
);
12741 hio_err("%s: init log failed\n", dev
->name
);
12743 ssd_set_alarm(dev
);
12747 ret
= ssd_init_smart(dev
);
12749 hio_err("%s: init info failed\n", dev
->name
);
12751 ssd_set_alarm(dev
);
12752 goto out_init_smart
;
12756 ret
= ssd_init_hw_info(dev
);
12758 hio_err("%s: init hardware info failed\n", dev
->name
);
12760 ssd_set_alarm(dev
);
12761 goto out_init_hw_info
;
12769 ret
= ssd_init_sensor(dev
);
12771 hio_err("%s: init sensor failed\n", dev
->name
);
12773 ssd_set_alarm(dev
);
12774 goto out_init_sensor
;
12777 ret
= ssd_init_pl_cap(dev
);
12779 hio_err("%s: int pl_cap failed\n", dev
->name
);
12781 ssd_set_alarm(dev
);
12782 goto out_init_pl_cap
;
12786 ret
= ssd_check_init_state(dev
);
12788 hio_err("%s: check init state failed\n", dev
->name
);
12790 ssd_set_alarm(dev
);
12791 goto out_check_init_state
;
12794 //flush all base pointer to ssd
12795 (void)ssd_reload_ssd_ptr(dev
);
12797 ret
= ssd_init_irq(dev
);
12799 hio_warn("%s: init irq failed\n", dev
->name
);
12803 ret
= ssd_init_thread(dev
);
12805 hio_warn("%s: init thread failed\n", dev
->name
);
12806 goto out_init_thread
;
12810 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12817 ret
= ssd_init_ot_protect(dev
);
12819 hio_err("%s: int ot_protect failed\n", dev
->name
);
12821 ssd_set_alarm(dev
);
12822 goto out_int_ot_protect
;
12825 ret
= ssd_init_wmode(dev
);
12827 hio_warn("%s: init write mode\n", dev
->name
);
12828 goto out_init_wmode
;
12831 /* init routine after hw is ready */
12832 ret
= ssd_init_routine(dev
);
12834 hio_warn("%s: init routine\n", dev
->name
);
12835 goto out_init_routine
;
12839 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12843 hio_warn("%s: resume disk finish.\n", dev
->name
);
12849 out_int_ot_protect
:
12850 ssd_cleanup_thread(dev
);
12854 out_check_init_state
:
12861 ssd_cleanup_log(dev
);
12866 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12867 ssd_cleanup_workq(dev
);
12874 #ifdef LINUX_SUSE_OS
12875 iounmap(dev
->ctrlp
);
12877 pci_iounmap(pdev
, dev
->ctrlp
);
12880 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12881 out_request_mem_region
:
12883 pci_disable_device(pdev
);
12888 hio_warn("%s: resume disk fail.\n", dev
->name
);
12893 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12895 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12897 SIMPLE_DEV_PM_OPS(hio_pm_ops
, ssd_hio_suspend
, ssd_hio_resume
);
12900 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12901 struct pci_driver ssd_driver
= {
12902 .name
= MODULE_NAME
,
12903 .id_table
= ssd_pci_tbl
,
12904 .probe
= ssd_init_one
,
12905 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12906 .remove
= __devexit_p(ssd_remove_one
),
12908 .remove
= ssd_remove_one
,
12911 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12912 .suspend
= ssd_hio_suspend
,
12913 .resume
= ssd_hio_resume
,
12921 /* notifier block to get a notify on system shutdown/halt/reboot */
12922 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12924 struct ssd_device
*dev
= NULL
;
12925 struct ssd_device
*n
= NULL
;
12927 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12928 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12930 (void)ssd_flush(dev
);
12931 (void)ssd_save_md(dev
);
12935 ssd_save_smart(dev
);
12937 ssd_stop_workq(dev
);
12939 if (dev
->reload_fw
) {
12940 dev
->has_non_0x98_reg_access
= 1;
12941 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12949 static struct notifier_block ssd_notifier
= {
12950 ssd_notify_reboot
, NULL
, 0
12953 static int __init
ssd_init_module(void)
12957 hio_info("driver version: %s\n", DRIVER_VERSION
);
12959 ret
= ssd_init_index();
12961 hio_warn("init index failed\n");
12962 goto out_init_index
;
12965 ret
= ssd_init_proc();
12967 hio_warn("init proc failed\n");
12968 goto out_init_proc
;
12971 ret
= ssd_init_sysfs();
12973 hio_warn("init sysfs failed\n");
12974 goto out_init_sysfs
;
12977 ret
= ssd_init_tasklet();
12979 hio_warn("init tasklet failed\n");
12980 goto out_init_tasklet
;
12983 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12984 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12986 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12988 if (IS_ERR(ssd_class
)) {
12989 ret
= PTR_ERR(ssd_class
);
12990 goto out_class_create
;
12993 if (ssd_cmajor
> 0) {
12994 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12996 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12999 hio_warn("unable to register chardev major number\n");
13000 goto out_register_chardev
;
13003 if (ssd_major
> 0) {
13004 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13006 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13009 hio_warn("unable to register major number\n");
13010 goto out_register_blkdev
;
13013 if (ssd_major_sl
> 0) {
13014 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13016 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13019 hio_warn("unable to register slave major number\n");
13020 goto out_register_blkdev_sl
;
13023 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
13024 mode
= SSD_DRV_MODE_STANDARD
;
13028 if (mode
!= SSD_DRV_MODE_STANDARD
) {
13032 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
13033 int_mode
= SSD_INT_MODE_DEFAULT
;
13036 if (threaded_irq
) {
13037 int_mode
= SSD_INT_MSI
;
13040 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
13041 log_level
= SSD_LOG_LEVEL_ERR
;
13044 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
13045 wmode
= SSD_WMODE_DEFAULT
;
13048 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
13049 ret
= pci_module_init(&ssd_driver
);
13051 ret
= pci_register_driver(&ssd_driver
);
13054 hio_warn("pci init failed\n");
13058 ret
= register_reboot_notifier(&ssd_notifier
);
13060 hio_warn("register reboot notifier failed\n");
13061 goto out_register_reboot_notifier
;
13066 out_register_reboot_notifier
:
13068 pci_unregister_driver(&ssd_driver
);
13069 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13070 out_register_blkdev_sl
:
13071 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13072 out_register_blkdev
:
13073 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13074 out_register_chardev
:
13075 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13076 class_simple_destroy(ssd_class
);
13078 class_destroy(ssd_class
);
13081 ssd_cleanup_tasklet();
13083 ssd_cleanup_sysfs();
13085 ssd_cleanup_proc();
13087 ssd_cleanup_index();
13093 static void __exit
ssd_cleanup_module(void)
13096 hio_info("unload driver: %s\n", DRIVER_VERSION
);
13100 unregister_reboot_notifier(&ssd_notifier
);
13102 pci_unregister_driver(&ssd_driver
);
13104 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13105 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13106 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13107 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13108 class_simple_destroy(ssd_class
);
13110 class_destroy(ssd_class
);
13113 ssd_cleanup_tasklet();
13114 ssd_cleanup_sysfs();
13115 ssd_cleanup_proc();
13116 ssd_cleanup_index();
13119 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
13121 struct ssd_device
*dev
;
13123 struct ssd_log
*le
, *temp_le
= NULL
;
13128 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
13132 dev
= bdev
->bd_disk
->private_data
;
13133 dev
->event_call
= event_call
;
13135 do_gettimeofday(&tv
);
13138 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
13139 log_nr
= dev
->internal_log
.nr_log
;
13142 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
13143 if ((le
->le
.event
== SSD_LOG_SEU_FAULT1
) && (le
->time
< dev
->reset_time
)) {
13147 if (le
->le
.event
== SSD_LOG_OVER_TEMP
|| le
->le
.event
== SSD_LOG_NORMAL_TEMP
|| le
->le
.event
== SSD_LOG_WARN_TEMP
) {
13148 if (!temp_le
|| le
->time
>= temp_le
->time
) {
13154 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
13159 ssd_get_temperature(bdev
, &temp
);
13160 if (temp_le
&& (temp
>= SSD_OT_TEMP_HYST
)) {
13161 (void)dev
->event_call(dev
->gd
, temp_le
->le
.event
, ssd_parse_log(dev
, temp_le
, 0));
13167 int ssd_unregister_event_notifier(struct block_device
*bdev
)
13169 struct ssd_device
*dev
;
13171 if (!bdev
|| !(bdev
->bd_disk
)) {
13175 dev
= bdev
->bd_disk
->private_data
;
13176 dev
->event_call
= NULL
;
13181 EXPORT_SYMBOL(ssd_get_label
);
13182 EXPORT_SYMBOL(ssd_get_version
);
13183 EXPORT_SYMBOL(ssd_set_otprotect
);
13184 EXPORT_SYMBOL(ssd_bm_status
);
13185 EXPORT_SYMBOL(ssd_submit_pbio
);
13186 EXPORT_SYMBOL(ssd_get_pciaddr
);
13187 EXPORT_SYMBOL(ssd_get_temperature
);
13188 EXPORT_SYMBOL(ssd_register_event_notifier
);
13189 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
13190 EXPORT_SYMBOL(ssd_reset
);
13191 EXPORT_SYMBOL(ssd_set_wmode
);
13195 module_init(ssd_init_module
);
13196 module_exit(ssd_cleanup_module
);
13197 MODULE_VERSION(DRIVER_VERSION
);
13198 MODULE_LICENSE("GPL");
13199 MODULE_AUTHOR("Huawei SSD DEV Team");
13200 MODULE_DESCRIPTION("Huawei SSD driver");