2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
65 #define MODULE_NAME "hio"
66 #define DRIVER_VERSION "2.1.0.40"
67 #define DRIVER_VERSION_LEN 16
69 #define SSD_FW_MIN 0x1
71 #define SSD_DEV_NAME MODULE_NAME
72 #define SSD_DEV_NAME_LEN 16
73 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
74 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
79 #define SSD_MAJOR_SL 0
82 #define SSD_MAX_DEV 702
83 #define SSD_ALPHABET_NUM 26
/* Leveled kernel-log helpers. MODULE_NAME is concatenated with the level
 * tag, so messages render as e.g. "hioinfo: ..." / "hioerr: ..." */
#define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
#define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
#define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
#define hio_err(f, arg...)  printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
91 #define SSD_SLAVE_PORT_DEVID 0x000a
95 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
97 #define SSD_ESCAPE_IRQ
103 #define SSD_MSIX_VEC 8
106 #undef SSD_ESCAPE_IRQ
107 #define SSD_MSIX_AFFINITY_FORCE
112 /* Over temperature protect */
113 #define SSD_OT_PROTECT
115 #ifdef SSD_QUEUE_PBIO
116 #define BIO_SSD_PBIO 20
120 //#define SSD_DEBUG_ERR
123 #define SSD_CMD_TIMEOUT (60*HZ)
126 #define SSD_SPI_TIMEOUT (5*HZ)
127 #define SSD_I2C_TIMEOUT (5*HZ)
129 #define SSD_I2C_MAX_DATA (127)
130 #define SSD_SMBUS_BLOCK_MAX (32)
131 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
134 #define SSD_INIT_WAIT (1000) //1s
135 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
136 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
137 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
138 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
139 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
141 /* blkdev busy wait */
142 #define SSD_DEV_BUSY_WAIT 1000 //ms
143 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
146 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
147 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
149 #define SSD_BM_RETRY_MAX 7
151 /* bm routine interval */
152 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
154 /* routine interval */
155 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
156 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
157 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
158 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
161 #define SSD_DMA_ALIGN (16)
163 /* some hw defalut */
164 #define SSD_LOG_MAX_SZ 4096
166 #define SSD_NAND_OOB_SZ 1024
167 #define SSD_NAND_ID_SZ 8
168 #define SSD_NAND_ID_BUFF_SZ 1024
169 #define SSD_NAND_MAX_CE 2
171 #define SSD_BBT_RESERVED 8
173 #define SSD_ECC_MAX_FLIP (64+1)
175 #define SSD_RAM_ALIGN 16
178 #define SSD_RELOAD_FLAG 0x3333CCCC
179 #define SSD_RELOAD_FW 0xAA5555AA
180 #define SSD_RESET_NOINIT 0xAA5555AA
181 #define SSD_RESET 0x55AAAA55
182 #define SSD_RESET_FULL 0x5A
183 //#define SSD_RESET_WAIT 1000 //1s
184 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
188 #define SSD_PROTOCOL_V1 0x0
190 #define SSD_ROM_SIZE (16*1024*1024)
191 #define SSD_ROM_BLK_SIZE (256*1024)
192 #define SSD_ROM_PAGE_SIZE (256)
193 #define SSD_ROM_NR_BRIDGE_FW 2
194 #define SSD_ROM_NR_CTRL_FW 2
195 #define SSD_ROM_BRIDGE_FW_BASE 0
196 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
197 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
198 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
199 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
200 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
203 #define SSD_PROTOCOL_V3 0x3000000
204 #define SSD_PROTOCOL_V3_1_1 0x3010001
205 #define SSD_PROTOCOL_V3_1_3 0x3010003
206 #define SSD_PROTOCOL_V3_2 0x3020000
207 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
208 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
209 #define SSD_PROTOCOL_V3_2_4 0x3020004
212 #define SSD_PV3_ROM_NR_BM_FW 1
213 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
215 #define SSD_ROM_LOG_SZ (64*1024*4)
217 #define SSD_ROM_NR_SMART_MAX 2
218 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
219 #define SSD_PV3_ROM_SMART_SZ (64*1024)
222 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
223 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
227 #define SSD_REQ_FIFO_REG 0x0000
228 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
229 #define SSD_RESP_PTR_REG 0x0010 //0x0018
230 #define SSD_INTR_INTERVAL_REG 0x0018
231 #define SSD_READY_REG 0x001C
232 #define SSD_BRIDGE_TEST_REG 0x0020
233 #define SSD_STRIPE_SIZE_REG 0x0028
234 #define SSD_CTRL_VER_REG 0x0030 //controller
235 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
236 #define SSD_PCB_VER_REG 0x0038
237 #define SSD_BURN_FLAG_REG 0x0040
238 #define SSD_BRIDGE_INFO_REG 0x0044
240 #define SSD_WL_VAL_REG 0x0048 //32-bit
242 #define SSD_BB_INFO_REG 0x004C
244 #define SSD_ECC_TEST_REG 0x0050 //test only
245 #define SSD_ERASE_TEST_REG 0x0058 //test only
246 #define SSD_WRITE_TEST_REG 0x0060 //test only
248 #define SSD_RESET_REG 0x0068
249 #define SSD_RELOAD_FW_REG 0x0070
251 #define SSD_RESERVED_BLKS_REG 0x0074
252 #define SSD_VALID_PAGES_REG 0x0078
253 #define SSD_CH_INFO_REG 0x007C
255 #define SSD_CTRL_TEST_REG_SZ 0x8
256 #define SSD_CTRL_TEST_REG0 0x0080
257 #define SSD_CTRL_TEST_REG1 0x0088
258 #define SSD_CTRL_TEST_REG2 0x0090
259 #define SSD_CTRL_TEST_REG3 0x0098
260 #define SSD_CTRL_TEST_REG4 0x00A0
261 #define SSD_CTRL_TEST_REG5 0x00A8
262 #define SSD_CTRL_TEST_REG6 0x00B0
263 #define SSD_CTRL_TEST_REG7 0x00B8
265 #define SSD_FLASH_INFO_REG0 0x00C0
266 #define SSD_FLASH_INFO_REG1 0x00C8
267 #define SSD_FLASH_INFO_REG2 0x00D0
268 #define SSD_FLASH_INFO_REG3 0x00D8
269 #define SSD_FLASH_INFO_REG4 0x00E0
270 #define SSD_FLASH_INFO_REG5 0x00E8
271 #define SSD_FLASH_INFO_REG6 0x00F0
272 #define SSD_FLASH_INFO_REG7 0x00F8
274 #define SSD_RESP_INFO_REG 0x01B8
275 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
277 #define SSD_CHIP_INFO_REG_SZ 0x10
278 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
279 #define SSD_CHIP_INFO_REG1 0x0110
280 #define SSD_CHIP_INFO_REG2 0x0120
281 #define SSD_CHIP_INFO_REG3 0x0130
282 #define SSD_CHIP_INFO_REG4 0x0140
283 #define SSD_CHIP_INFO_REG5 0x0150
284 #define SSD_CHIP_INFO_REG6 0x0160
285 #define SSD_CHIP_INFO_REG7 0x0170
287 #define SSD_RAM_INFO_REG 0x01C4
289 #define SSD_BBT_BASE_REG 0x01C8
290 #define SSD_ECT_BASE_REG 0x01CC
292 #define SSD_CLEAR_INTR_REG 0x01F0
294 #define SSD_INIT_STATE_REG_SZ 0x8
295 #define SSD_INIT_STATE_REG0 0x0200
296 #define SSD_INIT_STATE_REG1 0x0208
297 #define SSD_INIT_STATE_REG2 0x0210
298 #define SSD_INIT_STATE_REG3 0x0218
299 #define SSD_INIT_STATE_REG4 0x0220
300 #define SSD_INIT_STATE_REG5 0x0228
301 #define SSD_INIT_STATE_REG6 0x0230
302 #define SSD_INIT_STATE_REG7 0x0238
304 #define SSD_ROM_INFO_REG 0x0600
305 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
306 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
307 #define SSD_ROM_VP_INFO_REG 0x060C
309 #define SSD_LOG_INFO_REG 0x0610
310 #define SSD_LED_REG 0x0614
311 #define SSD_MSG_BASE_REG 0x06F8
314 #define SSD_SPI_REG_CMD 0x0180
315 #define SSD_SPI_REG_CMD_HI 0x0184
316 #define SSD_SPI_REG_WDATA 0x0188
317 #define SSD_SPI_REG_ID 0x0190
318 #define SSD_SPI_REG_STATUS 0x0198
319 #define SSD_SPI_REG_RDATA 0x01A0
320 #define SSD_SPI_REG_READY 0x01A8
323 #define SSD_I2C_CTRL_REG 0x06F0
324 #define SSD_I2C_RDATA_REG 0x06F4
326 /* temperature reg */
327 #define SSD_BRIGE_TEMP_REG 0x0618
329 #define SSD_CTRL_TEMP_REG0 0x0700
330 #define SSD_CTRL_TEMP_REG1 0x0708
331 #define SSD_CTRL_TEMP_REG2 0x0710
332 #define SSD_CTRL_TEMP_REG3 0x0718
333 #define SSD_CTRL_TEMP_REG4 0x0720
334 #define SSD_CTRL_TEMP_REG5 0x0728
335 #define SSD_CTRL_TEMP_REG6 0x0730
336 #define SSD_CTRL_TEMP_REG7 0x0738
338 /* reversion 3 reg */
339 #define SSD_PROTOCOL_VER_REG 0x01B4
341 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
342 #define SSD_BM_FAULT_REG 0x0660
344 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
345 #define SSD_PV3_RAM_STATUS_REG0 0x0260
346 #define SSD_PV3_RAM_STATUS_REG1 0x0264
347 #define SSD_PV3_RAM_STATUS_REG2 0x0268
348 #define SSD_PV3_RAM_STATUS_REG3 0x026C
349 #define SSD_PV3_RAM_STATUS_REG4 0x0270
350 #define SSD_PV3_RAM_STATUS_REG5 0x0274
351 #define SSD_PV3_RAM_STATUS_REG6 0x0278
352 #define SSD_PV3_RAM_STATUS_REG7 0x027C
354 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
355 #define SSD_PV3_CHIP_INFO_REG0 0x0300
356 #define SSD_PV3_CHIP_INFO_REG1 0x0340
357 #define SSD_PV3_CHIP_INFO_REG2 0x0380
358 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
359 #define SSD_PV3_CHIP_INFO_REG4 0x0400
360 #define SSD_PV3_CHIP_INFO_REG5 0x0440
361 #define SSD_PV3_CHIP_INFO_REG6 0x0480
362 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
364 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
365 #define SSD_PV3_INIT_STATE_REG0 0x0500
366 #define SSD_PV3_INIT_STATE_REG1 0x0520
367 #define SSD_PV3_INIT_STATE_REG2 0x0540
368 #define SSD_PV3_INIT_STATE_REG3 0x0560
369 #define SSD_PV3_INIT_STATE_REG4 0x0580
370 #define SSD_PV3_INIT_STATE_REG5 0x05A0
371 #define SSD_PV3_INIT_STATE_REG6 0x05C0
372 #define SSD_PV3_INIT_STATE_REG7 0x05E0
374 /* reversion 3.1.1 reg */
375 #define SSD_FULL_RESET_REG 0x01B0
377 #define SSD_CTRL_REG_ZONE_SZ 0x800
379 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
380 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
382 #define SSD_BB_ACC_REG_SZ 0x4
383 #define SSD_BB_ACC_REG0 0x21C0
384 #define SSD_BB_ACC_REG1 0x29C0
385 #define SSD_BB_ACC_REG2 0x31C0
387 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
388 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
390 #define SSD_EC_ACC_REG_SZ 0x4
391 #define SSD_EC_ACC_REG0 0x21E0
392 #define SSD_EC_ACC_REG1 0x29E0
393 #define SSD_EC_ACC_REG2 0x31E0
395 /* reversion 3.1.2 & 3.1.3 reg */
396 #define SSD_HW_STATUS_REG 0x02AC
398 #define SSD_PLP_INFO_REG 0x0664
400 /*reversion 3.2 reg*/
401 #define SSD_POWER_ON_REG 0x01EC
402 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
403 #define SSD_PL_CAP_LEARN_REG 0x01FC
405 #define SSD_FPGA_1V0_REG0 0x2070
406 #define SSD_FPGA_1V8_REG0 0x2078
407 #define SSD_FPGA_1V0_REG1 0x2870
408 #define SSD_FPGA_1V8_REG1 0x2878
410 /*reversion 3.2 reg*/
411 #define SSD_READ_OT_REG0 0x2260
412 #define SSD_WRITE_OT_REG0 0x2264
413 #define SSD_READ_OT_REG1 0x2A60
414 #define SSD_WRITE_OT_REG1 0x2A64
418 #define SSD_FUNC_READ 0x01
419 #define SSD_FUNC_WRITE 0x02
420 #define SSD_FUNC_NAND_READ_WOOB 0x03
421 #define SSD_FUNC_NAND_READ 0x04
422 #define SSD_FUNC_NAND_WRITE 0x05
423 #define SSD_FUNC_NAND_ERASE 0x06
424 #define SSD_FUNC_NAND_READ_ID 0x07
425 #define SSD_FUNC_READ_LOG 0x08
426 #define SSD_FUNC_TRIM 0x09
427 #define SSD_FUNC_RAM_READ 0x10
428 #define SSD_FUNC_RAM_WRITE 0x11
429 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
432 #define SSD_SPI_CMD_PROGRAM 0x02
433 #define SSD_SPI_CMD_READ 0x03
434 #define SSD_SPI_CMD_W_DISABLE 0x04
435 #define SSD_SPI_CMD_READ_STATUS 0x05
436 #define SSD_SPI_CMD_W_ENABLE 0x06
437 #define SSD_SPI_CMD_ERASE 0xd8
438 #define SSD_SPI_CMD_CLSR 0x30
439 #define SSD_SPI_CMD_READ_ID 0x9f
442 #define SSD_I2C_CTRL_READ 0x00
443 #define SSD_I2C_CTRL_WRITE 0x01
445 /* i2c internal register */
446 #define SSD_I2C_CFG_REG 0x00
447 #define SSD_I2C_DATA_REG 0x01
448 #define SSD_I2C_CMD_REG 0x02
449 #define SSD_I2C_STATUS_REG 0x03
450 #define SSD_I2C_SADDR_REG 0x04
451 #define SSD_I2C_LEN_REG 0x05
452 #define SSD_I2C_RLEN_REG 0x06
453 #define SSD_I2C_WLEN_REG 0x07
454 #define SSD_I2C_RESET_REG 0x08 //write for reset
455 #define SSD_I2C_PRER_REG 0x09
459 /* FPGA volt = ADC_value / 4096 * 3v */
460 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
461 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
462 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
463 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
/* Unpack the max/min/current 12-bit ADC fields from the 64-bit FPGA
 * voltage register (each field is 16 bits wide, value in bits 15:4).
 * Fix: every use of the macro parameter is parenthesized — the original
 * `val >> 16` / `val * 3000` forms mis-evaluate compound arguments
 * such as SSD_FPGA_VOLT(a + b). */
#define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_MIN(val) ((((val) >> 16) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_CUR(val) ((((val) >> 32) & 0xffff) >> 4)
/* ADC-to-millivolt: volt = adc / 4096 * 3v (see comment above) */
#define SSD_FPGA_VOLT(val) (((val) * 3000) >> 12)
471 #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt))
482 SSD_CLOCK_166M_LOST
= 0,
/* I2C slave addresses of the on-board temperature sensors (8-bit form,
 * 7-bit address shifted left by one) */
#define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
#define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)

/* Temperature lives in the high byte of the raw 16-bit sensor word.
 * Fix: parenthesize the macro parameter — `val >> 8` breaks for
 * compound arguments like SSD_SENSOR_CONVERT_TEMP(a | b). */
#define SSD_SENSOR_CONVERT_TEMP(val) ((int)((val) >> 8))

/* over-temperature trip points and hysteresis release points */
#define SSD_INLET_OT_TEMP (55) //55 DegC
#define SSD_INLET_OT_HYST (50) //50 DegC
#define SSD_FLASH_OT_TEMP (70) //70 DegC
#define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
518 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
519 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
520 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
522 #define SSD_LM80_REG_FAN1 0x28
523 #define SSD_LM80_REG_FAN2 0x29
524 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
526 #define SSD_LM80_REG_TEMP 0x27
527 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
528 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
529 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
530 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
532 #define SSD_LM80_REG_CONFIG 0x00
533 #define SSD_LM80_REG_ALARM1 0x01
534 #define SSD_LM80_REG_ALARM2 0x02
535 #define SSD_LM80_REG_MASK1 0x03
536 #define SSD_LM80_REG_MASK2 0x04
537 #define SSD_LM80_REG_FANDIV 0x05
538 #define SSD_LM80_REG_RES 0x06
/* LM80 raw reading to voltage units (value * 10 / 256).
 * Fix: parenthesize the macro parameter — the original `val * 10`
 * mis-evaluates compound arguments like CONVERT_VOLT(a + b). */
#define SSD_LM80_CONVERT_VOLT(val) (((val) * 10) >> 8)

/* 3.3V rail is sensed through a 19/33 divider; scale back up */
#define SSD_LM80_3V3_VOLT(val) ((val)*33/19)

#define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
585 #ifdef SSD_OT_PROTECT
586 #define SSD_OT_DELAY (60) //ms
588 #define SSD_OT_TEMP (90) //90 DegC
590 #define SSD_OT_TEMP_HYST (85) //85 DegC
593 /* fpga temperature */
/* fpga temperature: integer approximation of adc*503.975/4096 - 273.15 */
//#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
#define CONVERT_TEMP(val) ((val)*504/4096-273)

/* max/min/current 12-bit fields of the 64-bit temperature register
 * (same packing as the FPGA voltage register).
 * Fix: parenthesize the macro parameter — `val & 0xffff` / `val>>16`
 * mis-evaluate compound arguments such as MAX_TEMP(a | b). */
#define MAX_TEMP(val) CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val) CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val) CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))
/* power-loss-protection capacitor monitoring (via LM80 voltage inputs) */
#define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
#define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
/* capacitance estimate from discharge time t and voltages u1/u2.
 * Fix: every macro parameter use is parenthesized — the original left
 * `t` and the inner u1/u2 uses bare, mis-evaluating compound args. */
#define SSD_PL_CAP_LEARN(u1, u2, t) (((t)*((u1)+(u2)))/(2*162*((u1)-(u2))))
#define SSD_PL_CAP_LEARN_WAIT (20) //20ms
#define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s

#define SSD_PL_CAP_CHARGE_WAIT (1000)
#define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s

/* raw reading to millivolts (fix: parenthesize `val`) */
#define SSD_PL_CAP_VOLT(val) ((val)*7)

#define SSD_PL_CAP_VOLT_FULL (13700)
#define SSD_PL_CAP_VOLT_READY (12880)

#define SSD_PL_CAP_THRESHOLD (8900)
#define SSD_PL_CAP_CP_THRESHOLD (5800)
#define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
635 #define SSD_HWMON_OFFS_TEMP (0)
636 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
637 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
638 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
639 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
640 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
642 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
643 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
644 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
645 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
646 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
647 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
/* 2.6.30+ kernels provide struct bio_list; reuse it for the driver's
 * send/done queues instead of the hand-rolled ssd_blist above */
#define ssd_blist bio_list
#define ssd_blist_init bio_list_init
#define ssd_blist_get bio_list_get
#define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
/* pre-2.6.16 kernels have no mutex API: emulate it with semaphores */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
#define mutex_lock down
#define mutex_unlock up
#define mutex semaphore
#define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1401 uint64_t reset_time
;
1402 int has_non_0x98_reg_access
;
1403 spinlock_t in_flight_lock
;
1405 uint64_t last_poweron_id
;
1411 typedef struct ssd_acc_info
{
1412 uint32_t threshold_l1
;
1413 uint32_t threshold_l2
;
1417 typedef struct ssd_reg_op_info
1421 } ssd_reg_op_info_t
;
1423 typedef struct ssd_spi_op_info
1428 } ssd_spi_op_info_t
;
1430 typedef struct ssd_i2c_op_info
1437 } ssd_i2c_op_info_t
;
1439 typedef struct ssd_smbus_op_info
1445 } ssd_smbus_op_info_t
;
1447 typedef struct ssd_ram_op_info
{
1451 uint8_t __user
*buf
;
1452 } ssd_ram_op_info_t
;
1454 typedef struct ssd_flash_op_info
{
1459 uint8_t __user
*buf
;
1460 } ssd_flash_op_info_t
;
1462 typedef struct ssd_sw_log_info
{
1466 } ssd_sw_log_info_t
;
1468 typedef struct ssd_version_info
1470 uint32_t bridge_ver
; /* bridge fw version */
1471 uint32_t ctrl_ver
; /* controller fw version */
1472 uint32_t bm_ver
; /* battery manager fw version */
1473 uint8_t pcb_ver
; /* main pcb version */
1474 uint8_t upper_pcb_ver
;
1477 } ssd_version_info_t
;
1479 typedef struct pci_addr
1487 typedef struct ssd_drv_param_info
{
1497 } ssd_drv_param_info_t
;
1501 enum ssd_form_factor
1503 SSD_FORM_FACTOR_HHHL
= 0,
1504 SSD_FORM_FACTOR_FHHL
1508 /* ssd power loss protect */
1517 #define SSD_BM_SLAVE_ADDRESS 0x16
1518 #define SSD_BM_CAP 5
1521 #define SSD_BM_SAFETYSTATUS 0x51
1522 #define SSD_BM_OPERATIONSTATUS 0x54
1524 /* ManufacturerAccess */
1525 #define SSD_BM_MANUFACTURERACCESS 0x00
1526 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1528 /* Data flash access */
1529 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1530 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1531 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1532 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1534 /* min cap voltage */
1535 #define SSD_BM_CAP_VOLT_MIN 500
1540 SSD_BM_CAP_VINA = 1,
1546 SSD_BMSTATUS_OK
= 0,
1547 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1548 SSD_BMSTATUS_WARNING
1553 SBS_UNIT_TEMPERATURE
,
1558 SBS_UNIT_CAPACITANCE
1586 uint16_t cap_volt
[SSD_BM_CAP
];
1593 struct ssd_bm_manufacturer_data
1595 uint16_t pack_lot_code
;
1596 uint16_t pcb_lot_code
;
1597 uint16_t firmware_ver
;
1598 uint16_t hardware_ver
;
1601 struct ssd_bm_configuration_registers
1614 uint16_t fet_action
;
1619 #define SBS_VALUE_MASK 0xffff
1621 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1622 #define bm_var(start, offset) ((void *) start + (offset))
1624 static struct sbs_cmd ssd_bm_sbs
[] = {
1625 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1626 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1627 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1628 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1629 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1630 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1631 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1632 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1633 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1634 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1635 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1636 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1637 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1638 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1639 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1640 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1641 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1642 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1643 {0, 0, 0, 0, 0, NULL
},
/* Character-device ioctl commands (magic 'H'). The struct/scalar type in each
 * _IOR/_IOW/_IOWR encodes the user-space buffer copied by the ioctl handler. */

/* device information */
#define SSD_CMD_GET_PROTOCOL_INFO	_IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO		_IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO		_IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART		_IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX			_IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT		_IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO		_IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER		_IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO		_IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO		_IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT		_IOR('H', 111, struct ssd_hw_info_extend)

/* raw register access */
#define SSD_CMD_REG_READ		_IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE		_IOWR('H', 121, struct ssd_reg_op_info)

/* SPI flash (firmware ROM) access */
#define SSD_CMD_SPI_READ		_IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE		_IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE		_IOWR('H', 127, struct ssd_spi_op_info)

/* I2C / SMBus transactions */
#define SSD_CMD_I2C_READ		_IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE		_IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ		_IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE		_IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE	_IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE	_IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE		_IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD	_IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD		_IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK	_IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK	_IOWR('H', 139, struct ssd_smbus_op_info)

/* battery/capacitor backup module */
#define SSD_CMD_BM_GET_VER		_IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP		_IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING		_IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN		_IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS		_IOR('H', 144, int)

/* on-card RAM access */
#define SSD_CMD_RAM_READ		_IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE		_IOWR('H', 151, struct ssd_ram_op_info)

/* raw NAND access */
#define SSD_CMD_NAND_READ_ID		_IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ		_IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE		_IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE		_IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT		_IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO

#define SSD_CMD_UPDATE_BBT		_IOW('H', 180, struct ssd_flash_op_info)

/* alarm LED control */
#define SSD_CMD_CLEAR_ALARM		_IOW('H', 190, int)
#define SSD_CMD_SET_ALARM		_IOW('H', 191, int)

/* device reset / load control */
#define SSD_CMD_RESET			_IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW		_IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV		_IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV		_IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP		_IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET		_IOW('H', 206, int)

/* log retrieval and control */
#define SSD_CMD_GET_NR_LOG		_IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG			_IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL		_IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT		_IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS		_IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG		_IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART		_IOW('H', 231, int)

#define SSD_CMD_SW_LOG			_IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL		_IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION		_IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE		_IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS		_IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2		_IOR('H', 239, void *)

/* cache / metadata flush and write-mode control */
#define SSD_CMD_FLUSH			_IOW('H', 240, int)
#define SSD_CMD_SAVE_MD			_IOW('H', 241, int)

#define SSD_CMD_SET_WMODE		_IOW('H', 242, int)
#define SSD_CMD_GET_WMODE		_IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE		_IOR('H', 244, int)

#define SSD_CMD_DEBUG			_IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO		_IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING		_IOW('H', 260, int)

/* hardware log buffer size and default display level */
#define SSD_LOG_MAX_SZ		4096
#define SSD_LOG_LEVEL		SSD_LOG_LEVEL_NOTICE
#define SSD_DIF_WITH_OLD_LOG	0x3f
1747 SSD_LOG_DATA_NONE
= 0,
1752 typedef struct ssd_log_entry
1770 }__attribute__((packed
))ssd_log_entry_t
;
1772 typedef struct ssd_log
1775 uint64_t ctrl_idx
:8;
1777 } __attribute__((packed
)) ssd_log_t
;
1779 typedef struct ssd_log_desc
1787 } __attribute__((packed
)) ssd_log_desc_t
;
/* log-source index used for driver(software)-generated log entries */
#define SSD_LOG_SW_IDX		0xF
/* sentinel event id: marks the catch-all terminator in ssd_log_desc[] */
#define SSD_UNKNOWN_EVENT	((uint16_t)-1)
/* Lookup table describing every known hardware log event: its code, the
 * severity it is reported at, how its payload is rendered (none / flash
 * location / raw hex), whether block and page numbers are shown, and the
 * human-readable message. Terminated by the SSD_UNKNOWN_EVENT entry. */
static struct ssd_log_desc ssd_log_desc[] = {
	/* event, level, show flash, show block, show page, desc */
	{0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3
	{0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3
	{0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"},
	{0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"},
	{0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"},
	{0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"},
	{0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3
	{0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"},
	{0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"},
	{0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err
	{0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3
	{0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"},
	{0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"},
	{0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3
	{0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"},
	{0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"},
	{0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"},
	{0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err
	{0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err
	{0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3
	{0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3
	{0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"},
	{0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"},
	{0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"},
	{0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Data inspection failure"},
	{0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"},

	{0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err
	{0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"},
	{0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"},
	{0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"},
	{0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"},
	{0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"},
	{0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"},
	{0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"},
	{0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"},
	{0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"},
	{0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err
	{0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"},
	{0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err
	{0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err
	{0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"},
	{0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"},
	{0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"},
	{0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err
	{0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"},
	{0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"},
	{0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"},
	{0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"},
	{0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err
	{0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"},
	{0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"},

	{0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err
	{0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err
	{0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //?
	{0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err

	{0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"},

	{0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"},
	{0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"},
	{0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"},
	{0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"},
	{0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"},
	{0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"},
	{0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"},
	{0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"},
	{0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"},
	{0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"},
	{0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"},
	{0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"},
	{0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"},
	{0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"},
	{0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"},
	{0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"},

	{0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"},
	{0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"},
	{0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"},
	{0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //?
	{0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"},

	{0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"},
	{0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err
	{0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"},
	{0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"},
	{0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"},
	{0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"},

	{0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Over temperature"}, //90
	{0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature is OK"}, //80
	{0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"},
	{0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err
	{0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err
	{0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err
	{0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err
	{0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err
	{0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err
	{0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"},
	{0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature close to limit"}, //85

	{0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"},
	{0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"},
	{0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" },
	{0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" },
	{0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" },

	{0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"},
	{0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"},
	{0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"},
	{0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"},
	{0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"},
	{0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"},
	{0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"},
	{0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"},
	{0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"},
	{0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
	{0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"},
	{0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"},
	{0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"},
	{0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 95 degrees C"},
	{0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 100 degrees C"},

	/* software (driver-generated) events, see SSD_LOG_* below */
	{0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"},
	{0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"},
	{0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"},
	{0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"},
	{0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"},
	{0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"},
	{0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"},
	{0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"},
	{0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"},
	{0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"},
	{0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" },
	{0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" },
	{0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" },
	{0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" },
	{0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" },
	{0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"},
	{0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"},
	{0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"},
	{0x312, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"},
	{0x313, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"},
	{0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"},
	{0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"},
	{0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55
	{0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50
	{0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash over temperature"}, //70
	{0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash temperature is OK"}, //65
	{0x31a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"},
	{0x31b, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"},
	{0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"},
	{0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"},
	{0x320, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Temperature sensor event"},

	{0x350, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear smart"},
	{0x351, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear warning"},

	/* catch-all terminator: must stay last */
	{SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"},
};
/* hardware-reported event codes the driver reacts to specially */
#define SSD_LOG_OVER_TEMP		0x90
#define SSD_LOG_NORMAL_TEMP		0x91
#define SSD_LOG_WARN_TEMP		0x9a
#define SSD_LOG_SEU_FAULT		0x93
#define SSD_LOG_SEU_FAULT1		0x98
#define SSD_LOG_BATTERY_FAULT		0x92
#define SSD_LOG_BATTERY_OK		0x99
#define SSD_LOG_BOARD_VOLT_FAULT	0x9f

/* software (driver-generated) event codes */
#define SSD_LOG_TIMEOUT			0x300
#define SSD_LOG_POWER_ON		0x301
#define SSD_LOG_POWER_OFF		0x302
#define SSD_LOG_CLEAR_LOG		0x303
#define SSD_LOG_SET_CAPACITY		0x304
#define SSD_LOG_CLEAR_DATA		0x305
#define SSD_LOG_BM_SFSTATUS		0x306
#define SSD_LOG_EIO			0x307
#define SSD_LOG_ECMD			0x308
#define SSD_LOG_SET_WMODE		0x309
#define SSD_LOG_DDR_INIT_ERR		0x30a
#define SSD_LOG_PCIE_LINK_STATUS	0x30b
#define SSD_LOG_CTRL_RST_SYNC		0x30c
#define SSD_LOG_CLK_FAULT		0x30d
#define SSD_LOG_VOLT_FAULT		0x30e
#define SSD_LOG_SET_CAPACITY_END	0x30F
#define SSD_LOG_CLEAR_DATA_END		0x310
#define SSD_LOG_RESET			0x311
#define SSD_LOG_CAP_VOLT_FAULT		0x312
#define SSD_LOG_CAP_LEARN_FAULT		0x313
#define SSD_LOG_CAP_STATUS		0x314
#define SSD_LOG_VOLT_STATUS		0x315
#define SSD_LOG_INLET_OVER_TEMP		0x316
#define SSD_LOG_INLET_NORMAL_TEMP	0x317
#define SSD_LOG_FLASH_OVER_TEMP		0x318
#define SSD_LOG_FLASH_NORMAL_TEMP	0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT	0x31a
#define SSD_LOG_SENSOR_FAULT		0x31b
#define SSD_LOG_ERASE_ALL		0x31c
#define SSD_LOG_ERASE_ALL_END		0x31d
#define SSD_LOG_TEMP_SENSOR_EVENT	0x320
#define SSD_LOG_CLEAR_SMART		0x350
#define SSD_LOG_CLEAR_WARNING		0x351

/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ			1024
/* per-CPU completion lists and tasklets used by the IRQ bottom half */
static DEFINE_PER_CPU(struct list_head, ssd_doneq);
static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet);

/* unloading driver */
static volatile int ssd_exiting = 0;

/* device class; pre-2.6.13 kernels used the class_simple API */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
static struct class_simple *ssd_class;
#else
static struct class *ssd_class;
#endif

/* ssd char device major */
static int ssd_cmajor = SSD_CMAJOR;

/* ssd block device major, minors */
static int ssd_major = SSD_MAJOR;
static int ssd_major_sl = SSD_MAJOR_SL;
static int ssd_minors = SSD_MINORS;

/* ssd device list */
static struct list_head ssd_list;
/* bitmap of allocated device indexes (normal and slave devices) */
static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1];
static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1];
/* number of registered devices */
static atomic_t ssd_nr;
2042 SSD_DRV_MODE_STANDARD
= 0, /* full */
2043 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2044 SSD_DRV_MODE_BASE
/* base only */
/* compile-time selection of the default interrupt mode */
#if (defined SSD_MSIX)
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSIX
#elif (defined SSD_MSI)
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSI
#else
/* auto select the default int mode according to the kernel version */
/* suse 11 sp1 irqbalance bug: use msi instead */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSIX
#else
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSI
#endif
#endif
/* module parameters; meanings documented by MODULE_PARM_DESC below */
static int mode = SSD_DRV_MODE_STANDARD;
static int status_mask = 0xFF;
static int int_mode = SSD_INT_MODE_DEFAULT;
static int threaded_irq = 0;
static int log_level = SSD_LOG_LEVEL_WARNING;
static int ot_protect = 1;	/* over-temperature protection on by default */
static int wmode = SSD_WMODE_DEFAULT;
static int finject = 0;		/* fault injection: debug only */
/* expose the parameters (perm 0: not visible/writable via sysfs) */
module_param(mode, int, 0);
module_param(status_mask, int, 0);
module_param(int_mode, int, 0);
module_param(threaded_irq, int, 0);
module_param(log_level, int, 0);
module_param(ot_protect, int, 0);
module_param(wmode, int, 0);
module_param(finject, int, 0);

MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error");
MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq");
MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable");
MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
// API adaption layer
/* Complete @bio with @error, papering over bio-completion API changes:
 * >= 4.13 uses bi_status (blk_status_t), 4.4-4.12 uses bi_error,
 * older kernels pass the error (and size, pre-2.6.24) to bio_endio(). */
static inline void ssd_bio_endio(struct bio *bio, int error)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
	bio->bi_error = error;
#else
	bio->bi_status = errno_to_blk_status(error);
#endif
	bio_endio(bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	bio_endio(bio, error);
#else
	bio_endio(bio, bio->bi_size, error);
#endif
}
2113 static inline int ssd_bio_has_discard(struct bio
*bio
)
2117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
2118 return bio_op(bio
) == REQ_OP_DISCARD
;
2119 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
2120 return bio
->bi_rw
& REQ_DISCARD
;
2121 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
2122 return bio_rw_flagged(bio
, BIO_RW_DISCARD
);
/* Nonzero if @bio requests a cache flush (REQ_OP_FLUSH / REQ_FLUSH). */
static inline int ssd_bio_has_flush(struct bio *bio)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_FLUSH;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	return bio->bi_rw & REQ_FLUSH;
#else
	/* NOTE(review): fallback branch reconstructed from garbled extraction —
	 * pre-2.6.37 kernels have no flush flag here; confirm against original. */
	return 0;
#endif
}
/* Nonzero if @bio carries a FUA (or, on old kernels, barrier) request. */
static inline int ssd_bio_has_barrier_or_fua(struct bio * bio)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio->bi_opf & REQ_FUA;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	return bio->bi_rw & REQ_FUA;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	return bio->bi_rw & REQ_HARDBARRIER;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	return bio_rw_flagged(bio, BIO_RW_BARRIER);
#else
	return bio_barrier(bio);
#endif
}
/* parse the "hio_mode=" kernel command-line option (see __setup below) */
static int __init ssd_drv_mode(char *str)
{
	mode = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_status_mask=" command-line option (hex value) */
static int __init ssd_status_mask(char *str)
{
	status_mask = (int)simple_strtoul(str, NULL, 16);

	return 1;	/* option consumed */
}
/* parse the "hio_int_mode=" command-line option */
static int __init ssd_int_mode(char *str)
{
	int_mode = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_threaded_irq=" command-line option */
static int __init ssd_threaded_irq(char *str)
{
	threaded_irq = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_log_level=" command-line option */
static int __init ssd_log_level(char *str)
{
	log_level = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_ot_protect=" command-line option */
static int __init ssd_ot_protect(char *str)
{
	ot_protect = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_wmode=" command-line option */
static int __init ssd_wmode(char *str)
{
	wmode = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* parse the "hio_finject=" command-line option */
static int __init ssd_finject(char *str)
{
	finject = (int)simple_strtoul(str, NULL, 0);

	return 1;	/* option consumed */
}
/* register boot-command-line equivalents of the module parameters */
__setup(MODULE_NAME"_mode=", ssd_drv_mode);
__setup(MODULE_NAME"_status_mask=", ssd_status_mask);
__setup(MODULE_NAME"_int_mode=", ssd_int_mode);
__setup(MODULE_NAME"_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME"_log_level=", ssd_log_level);
__setup(MODULE_NAME"_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME"_wmode=", ssd_wmode);
__setup(MODULE_NAME"_finject=", ssd_finject);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

/* /proc/hio/info */
#define SSD_PROC_DIR	MODULE_NAME
#define SSD_PROC_INFO	"info"

static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;
2232 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2233 static int ssd_proc_read(char *page
, char **start
,
2234 off_t off
, int count
, int *eof
, void *data
)
2236 struct ssd_device
*dev
= NULL
;
2237 struct ssd_device
*n
= NULL
;
2243 if (ssd_exiting
|| off
!= 0) {
2247 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
2249 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2251 size
= dev
->hw_info
.size
;
2252 do_div(size
, 1000000000);
2254 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2256 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2258 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2259 if (dev
->hw_info
.ctrl_ver
!= 0) {
2260 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2263 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2265 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2266 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2269 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
2278 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2280 struct ssd_device
*dev
= NULL
;
2281 struct ssd_device
*n
= NULL
;
2289 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
2291 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2293 size
= dev
->hw_info
.size
;
2294 do_div(size
, 1000000000);
2296 seq_printf(m
, "\n");
2298 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2300 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2301 if (dev
->hw_info
.ctrl_ver
!= 0) {
2302 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2305 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2307 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2308 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2311 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
/* seq_file open: PDE()->data API was replaced by PDE_DATA() after 3.9 */
static int ssd_proc_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return single_open(file, ssd_proc_show, PDE(inode)->data);
#else
	return single_open(file, ssd_proc_show, PDE_DATA(inode));
#endif
}
/* file operations for /proc/hio/info (single_open/seq_file pattern) */
static const struct file_operations ssd_proc_fops = {
	.open = ssd_proc_open,
	/* NOTE(review): this member line was garbled in extraction; the
	 * single_open pattern uses seq_read here — confirm against original. */
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* remove /proc/hio/info and /proc/hio (safe to call if partially created) */
static void ssd_cleanup_proc(void)
{
	if (ssd_proc_info) {
		remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir);
		ssd_proc_info = NULL;
	}
	if (ssd_proc_dir) {
		remove_proc_entry(SSD_PROC_DIR, NULL);
		ssd_proc_dir = NULL;
	}
}
2346 static int ssd_init_proc(void)
2348 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2350 goto out_proc_mkdir
;
2352 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2353 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2355 goto out_create_proc_entry
;
2357 ssd_proc_info
->read_proc
= ssd_proc_read
;
2360 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2361 ssd_proc_info
->owner
= THIS_MODULE
;
2364 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2366 goto out_create_proc_entry
;
2371 out_create_proc_entry
:
2372 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2378 static void ssd_cleanup_proc(void)
2382 static int ssd_init_proc(void)
2386 #endif /* CONFIG_PROC_FS */
2389 static void ssd_unregister_sysfs(struct ssd_device
*dev
)
2394 static int ssd_register_sysfs(struct ssd_device
*dev
)
2399 static void ssd_cleanup_sysfs(void)
2404 static int ssd_init_sysfs(void)
/* Release device @index back to the normal (or, if @slave, slave) index
 * bitmap; decrements the device count only if the bit was actually set. */
static inline void ssd_put_index(int slave, int index)
{
	unsigned long *index_bits = ssd_index_bits;

	if (slave) {
		index_bits = ssd_index_bits_sl;
	}

	if (test_and_clear_bit(index, index_bits)) {
		atomic_dec(&ssd_nr);
	}
}
2422 static inline int ssd_get_index(int slave
)
2424 unsigned long *index_bits
= ssd_index_bits
;
2428 index_bits
= ssd_index_bits_sl
;
2432 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
2436 if (test_and_set_bit(index
, index_bits
)) {
2440 atomic_inc(&ssd_nr
);
2445 static void ssd_cleanup_index(void)
/* reset the device list, device counter, and both index bitmaps */
static int ssd_init_index(void)
{
	INIT_LIST_HEAD(&ssd_list);
	atomic_set(&ssd_nr, 0);
	memset(ssd_index_bits, 0, sizeof(ssd_index_bits));
	memset(ssd_index_bits_sl, 0, sizeof(ssd_index_bits_sl));

	return 0;
}
2460 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2462 if(idx
< SSD_ALPHABET_NUM
) {
2463 snprintf(name
, size
, "%c", 'a'+idx
);
2465 idx
-= SSD_ALPHABET_NUM
;
2466 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
/* pci register r&w */
/* Write 64-bit @val to MMIO @addr as two 32-bit writes, low word first.
 * Note: arithmetic on void* relies on the GCC extension (byte-sized). */
static inline void ssd_reg_write(void *addr, uint64_t val)
{
	iowrite32((uint32_t)val, addr);
	iowrite32((uint32_t)(val >> 32), addr + 4);
	/* NOTE(review): barrier line reconstructed from garbled extraction —
	 * confirm the original orders the two MMIO writes with wmb(). */
	wmb();
}
/* Read a 64-bit value from MMIO @addr as two 32-bit reads (low then high)
 * and reassemble it. Not atomic: the two halves are read separately. */
static inline uint64_t ssd_reg_read(void *addr)
{
	uint64_t val;
	uint32_t val_lo, val_hi;

	val_lo = ioread32(addr);
	val_hi = ioread32(addr + 4);
	/* NOTE(review): barrier line reconstructed from garbled extraction —
	 * confirm the original places rmb() between the reads and the merge. */
	rmb();

	val = val_lo | ((uint64_t)val_hi << 32);

	return val;
}
/* 32-bit MMIO accessors */
#define ssd_reg32_write(addr, val)	writel(val, addr)
#define ssd_reg32_read(addr)		readl(addr)
2497 static void ssd_clear_alarm(struct ssd_device
*dev
)
2501 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2505 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2507 /* firmware control */
2510 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
2513 static void ssd_set_alarm(struct ssd_device
*dev
)
2517 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2521 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2525 /* software control */
2528 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/* byte-order swap helpers (endianness conversion); arguments are
 * evaluated more than once — do not pass expressions with side effects */
#define u32_swap(x) ((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define u16_swap(x) ((uint16_t)( \
	(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
	(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2545 /* No lock, for init only*/
2546 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2556 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2558 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2559 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2560 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2561 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2565 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2566 if (val
== 0x1000000) {
2570 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2577 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2586 static int ssd_init_spi(struct ssd_device
*dev
)
2592 mutex_lock(&dev
->spi_mutex
);
2595 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2598 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2600 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2605 } while (val
!= 0x1000000);
2607 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2612 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2620 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2622 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2625 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2626 mutex_unlock(&dev
->spi_mutex
);
2633 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2644 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2645 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2649 mutex_lock(&dev
->spi_mutex
);
2650 while (rlen
< size
) {
2651 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2653 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2655 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2656 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2657 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2658 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2662 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2663 if (val
== 0x1000000) {
2667 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2674 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2675 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2677 rlen
+= sizeof(uint32_t);
2681 mutex_unlock(&dev
->spi_mutex
);
2685 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2697 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2698 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2699 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2703 mutex_lock(&dev
->spi_mutex
);
2705 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2707 wlen
= size
/ sizeof(uint32_t);
2708 for (i
=0; i
<(int)wlen
; i
++) {
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2713 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2715 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2721 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2723 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2725 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2730 } while (val
!= 0x1000000);
2732 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2737 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2744 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2745 if ((val
>> 6) & 0x1) {
2752 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2754 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2757 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2759 mutex_unlock(&dev
->spi_mutex
);
2764 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2774 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2778 mutex_lock(&dev
->spi_mutex
);
2780 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2781 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2784 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2786 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2790 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2793 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2795 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2800 } while (val
!= 0x1000000);
2802 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2807 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2814 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2815 if ((val
>> 5) & 0x1) {
2822 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2824 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2827 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2829 mutex_unlock(&dev
->spi_mutex
);
2834 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2845 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2846 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2850 while (len
< size
) {
2851 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2852 rsize
= dev
->rom_info
.page_size
- roff
;
2853 if ((size
- len
) < rsize
) {
2854 rsize
= (size
- len
);
2858 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2872 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2883 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2884 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2888 while (len
< size
) {
2889 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2890 wsize
= dev
->rom_info
.page_size
- woff
;
2891 if ((size
- len
) < wsize
) {
2892 wsize
= (size
- len
);
2896 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2910 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2920 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2921 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2925 while (len
< size
) {
2928 ret
= ssd_spi_block_erase(dev
, eoff
);
2933 len
+= dev
->rom_info
.block_size
;
2943 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2945 return ssd_reg32_read(addr
);
2948 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2950 ssd_reg32_write(addr
, val
);
2951 ssd_reg32_read(addr
);
2954 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2956 ssd_i2c_ctrl_t ctrl
;
2957 ssd_i2c_data_t data
;
2964 ctrl
.bits
.wdata
= 0;
2965 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2966 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2967 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2971 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2972 if (data
.bits
.valid
== 0) {
2977 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2983 status
= data
.bits
.rdata
;
2985 if (!(status
& 0x4)) {
2986 /* clear read fifo data */
2987 ctrl
.bits
.wdata
= 0;
2988 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2989 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2990 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2994 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2995 if (data
.bits
.valid
== 0) {
3000 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3008 if (nr_data
<= SSD_I2C_MAX_DATA
) {
3017 ctrl
.bits
.wdata
= 0x04;
3018 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3019 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3020 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3023 if (!(status
& 0x8)) {
3025 /* reset i2c controller */
3026 ctrl
.bits
.wdata
= 0x0;
3027 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
3028 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3029 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3036 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3038 ssd_i2c_ctrl_t ctrl
;
3039 ssd_i2c_data_t data
;
3045 mutex_lock(&dev
->i2c_mutex
);
3050 ctrl
.bits
.wdata
= saddr
;
3051 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3052 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3053 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3056 while (off
< size
) {
3057 ctrl
.bits
.wdata
= buf
[off
];
3058 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3059 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3060 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3066 ctrl
.bits
.wdata
= 0x01;
3067 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3068 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3069 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3074 ctrl
.bits
.wdata
= 0;
3075 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3076 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3077 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3080 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3081 if (data
.bits
.valid
== 0) {
3086 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3093 status
= data
.bits
.rdata
;
3098 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3105 if (!(status
& 0x1)) {
3111 if (status
& 0x20) {
3117 if (status
& 0x10) {
3124 if (__ssd_i2c_clear(dev
, saddr
)) {
3128 mutex_unlock(&dev
->i2c_mutex
);
3133 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3135 ssd_i2c_ctrl_t ctrl
;
3136 ssd_i2c_data_t data
;
3142 mutex_lock(&dev
->i2c_mutex
);
3147 ctrl
.bits
.wdata
= saddr
;
3148 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3149 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3150 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3153 ctrl
.bits
.wdata
= size
;
3154 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3155 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3156 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3159 ctrl
.bits
.wdata
= 0x02;
3160 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3161 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3162 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3167 ctrl
.bits
.wdata
= 0;
3168 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3169 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3170 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3173 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3174 if (data
.bits
.valid
== 0) {
3179 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3186 status
= data
.bits
.rdata
;
3191 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3198 if (!(status
& 0x2)) {
3204 if (status
& 0x20) {
3210 if (status
& 0x10) {
3216 while (off
< size
) {
3217 ctrl
.bits
.wdata
= 0;
3218 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3219 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3220 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3224 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3225 if (data
.bits
.valid
== 0) {
3230 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3237 buf
[off
] = data
.bits
.rdata
;
3244 if (__ssd_i2c_clear(dev
, saddr
)) {
3248 mutex_unlock(&dev
->i2c_mutex
);
3253 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3255 ssd_i2c_ctrl_t ctrl
;
3256 ssd_i2c_data_t data
;
3262 mutex_lock(&dev
->i2c_mutex
);
3267 ctrl
.bits
.wdata
= saddr
;
3268 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3269 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3270 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3274 while (off
< wsize
) {
3275 ctrl
.bits
.wdata
= wbuf
[off
];
3276 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3277 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3278 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3284 ctrl
.bits
.wdata
= rsize
;
3285 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3286 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3287 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3290 ctrl
.bits
.wdata
= 0x03;
3291 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3292 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3293 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3298 ctrl
.bits
.wdata
= 0;
3299 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3300 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3301 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3304 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3305 if (data
.bits
.valid
== 0) {
3310 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3317 status
= data
.bits
.rdata
;
3322 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3329 if (!(status
& 0x2)) {
3335 if (status
& 0x20) {
3341 if (status
& 0x10) {
3348 while (off
< rsize
) {
3349 ctrl
.bits
.wdata
= 0;
3350 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3351 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3352 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3356 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3357 if (data
.bits
.valid
== 0) {
3362 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3369 rbuf
[off
] = data
.bits
.rdata
;
3376 if (__ssd_i2c_clear(dev
, saddr
)) {
3379 mutex_unlock(&dev
->i2c_mutex
);
3384 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3390 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3391 if (!ret
|| -ETIMEDOUT
== ret
) {
3396 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3399 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3405 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3411 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3412 if (!ret
|| -ETIMEDOUT
== ret
) {
3417 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3420 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3426 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3428 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3433 memcpy((smb_data
+ 1), buf
, 1);
3436 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3437 if (!ret
|| -ETIMEDOUT
== ret
) {
3442 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3445 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3451 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3453 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3460 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3461 if (!ret
|| -ETIMEDOUT
== ret
) {
3466 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3469 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3475 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3477 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3482 memcpy((smb_data
+ 1), buf
, 2);
3485 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3486 if (!ret
|| -ETIMEDOUT
== ret
) {
3491 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3494 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3500 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3502 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3509 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3510 if (!ret
|| -ETIMEDOUT
== ret
) {
3515 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3518 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3524 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3526 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3532 memcpy((smb_data
+ 2), buf
, size
);
3535 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3536 if (!ret
|| -ETIMEDOUT
== ret
) {
3541 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3544 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3550 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3552 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3560 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3561 if (!ret
|| -ETIMEDOUT
== ret
) {
3566 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3569 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3575 rsize
= smb_data
[1];
3577 if (rsize
> size
) {
3581 memcpy(buf
, (smb_data
+ 2), rsize
);
3587 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3590 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3595 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3600 conf
&= (uint8_t)(~1u);
3602 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3611 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3616 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3621 *data
= u16_swap(val
);
3626 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3635 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3642 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3647 /* set volt limit */
3648 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3649 high
= ssd_lm80_limit
[i
].high
;
3650 low
= ssd_lm80_limit
[i
].low
;
3652 if (SSD_LM80_IN_CAP
== i
) {
3656 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3662 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3668 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3674 /* set interrupt mask: allow volt in interrupt except cap in*/
3676 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3681 /* set interrupt mask: disable others */
3683 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3690 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3699 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3704 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3708 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3713 val
&= ~(1UL << (uint32_t)idx
);
3715 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3724 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3729 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3733 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3738 val
|= (1UL << (uint32_t)idx
);
3740 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3749 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3754 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3759 *data
= u16_swap(val
);
3763 static int ssd_generate_sensor_fault_log(struct ssd_device
*dev
, uint16_t event
, uint8_t addr
,uint32_t ret
)
3766 data
= ((ret
& 0xffff) << 16) | (addr
<< 8) | addr
;
3767 ssd_gen_swlog(dev
,event
,data
);
3770 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3773 uint16_t val
= 0, status
;
3774 uint8_t alarm1
= 0, alarm2
= 0;
3779 /* read interrupt status to clear interrupt */
3780 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3785 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3790 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3792 /* parse inetrrupt status */
3793 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3794 if (!((status
>> (uint32_t)i
) & 0x1)) {
3795 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3796 /* enable INx irq */
3797 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3806 /* disable INx irq */
3807 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3812 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3816 high
= (uint32_t)ssd_lm80_limit
[i
].high
* (uint32_t)10;
3817 low
= (uint32_t)ssd_lm80_limit
[i
].low
* (uint32_t)10;
3819 for (j
=0; j
<3; j
++) {
3820 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3824 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3825 if ((volt
>high
) || (volt
<=low
)) {
3827 msleep(SSD_LM80_CONV_INTERVAL
);
3839 case SSD_LM80_IN_CAP
: {
3841 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3843 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3848 case SSD_LM80_IN_1V2
:
3849 case SSD_LM80_IN_1V2a
:
3850 case SSD_LM80_IN_1V5
:
3851 case SSD_LM80_IN_1V8
: {
3852 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3855 case SSD_LM80_IN_FPGA_3V3
:
3856 case SSD_LM80_IN_3V3
: {
3857 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3867 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3868 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
,ret
);
3871 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3877 static int ssd_init_sensor(struct ssd_device
*dev
)
3881 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3885 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3887 hio_warn("%s: init lm75 failed\n", dev
->name
);
3888 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3889 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3894 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3895 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3897 hio_warn("%s: init lm80 failed\n", dev
->name
);
3898 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3899 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3906 /* skip error if not in standard mode */
3907 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3914 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3916 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3920 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3924 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3928 static int ssd_mon_temp(struct ssd_device
*dev
)
3934 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3938 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3943 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3945 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3946 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
3950 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3952 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3953 if (cur
>= SSD_INLET_OT_TEMP
) {
3954 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3955 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3957 } else if(cur
< SSD_INLET_OT_HYST
) {
3958 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3959 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3964 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3966 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3967 ssd_generate_sensor_fault_log(dev
, SSD_LOG_TEMP_SENSOR_EVENT
, SSD_SENSOR_LM75_SADDRESS
,ret
);
3971 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3973 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3974 if (cur
>= SSD_FLASH_OT_TEMP
) {
3975 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3976 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3978 } else if(cur
< SSD_FLASH_OT_HYST
) {
3979 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3980 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3989 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3991 test_and_clear_bit(tag
, dev
->tag_map
);
3992 wake_up(&dev
->tag_wq
);
3995 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
4000 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
4001 DEFINE_WAIT(__wait
);
4007 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
4010 finish_wait(&dev
->tag_wq
, &__wait
);
4013 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4020 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
4022 test_and_clear_bit(tag
, dev
->tag_map
);
4025 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
4029 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4036 static void ssd_barrier_end(struct ssd_device
*dev
)
4038 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4039 wake_up_all(&dev
->tag_wq
);
4041 mutex_unlock(&dev
->barrier_mutex
);
4044 static int ssd_barrier_start(struct ssd_device
*dev
)
4048 mutex_lock(&dev
->barrier_mutex
);
4050 atomic_set(&dev
->queue_depth
, 0);
4052 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4053 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4057 __set_current_state(TASK_INTERRUPTIBLE
);
4058 schedule_timeout(1);
4061 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4062 wake_up_all(&dev
->tag_wq
);
4064 mutex_unlock(&dev
->barrier_mutex
);
4069 static int ssd_busy(struct ssd_device
*dev
)
4071 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4078 static int ssd_wait_io(struct ssd_device
*dev
)
4082 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4083 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4087 __set_current_state(TASK_INTERRUPTIBLE
);
4088 schedule_timeout(1);
4095 static int ssd_in_barrier(struct ssd_device
*dev
)
4097 return (0 == atomic_read(&dev
->queue_depth
));
4101 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4103 kfree(dev
->tag_map
);
4106 static int ssd_init_tag(struct ssd_device
*dev
)
4108 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4110 mutex_init(&dev
->barrier_mutex
);
4112 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4114 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4115 if (!dev
->tag_map
) {
4119 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4121 init_waitqueue_head(&dev
->tag_wq
);
4127 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4129 struct ssd_device
*dev
= cmd
->dev
;
4130 struct bio
*bio
= cmd
->bio
;
4131 unsigned long dur
= jiffies
- cmd
->start_time
;
4132 int rw
= bio_data_dir(bio
);
4133 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4138 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
4139 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4140 generic_end_io_acct(dev
->rq
, rw
, part
, cmd
->start_time
);
4141 #elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4142 int cpu
= part_stat_lock();
4143 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4144 part_round_stats(cpu
, part
);
4145 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4146 part_dec_in_flight(part
, rw
);
4148 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4149 int cpu
= part_stat_lock();
4150 struct hd_struct
*part
= &dev
->gd
->part0
;
4151 part_round_stats(cpu
, part
);
4152 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4154 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4155 part
->in_flight
[rw
]--;
4156 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4160 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4162 disk_round_stats(dev
->gd
);
4163 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4165 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4166 dev
->gd
->in_flight
--;
4167 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4173 disk_round_stats(dev
->gd
);
4175 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4177 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4179 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4180 dev
->gd
->in_flight
--;
4181 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4188 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4190 struct ssd_device
*dev
= cmd
->dev
;
4191 struct bio
*bio
= cmd
->bio
;
4192 int rw
= bio_data_dir(bio
);
4193 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4198 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
4199 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4200 generic_start_io_acct(dev
->rq
, rw
, bio_sectors(bio
), part
);
4201 #elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4202 int cpu
= part_stat_lock();
4203 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4204 part_round_stats(cpu
, part
);
4205 part_stat_inc(cpu
, part
, ios
[rw
]);
4206 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4207 part_inc_in_flight(part
, rw
);
4209 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4210 int cpu
= part_stat_lock();
4211 struct hd_struct
*part
= &dev
->gd
->part0
;
4212 part_round_stats(cpu
, part
);
4213 part_stat_inc(cpu
, part
, ios
[rw
]);
4214 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4216 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4217 part
->in_flight
[rw
]++;
4218 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4222 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4224 disk_round_stats(dev
->gd
);
4225 disk_stat_inc(dev
->gd
, ios
[rw
]);
4226 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4228 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4229 dev
->gd
->in_flight
++;
4230 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4235 disk_round_stats(dev
->gd
);
4237 disk_stat_inc(dev
->gd
, writes
);
4238 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4240 disk_stat_inc(dev
->gd
, reads
);
4241 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4244 spin_lock_irqsave(&dev
->in_flight_lock
,flag
);
4245 dev
->gd
->in_flight
++;
4246 spin_unlock_irqrestore(&dev
->in_flight_lock
,flag
);
4252 cmd
->start_time
= jiffies
;
4256 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4258 spin_lock(&dev
->sendq_lock
);
4259 ssd_blist_add(&dev
->sendq
, bio
);
4260 spin_unlock(&dev
->sendq_lock
);
4262 atomic_inc(&dev
->in_sendq
);
4263 wake_up(&dev
->send_waitq
);
4266 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4268 struct ssd_device
*dev
= cmd
->dev
;
4269 struct bio
*bio
= cmd
->bio
;
4270 int errors
= cmd
->errors
;
4274 if (!ssd_bio_has_discard(bio
)) {
4275 ssd_end_io_acct(cmd
);
4277 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4278 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4283 ssd_put_tag(dev
, tag
);
4285 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4286 ssd_bio_endio(bio
, errors
);
4287 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4288 spin_lock(&dev
->doneq_lock
);
4289 ssd_blist_add(&dev
->doneq
, bio
);
4290 spin_unlock(&dev
->doneq_lock
);
4292 atomic_inc(&dev
->in_doneq
);
4293 wake_up(&dev
->done_waitq
);
4297 complete(cmd
->waiting
);
4302 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4304 struct ssd_device
*dev
= cmd
->dev
;
4305 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4308 for (i
=0; i
<dev
->nr_queue
; i
++) {
4309 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4310 disable_irq(dev
->entry
[i
].vector
);
4312 disable_irq(pci_irq_vector(dev
->pdev
, i
));
4316 atomic_inc(&dev
->tocnt
);
4318 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4319 cmd
->errors
= -ETIMEDOUT
;
4320 ssd_end_request(cmd
);
4323 for (i
=0; i
<dev
->nr_queue
; i
++) {
4324 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
4325 enable_irq(dev
->entry
[i
].vector
);
4327 enable_irq(pci_irq_vector(dev
->pdev
, i
));
4336 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4338 init_timer(&cmd
->cmd_timer
);
4340 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4341 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4342 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4344 add_timer(&cmd
->cmd_timer
);
4347 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4349 return del_timer(&cmd
->cmd_timer
);
4352 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4356 timer
->data
= (unsigned long)data
;
4357 timer
->expires
= jiffies
+ timeout
;
4358 timer
->function
= (void (*)(unsigned long)) complt
;
/* Deactivate a generic timer.
 * Returns nonzero if the timer was pending when deleted. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4368 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4370 struct ssd_device
*dev
= cmd
->dev
;
4371 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4373 ssd_end_timeout_request(cmd
);
4375 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4379 static void __ssd_done(unsigned long data
)
4381 struct ssd_cmd
*cmd
;
4384 local_irq_disable();
4385 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4386 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4388 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4392 while (!list_empty(&localq
)) {
4393 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4394 list_del_init(&cmd
->list
);
4396 ssd_end_request(cmd
);
4400 static void __ssd_done_db(unsigned long data
)
4402 struct ssd_cmd
*cmd
;
4403 struct ssd_device
*dev
;
4407 local_irq_disable();
4408 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4409 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4411 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4415 while (!list_empty(&localq
)) {
4416 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4417 list_del_init(&cmd
->list
);
4419 dev
= (struct ssd_device
*)cmd
->dev
;
4423 sector_t off
= dev
->db_info
.data
.loc
.off
;
4424 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4426 switch (dev
->db_info
.type
) {
4427 case SSD_DEBUG_READ_ERR
:
4428 if (bio_data_dir(bio
) == READ
&&
4429 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4433 case SSD_DEBUG_WRITE_ERR
:
4434 if (bio_data_dir(bio
) == WRITE
&&
4435 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4436 cmd
->errors
= -EROFS
;
4439 case SSD_DEBUG_RW_ERR
:
4440 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4441 if (bio_data_dir(bio
) == READ
) {
4444 cmd
->errors
= -EROFS
;
4453 ssd_end_request(cmd
);
4457 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4459 unsigned long flags
= 0;
4461 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4462 struct ssd_device
*dev
= cmd
->dev
;
4463 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4464 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4471 local_irq_save(flags
);
4472 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4473 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4474 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4476 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4477 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4479 local_irq_restore(flags
);
4484 static inline void ssd_done(struct ssd_cmd
*cmd
)
4486 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4487 struct ssd_device
*dev
= cmd
->dev
;
4488 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4489 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4496 ssd_end_request(cmd
);
4501 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4503 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4505 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4507 spin_lock(&dev
->cmd_lock
);
4508 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4509 spin_unlock(&dev
->cmd_lock
);
4512 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4514 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4516 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4518 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4521 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4523 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4524 struct bio
*bio
= cmd
->bio
;
4526 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4529 switch (dev
->db_info
.type
) {
4530 case SSD_DEBUG_READ_TO
:
4531 if (bio_data_dir(bio
) == READ
) {
4535 case SSD_DEBUG_WRITE_TO
:
4536 if (bio_data_dir(bio
) == WRITE
) {
4540 case SSD_DEBUG_RW_TO
:
4548 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4552 /* fixed for BIOVEC_PHYS_MERGEABLE */
4553 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4554 #include <linux/bio.h>
4555 #include <linux/io.h>
4556 #include <xen/page.h>
4558 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4559 const struct bio_vec
*vec2
)
4561 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4562 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4564 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4565 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4568 #ifdef BIOVEC_PHYS_MERGEABLE
4569 #undef BIOVEC_PHYS_MERGEABLE
4571 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4572 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4573 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4577 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4579 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4580 struct bio_vec
*bvec
, *bvprv
= NULL
;
4581 struct scatterlist
*sg
= NULL
;
4582 int i
= 0, nsegs
= 0;
4584 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4585 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4589 * for each segment in bio
4591 bio_for_each_segment(bvec
, bio
, i
) {
4592 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4593 sg
->length
+= bvec
->bv_len
;
4595 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4599 sg
= sg
? (sg
+ 1) : sgl
;
4600 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4601 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4603 sg
->page
= bvec
->bv_page
;
4604 sg
->length
= bvec
->bv_len
;
4605 sg
->offset
= bvec
->bv_offset
;
4612 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4622 struct bio_vec bvec
, bvprv
;
4623 struct bvec_iter iter
;
4624 struct scatterlist
*sg
= NULL
;
4628 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4631 * for each segment in bio
4633 bio_for_each_segment(bvec
, bio
, iter
) {
4634 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4635 sg
->length
+= bvec
.bv_len
;
4637 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4641 sg
= sg
? (sg
+ 1) : sgl
;
4643 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4660 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4662 struct ssd_cmd
*cmd
;
4663 struct ssd_rw_msg
*msg
;
4664 struct ssd_sg_entry
*sge
;
4665 sector_t block
= bio_start(bio
);
4669 tag
= ssd_get_tag(dev
, wait
);
4674 cmd
= &dev
->cmd
[tag
];
4678 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4680 if (ssd_bio_has_discard(bio
)) {
4681 unsigned int length
= bio_sectors(bio
);
4683 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4685 msg
->fun
= SSD_FUNC_TRIM
;
4688 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4690 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4693 block
+= sge
->length
;
4694 length
-= sge
->length
;
4702 msg
->nsegs
= cmd
->nsegs
= i
;
4708 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4709 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4712 if (bio_data_dir(bio
) == READ
) {
4713 msg
->fun
= SSD_FUNC_READ
;
4716 msg
->fun
= SSD_FUNC_WRITE
;
4717 msg
->flag
= dev
->wmode
;
4721 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4723 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4724 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4726 block
+= sge
->length
;
4732 #ifdef SSD_OT_PROTECT
4733 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4734 msleep_interruptible(dev
->ot_delay
);
4738 ssd_start_io_acct(cmd
);
4744 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4746 struct ssd_cmd
*cmd
;
4747 struct ssd_rw_msg
*msg
;
4748 struct ssd_sg_entry
*sge
;
4749 struct scatterlist
*sgl
;
4750 sector_t block
= bio_start(bio
);
4754 tag
= ssd_get_tag(dev
, wait
);
4759 cmd
= &dev
->cmd
[tag
];
4763 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4767 if (ssd_bio_has_discard(bio
)) {
4768 unsigned int length
= bio_sectors(bio
);
4770 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4772 msg
->fun
= SSD_FUNC_TRIM
;
4775 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4777 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4780 block
+= sge
->length
;
4781 length
-= sge
->length
;
4789 msg
->nsegs
= cmd
->nsegs
= i
;
4795 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4798 if (bio_data_dir(bio
) == READ
) {
4799 msg
->fun
= SSD_FUNC_READ
;
4801 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4803 msg
->fun
= SSD_FUNC_WRITE
;
4804 msg
->flag
= dev
->wmode
;
4805 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4809 for (i
=0; i
<cmd
->nsegs
; i
++) {
4811 sge
->length
= sg_dma_len(sgl
) >> 9;
4812 sge
->buf
= sg_dma_address(sgl
);
4814 block
+= sge
->length
;
4821 #ifdef SSD_OT_PROTECT
4822 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4823 msleep_interruptible(dev
->ot_delay
);
4827 ssd_start_io_acct(cmd
);
4834 static int ssd_done_thread(void *data
)
4836 struct ssd_device
*dev
;
4845 current
->flags
|= PF_NOFREEZE
;
4846 //set_user_nice(current, -5);
4848 while (!kthread_should_stop()) {
4849 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4851 while (atomic_read(&dev
->in_doneq
)) {
4853 spin_lock(&dev
->doneq_lock
);
4854 bio
= ssd_blist_get(&dev
->doneq
);
4855 spin_unlock(&dev
->doneq_lock
);
4857 spin_lock_irq(&dev
->doneq_lock
);
4858 bio
= ssd_blist_get(&dev
->doneq
);
4859 spin_unlock_irq(&dev
->doneq_lock
);
4863 next
= bio
->bi_next
;
4864 bio
->bi_next
= NULL
;
4865 ssd_bio_endio(bio
, 0);
4866 atomic_dec(&dev
->in_doneq
);
4872 #ifdef SSD_ESCAPE_IRQ
4873 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4874 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4875 cpumask_var_t new_mask
;
4876 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4877 cpumask_setall(new_mask
);
4878 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4879 set_cpus_allowed_ptr(current
, new_mask
);
4880 free_cpumask_var(new_mask
);
4884 cpus_setall(new_mask
);
4885 cpu_clear(dev
->irq_cpu
, new_mask
);
4886 set_cpus_allowed(current
, new_mask
);
4895 static int ssd_send_thread(void *data
)
4897 struct ssd_device
*dev
;
4906 current
->flags
|= PF_NOFREEZE
;
4907 //set_user_nice(current, -5);
4909 while (!kthread_should_stop()) {
4910 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4912 while (atomic_read(&dev
->in_sendq
)) {
4913 spin_lock(&dev
->sendq_lock
);
4914 bio
= ssd_blist_get(&dev
->sendq
);
4915 spin_unlock(&dev
->sendq_lock
);
4918 next
= bio
->bi_next
;
4919 bio
->bi_next
= NULL
;
4920 #ifdef SSD_QUEUE_PBIO
4921 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4922 __ssd_submit_pbio(dev
, bio
, 1);
4924 ssd_submit_bio(dev
, bio
, 1);
4927 ssd_submit_bio(dev
, bio
, 1);
4929 atomic_dec(&dev
->in_sendq
);
4935 #ifdef SSD_ESCAPE_IRQ
4936 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4937 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4938 cpumask_var_t new_mask
;
4939 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4940 cpumask_setall(new_mask
);
4941 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4942 set_cpus_allowed_ptr(current
, new_mask
);
4943 free_cpumask_var(new_mask
);
4947 cpus_setall(new_mask
);
4948 cpu_clear(dev
->irq_cpu
, new_mask
);
4949 set_cpus_allowed(current
, new_mask
);
4959 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4961 kthread_stop(dev
->send_thread
);
4962 kthread_stop(dev
->done_thread
);
4965 static int ssd_init_thread(struct ssd_device
*dev
)
4969 atomic_set(&dev
->in_doneq
, 0);
4970 atomic_set(&dev
->in_sendq
, 0);
4972 spin_lock_init(&dev
->doneq_lock
);
4973 spin_lock_init(&dev
->sendq_lock
);
4975 ssd_blist_init(&dev
->doneq
);
4976 ssd_blist_init(&dev
->sendq
);
4978 init_waitqueue_head(&dev
->done_waitq
);
4979 init_waitqueue_head(&dev
->send_waitq
);
4981 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4982 if (IS_ERR(dev
->done_thread
)) {
4983 ret
= PTR_ERR(dev
->done_thread
);
4984 goto out_done_thread
;
4987 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4988 if (IS_ERR(dev
->send_thread
)) {
4989 ret
= PTR_ERR(dev
->send_thread
);
4990 goto out_send_thread
;
4996 kthread_stop(dev
->done_thread
);
5002 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
5004 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5006 spin_lock(&dev
->dcmd_lock
);
5007 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
5008 spin_unlock(&dev
->dcmd_lock
);
5011 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
5013 struct ssd_dcmd
*dcmd
= NULL
;
5015 spin_lock(&dev
->dcmd_lock
);
5016 if (!list_empty(&dev
->dcmd_list
)) {
5017 dcmd
= list_entry(dev
->dcmd_list
.next
,
5018 struct ssd_dcmd
, list
);
5019 list_del_init(&dcmd
->list
);
5021 spin_unlock(&dev
->dcmd_lock
);
5026 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
5031 static int ssd_init_dcmd(struct ssd_device
*dev
)
5033 struct ssd_dcmd
*dcmd
;
5034 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
5037 spin_lock_init(&dev
->dcmd_lock
);
5038 INIT_LIST_HEAD(&dev
->dcmd_list
);
5039 init_waitqueue_head(&dev
->dcmd_wq
);
5041 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
5043 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
5044 goto out_alloc_dcmd
;
5046 memset(dev
->dcmd
, 0, dcmd_sz
);
5048 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
5050 INIT_LIST_HEAD(&dcmd
->list
);
5051 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
5060 static void ssd_put_dmsg(void *msg
)
5062 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
5063 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
5065 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
5067 wake_up(&dev
->dcmd_wq
);
5070 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5072 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5076 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5079 dcmd
= ssd_get_dcmd(dev
);
5081 finish_wait(&dev
->dcmd_wq
, &wait
);
5087 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5089 DECLARE_COMPLETION(wait
);
5090 struct ssd_cmd
*cmd
;
5094 tag
= ssd_get_tag(dev
, 1);
5099 cmd
= &dev
->cmd
[tag
];
5101 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5102 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5104 cmd
->waiting
= &wait
;
5108 wait_for_completion(cmd
->waiting
);
5109 cmd
->waiting
= NULL
;
5111 if (cmd
->errors
== -ETIMEDOUT
) {
5113 } else if (cmd
->errors
) {
5118 *done
= cmd
->nr_log
;
5120 ssd_put_tag(dev
, cmd
->tag
);
5125 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5127 DECLARE_COMPLETION(wait
);
5128 struct ssd_cmd
*cmd
;
5132 tag
= ssd_barrier_get_tag(dev
);
5137 cmd
= &dev
->cmd
[tag
];
5139 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5140 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5142 cmd
->waiting
= &wait
;
5146 wait_for_completion(cmd
->waiting
);
5147 cmd
->waiting
= NULL
;
5149 if (cmd
->errors
== -ETIMEDOUT
) {
5151 } else if (cmd
->errors
) {
5156 *done
= cmd
->nr_log
;
5158 ssd_barrier_put_tag(dev
, cmd
->tag
);
5163 #ifdef SSD_OT_PROTECT
5164 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5171 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5175 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5178 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5179 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5181 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5182 if (val
== 0xffffffffffffffffull
) {
5186 cur
= (int)CUR_TEMP(val
);
5188 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5189 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5190 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5191 dev
->ot_delay
= SSD_OT_DELAY
;
5198 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5199 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5200 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5207 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5213 if (!dev
|| !status
) {
5217 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5218 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5219 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5220 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5221 if ((val
>> 22) & 0x1) {
5227 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5228 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5229 if ((val
>> 22) & 0x1) {
5235 *status
= !!dev
->ot_delay
;
5242 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5248 mutex_lock(&dev
->fw_mutex
);
5250 dev
->ot_protect
= !!protect
;
5252 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5253 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5254 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5255 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5256 if (dev
->ot_protect
) {
5261 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5264 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5265 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5266 if (dev
->ot_protect
) {
5271 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5275 mutex_unlock(&dev
->fw_mutex
);
5278 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5280 ssd_set_ot_protect(dev
, ot_protect
);
5282 #ifdef SSD_OT_PROTECT
5283 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5290 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5292 struct ssd_log_op_msg
*msg
;
5293 struct ssd_log_msg
*lmsg
;
5295 size_t length
= dev
->hw_info
.log_sz
;
5298 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5302 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5303 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5304 ret
= dma_mapping_error(buf_dma
);
5306 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5309 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5310 goto out_dma_mapping
;
5313 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5315 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5316 lmsg
= (struct ssd_log_msg
*)msg
;
5317 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5318 lmsg
->ctrl_idx
= ctrl_idx
;
5319 lmsg
->buf
= buf_dma
;
5321 msg
->fun
= SSD_FUNC_READ_LOG
;
5322 msg
->ctrl_idx
= ctrl_idx
;
5326 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5329 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5335 #define SSD_LOG_PRINT_BUF_SZ 256
5336 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5338 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5339 struct ssd_log_entry
*le
;
5341 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5347 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5348 if (log_desc
->event
== le
->event
) {
5358 if (log_desc
->level
< log_level
) {
5363 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5366 sn
= dev
->labelv3
.barcode
;
5369 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5371 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5372 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5375 switch (log_desc
->data
) {
5376 case SSD_LOG_DATA_NONE
:
5378 case SSD_LOG_DATA_LOC
:
5379 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5380 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5381 if (log_desc
->sblock
) {
5382 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5384 if (log_desc
->spage
) {
5385 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5388 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5389 if (log_desc
->sblock
) {
5390 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5392 if (log_desc
->spage
) {
5393 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5397 case SSD_LOG_DATA_HEX
:
5398 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5403 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5405 switch (log_desc
->level
) {
5406 case SSD_LOG_LEVEL_INFO
:
5407 hio_info("%s\n", print_buf
);
5409 case SSD_LOG_LEVEL_NOTICE
:
5410 hio_note("%s\n", print_buf
);
5412 case SSD_LOG_LEVEL_WARNING
:
5413 hio_warn("%s\n", print_buf
);
5415 case SSD_LOG_LEVEL_ERR
:
5416 hio_err("%s\n", print_buf
);
5417 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5420 hio_warn("%s\n", print_buf
);
5425 return log_desc
->level
;
5428 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5429 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5432 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5437 case SSD_LOG_OVER_TEMP
: {
5438 #ifdef SSD_OT_PROTECT
5439 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5440 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5441 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5442 dev
->ot_delay
= SSD_OT_DELAY
;
5449 case SSD_LOG_NORMAL_TEMP
: {
5450 #ifdef SSD_OT_PROTECT
5451 /* need to check all controller's temperature */
5452 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5457 case SSD_LOG_BATTERY_FAULT
: {
5460 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5461 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5462 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5466 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5467 ssd_switch_wmode(dev
, dev
->user_wmode
);
5472 case SSD_LOG_BATTERY_OK
: {
5473 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5474 ssd_switch_wmode(dev
, dev
->user_wmode
);
5479 case SSD_LOG_BOARD_VOLT_FAULT
: {
5480 ssd_mon_boardvolt(dev
);
5484 case SSD_LOG_CLEAR_LOG
: {
5486 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5490 case SSD_LOG_CAP_VOLT_FAULT
:
5491 case SSD_LOG_CAP_LEARN_FAULT
:
5492 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5493 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5494 ssd_switch_wmode(dev
, dev
->user_wmode
);
5503 /* ssd event call */
5504 if (dev
->event_call
) {
5505 dev
->event_call(dev
->gd
, event
, level
);
5508 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5509 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5516 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5522 mutex_lock(&dev
->internal_log_mutex
);
5524 size
= sizeof(struct ssd_log
);
5525 off
= dev
->internal_log
.nr_log
* size
;
5527 if (off
== dev
->rom_info
.log_sz
) {
5528 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5529 hio_warn("%s: internal log is full\n", dev
->name
);
5534 internal_log
= dev
->internal_log
.log
+ off
;
5535 memcpy(internal_log
, log
, size
);
5537 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5538 off
+= dev
->rom_info
.log_base
;
5540 ret
= ssd_spi_write(dev
, log
, off
, size
);
5546 dev
->internal_log
.nr_log
++;
5549 mutex_unlock(&dev
->internal_log_mutex
);
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
static unsigned short const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};

/* Fold a single byte into a reflected CRC-16 (poly 0x8005). */
static unsigned short crc16_byte(unsigned short crc, const unsigned char data)
{
	return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
}

/**
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous CRC value
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	int i;

	for (i = 0; i < len; i++)
		crc = crc16_byte(crc, buffer[i]);
	return crc;
}
5608 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5615 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5618 memset(&log
, 0, sizeof(struct ssd_log
));
5620 do_gettimeofday(&tv
);
5621 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5622 log
.time
= tv
.tv_sec
;
5623 log
.le
.event
= event
;
5624 log
.le
.data
.val
= data
;
5626 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5627 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5628 level
= ssd_parse_log(dev
, &log
, 0);
5629 if (level
>= SSD_LOG_LEVEL
) {
5630 ret
= ssd_save_log(dev
, &log
);
5634 if (SSD_LOG_LEVEL_ERR
== level
) {
5639 dev
->smart
.log_info
.nr_log
++;
5640 dev
->smart
.log_info
.stat
[level
]++;
5643 ssd_handle_event(dev
, event
, level
);
5648 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5650 struct ssd_log_entry le
;
5653 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5661 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5665 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5670 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5671 queue_work(dev
->workq
, &dev
->log_work
);
5677 static int ssd_do_swlog(struct ssd_device
*dev
)
5679 struct ssd_log_entry le
;
5682 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5683 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5684 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5693 static int __ssd_clear_log(struct ssd_device
*dev
)
5695 uint32_t off
, length
;
5698 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5702 if (dev
->internal_log
.nr_log
== 0) {
5706 mutex_lock(&dev
->internal_log_mutex
);
5708 off
= dev
->rom_info
.log_base
;
5709 length
= dev
->rom_info
.log_sz
;
5711 ret
= ssd_spi_erase(dev
, off
, length
);
5713 hio_warn("%s: log erase: failed\n", dev
->name
);
5717 dev
->internal_log
.nr_log
= 0;
5720 mutex_unlock(&dev
->internal_log_mutex
);
5724 static int ssd_clear_log(struct ssd_device
*dev
)
5728 ret
= __ssd_clear_log(dev
);
5730 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5736 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5738 struct ssd_log_entry
*le
;
5745 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5750 do_gettimeofday(&tv
);
5752 log
.time
= tv
.tv_sec
;
5753 log
.ctrl_idx
= ctrl_idx
;
5755 le
= (ssd_log_entry_t
*)buf
;
5756 while (nr_log
> 0) {
5757 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5759 log
.le
.mod
= SSD_DIF_WITH_OLD_LOG
;
5760 log
.le
.idx
= crc16(0,(const unsigned char *)&log
,14);
5761 level
= ssd_parse_log(dev
, &log
, 1);
5762 if (level
>= SSD_LOG_LEVEL
) {
5763 ssd_save_log(dev
, &log
);
5767 if (SSD_LOG_LEVEL_ERR
== level
) {
5771 dev
->smart
.log_info
.nr_log
++;
5772 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5773 dev
->smart
.log_info
.stat
[level
]++;
5777 /* log to the volatile log info */
5778 dev
->log_info
.nr_log
++;
5779 dev
->log_info
.stat
[level
]++;
5783 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5784 if (le
->event
!= SSD_LOG_SEU_FAULT1
) {
5785 dev
->has_non_0x98_reg_access
= 1;
5788 /*dev->readonly = 1;
5789 set_disk_ro(dev->gd, 1);
5790 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5794 ssd_handle_event(dev
, le
->event
, level
);
5803 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5804 static void ssd_log_worker(void *data
)
5806 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5808 static void ssd_log_worker(struct work_struct
*work
)
5810 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5815 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5817 if (!dev
->log_buf
) {
5818 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5819 if (!dev
->log_buf
) {
5820 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5826 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5827 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5828 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5830 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5831 hio_warn("%s: do log fail\n", dev
->name
);
5837 ret
= ssd_do_swlog(dev
);
5839 hio_warn("%s: do swlog fail\n", dev
->name
);
5843 static void ssd_cleanup_log(struct ssd_device
*dev
)
5846 kfree(dev
->log_buf
);
5847 dev
->log_buf
= NULL
;
5850 sfifo_free(&dev
->log_fifo
);
5852 if (dev
->internal_log
.log
) {
5853 vfree(dev
->internal_log
.log
);
5854 dev
->internal_log
.nr_log
= 0;
5855 dev
->internal_log
.log
= NULL
;
5859 static int ssd_init_log(struct ssd_device
*dev
)
5861 struct ssd_log
*log
;
5866 mutex_init(&dev
->internal_log_mutex
);
5868 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5869 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5871 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5874 off
= dev
->rom_info
.log_base
;
5875 size
= dev
->rom_info
.log_sz
;
5877 dev
->internal_log
.nr_log
= 0;
5878 dev
->internal_log
.log
= vmalloc(size
);
5879 if (!dev
->internal_log
.log
) {
5884 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5886 goto out_alloc_log_fifo
;
5889 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5893 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5894 while (len
< size
) {
5895 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5900 if (log
->ctrl_idx
== 0xff) {
5904 if (log
->le
.event
== SSD_LOG_POWER_ON
) {
5905 if (dev
->internal_log
.nr_log
> dev
->last_poweron_id
) {
5906 dev
->last_poweron_id
= dev
->internal_log
.nr_log
;
5910 dev
->internal_log
.nr_log
++;
5912 len
+= sizeof(struct ssd_log
);
5913 off
+= sizeof(struct ssd_log
);
5919 sfifo_free(&dev
->log_fifo
);
5921 vfree(dev
->internal_log
.log
);
5922 dev
->internal_log
.log
= NULL
;
5923 dev
->internal_log
.nr_log
= 0;
5925 /* skip error if not in standard mode */
5926 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5933 static void ssd_stop_workq(struct ssd_device
*dev
)
5935 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5936 flush_workqueue(dev
->workq
);
5939 static void ssd_start_workq(struct ssd_device
*dev
)
5941 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5944 queue_work(dev
->workq
, &dev
->log_work
);
5947 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5949 flush_workqueue(dev
->workq
);
5950 destroy_workqueue(dev
->workq
);
5954 static int ssd_init_workq(struct ssd_device
*dev
)
5958 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5969 static int ssd_init_rom_info(struct ssd_device
*dev
)
5973 mutex_init(&dev
->spi_mutex
);
5974 mutex_init(&dev
->i2c_mutex
);
5976 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5977 /* fix bug: read data to clear status */
5978 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5980 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5981 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5982 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5984 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5985 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5986 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5988 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5989 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5990 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5992 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5994 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5995 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5996 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5997 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5998 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5999 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
6000 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
6002 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
6003 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6004 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6005 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6007 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6008 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6009 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6010 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6012 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
6013 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
6014 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
6016 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
6017 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
6019 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
6020 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
6021 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6023 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6024 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6025 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
6026 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
6027 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
6030 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
6031 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
6032 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
6033 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
6035 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
6036 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6037 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6038 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
6040 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
6041 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
6042 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
6043 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
6045 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
6046 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
6047 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
6049 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
6050 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
6051 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
6052 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
6053 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
6056 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
6057 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
6060 return ssd_init_spi(dev
);
6064 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
6068 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6069 struct hd_struct
*part
;
6075 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
6079 do_gettimeofday(&tv
);
6080 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
6083 run_time
= tv
.tv_sec
- dev
->uptime
;
6086 /* avoid frequently update */
6087 if (run_time
>= 60) {
6092 smart
->io_stat
.run_time
+= run_time
;
6094 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
6095 cpu
= part_stat_lock();
6096 part
= &dev
->gd
->part0
;
6097 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
6098 part_round_stats(dev
->rq
, cpu
, part
);
6100 part_round_stats(cpu
, part
);
6104 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
6105 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
6106 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
6107 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
6108 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
6110 disk_round_stats(dev
->gd
);
6113 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
6114 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
6115 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
6116 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
6119 disk_round_stats(dev
->gd
);
6122 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
6123 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
6124 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
6125 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
6128 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
6130 for (i
=0; i
<dev
->nr_queue
; i
++) {
6131 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
6132 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
6135 for (i
=0; i
<dev
->nr_queue
; i
++) {
6136 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
6137 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
6141 //dev->uptime = tv.tv_sec;
6146 static int __ssd_clear_smart(struct ssd_device
*dev
)
6150 uint32_t off
, length
;
6154 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6159 off
= dev
->rom_info
.smart_base
;
6160 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6162 ret
= ssd_spi_erase(dev
, off
, length
);
6164 hio_warn("%s: info erase: failed\n", dev
->name
);
6168 sversion
= dev
->smart
.version
;
6170 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6171 dev
->smart
.version
= sversion
+ 1;
6172 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6174 /* clear all tmp acc */
6175 for (i
=0; i
<dev
->nr_queue
; i
++) {
6176 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6177 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6180 atomic_set(&dev
->tocnt
, 0);
6182 /* clear tmp log info */
6183 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6185 do_gettimeofday(&tv
);
6186 dev
->uptime
= tv
.tv_sec
;
6189 //ssd_clear_alarm(dev);
6194 static int __ssd_clear_warning(struct ssd_device
*dev
)
6199 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6203 /* clear log_info warning */
6204 memset(&dev
->smart
.log_info
, 0, sizeof(dev
->smart
.log_info
));
6206 /* clear io_stat warning */
6207 dev
->smart
.io_stat
.nr_to
= 0;
6208 dev
->smart
.io_stat
.nr_rwerr
= 0;
6209 dev
->smart
.io_stat
.nr_ioerr
= 0;
6211 /* clear ecc_info warning */
6212 memset(&dev
->smart
.ecc_info
, 0, sizeof(dev
->smart
.ecc_info
));
6214 /* clear queued warnings */
6215 for (i
=0; i
<dev
->nr_queue
; i
++) {
6216 /* queued io_stat warning */
6217 dev
->queue
[i
].io_stat
.nr_to
= 0;
6218 dev
->queue
[i
].io_stat
.nr_rwerr
= 0;
6219 dev
->queue
[i
].io_stat
.nr_ioerr
= 0;
6221 /* queued ecc_info warning */
6222 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(dev
->queue
[i
].ecc_info
));
6225 /* write smart back to nor */
6226 for (i
= 0; i
< dev
->rom_info
.nr_smart
; i
++) {
6227 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6228 size
= dev
->rom_info
.smart_sz
;
6230 ret
= ssd_spi_erase(dev
, off
, size
);
6232 hio_warn("%s: warning erase: failed with code 1\n", dev
->name
);
6236 size
= sizeof(struct ssd_smart
);
6238 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6240 hio_warn("%s: warning erase: failed with code 2\n", dev
->name
);
6245 dev
->smart
.version
++;
6247 /* clear cmd timeout warning */
6248 atomic_set(&dev
->tocnt
, 0);
6250 /* clear tmp log info */
6251 memset(&dev
->log_info
, 0, sizeof(dev
->log_info
));
6257 static int ssd_clear_smart(struct ssd_device
*dev
)
6261 ret
= __ssd_clear_smart(dev
);
6263 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_SMART
, 0);
6269 static int ssd_clear_warning(struct ssd_device
*dev
)
6273 ret
= __ssd_clear_warning(dev
);
6275 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_WARNING
, 0);
6281 static int ssd_save_smart(struct ssd_device
*dev
)
6287 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6290 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6294 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6298 dev
->smart
.version
++;
6300 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6301 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6302 size
= dev
->rom_info
.smart_sz
;
6304 ret
= ssd_spi_erase(dev
, off
, size
);
6306 hio_warn("%s: info erase failed\n", dev
->name
);
6310 size
= sizeof(struct ssd_smart
);
6312 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6314 hio_warn("%s: info write failed\n", dev
->name
);
6325 static int ssd_init_smart(struct ssd_device
*dev
)
6327 struct ssd_smart
*smart
;
6329 uint32_t off
, size
, val
;
6332 int update_smart
= 0;
6334 do_gettimeofday(&tv
);
6335 dev
->uptime
= tv
.tv_sec
;
6337 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6341 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6347 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6350 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6351 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6353 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6354 size
= sizeof(struct ssd_smart
);
6356 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6358 hio_warn("%s: info read failed\n", dev
->name
);
6362 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6364 smart
[i
].version
= 0;
6368 if (smart
[i
].version
> dev
->smart
.version
) {
6369 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6373 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6374 /* first time power up */
6375 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6376 dev
->smart
.version
= 1;
6379 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
);
6381 dev
->last_poweron_id
= ~0;
6382 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
6383 if (dev
->smart
.io_stat
.nr_to
) {
6384 dev
->smart
.io_stat
.nr_to
= 0;
6389 /* check log info */
6391 struct ssd_log_info log_info
;
6392 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6394 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6396 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6399 switch (log
->le
.event
) {
6400 /* skip the volatile log info */
6401 case SSD_LOG_SEU_FAULT
:
6402 case SSD_LOG_SEU_FAULT1
:
6405 case SSD_LOG_TIMEOUT
:
6406 skip
= (dev
->last_poweron_id
>= log_info
.nr_log
);
6411 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6419 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6420 if (log_info
.stat
[i
] != dev
->smart
.log_info
.stat
[i
]) {
6422 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6429 ++dev
->smart
.version
;
6433 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6434 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6438 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6439 size
= dev
->rom_info
.smart_sz
;
6441 ret
= ssd_spi_erase(dev
, off
, size
);
6443 hio_warn("%s: info erase failed\n", dev
->name
);
6447 size
= sizeof(struct ssd_smart
);
6448 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6450 hio_warn("%s: info write failed\n", dev
->name
);
6457 /* sync smart with alarm led */
6458 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6459 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6466 /* skip error if not in standard mode */
6467 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6474 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6476 struct ssd_bm_manufacturer_data bm_md
= {0};
6477 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6485 mutex_lock(&dev
->bm_mutex
);
6487 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6488 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6493 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6494 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6499 if (bm_md
.firmware_ver
& 0xF000) {
6504 *ver
= bm_md
.firmware_ver
;
6507 mutex_unlock(&dev
->bm_mutex
);
6511 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6514 int i
= SSD_BM_RETRY_MAX
;
6518 ret
= __ssd_bm_get_version(dev
, &tmp
);
6532 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6534 struct ssd_bm_configuration_registers bm_cr
;
6535 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6539 mutex_lock(&dev
->bm_mutex
);
6541 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6542 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6547 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6548 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6553 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6558 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6561 mutex_unlock(&dev
->bm_mutex
);
6565 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6568 int i
= SSD_BM_RETRY_MAX
;
6572 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6586 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6588 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6589 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6592 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6601 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6604 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6607 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6617 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6620 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6623 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6633 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6635 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6636 struct ssd_bm bm_status
;
6637 uint8_t buf
[2] = {0, };
6642 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6644 while (bm_sbs
->desc
!= NULL
) {
6645 switch (bm_sbs
->size
) {
6647 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6649 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6655 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6657 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6660 //val = *(uint16_t *)buf;
6668 switch (bm_sbs
->unit
) {
6669 case SBS_UNIT_VALUE
:
6670 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6672 case SBS_UNIT_TEMPERATURE
:
6673 cval
= (uint16_t)(val
- 2731) / 10;
6674 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6676 case SBS_UNIT_VOLTAGE
:
6677 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6679 case SBS_UNIT_CURRENT
:
6680 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6683 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6685 case SBS_UNIT_PERCENT
:
6686 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6688 case SBS_UNIT_CAPACITANCE
:
6689 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6700 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6706 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6708 struct ssd_bm bm_status
= {0};
6713 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6718 /* capacitor voltage */
6719 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6724 for (i
=0; i
<nr_cap
; i
++) {
6725 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6726 *status
= SSD_BMSTATUS_WARNING
;
6732 if (bm_status
.sf_status
) {
6733 *status
= SSD_BMSTATUS_WARNING
;
6738 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6739 *status
= SSD_BMSTATUS_CHARGING
;
6741 *status
= SSD_BMSTATUS_OK
;
6748 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6750 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6751 static void ssd_bm_worker(void *data
)
6753 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6755 static void ssd_bm_worker(struct work_struct
*work
)
6757 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6763 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6767 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6771 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6775 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6777 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6781 /* need cap learning ? */
6782 if (!(opstatus
& 0xF0)) {
6783 ret
= ssd_bm_enter_cap_learning(dev
);
6785 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6791 static void ssd_bm_routine_start(void *data
)
6793 struct ssd_device
*dev
;
6800 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6801 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6802 queue_work(dev
->workq
, &dev
->bm_work
);
6804 queue_work(dev
->workq
, &dev
->capmon_work
);
6810 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6817 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6822 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6827 /* make sure the lm80 voltage value is updated */
6828 msleep(SSD_LM80_CONV_INTERVAL
);
6830 /* check if full charged */
6833 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6835 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6836 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6840 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6841 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6846 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6850 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6853 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6855 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6856 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6860 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6867 /* enter cap learn */
6868 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6872 msleep(SSD_PL_CAP_LEARN_WAIT
);
6874 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6875 if (!((t
>> 1) & 0x1)) {
6880 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6886 if ((t
>> 4) & 0x1) {
6897 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6903 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6911 mutex_lock(&dev
->bm_mutex
);
6913 ssd_stop_workq(dev
);
6915 ret
= ssd_do_cap_learn(dev
, cap
);
6917 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6921 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6924 ssd_start_workq(dev
);
6925 mutex_unlock(&dev
->bm_mutex
);
6930 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6938 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6942 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6949 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6951 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6952 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6956 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6957 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6962 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6964 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6967 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6970 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6971 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6976 /* enable cap INx */
6977 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6979 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6980 ssd_generate_sensor_fault_log(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
,ret
);
6986 /* skip error if not in standard mode */
6987 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6993 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6999 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7003 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
7008 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
7012 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
7013 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
7021 static int ssd_init_pl_cap(struct ssd_device
*dev
)
7025 /* set here: user write mode */
7026 dev
->user_wmode
= wmode
;
7028 mutex_init(&dev
->bm_mutex
);
7030 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7032 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
7033 if ((val
>> 1) & 0x1) {
7034 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
7037 ret
= ssd_check_pl_cap(dev
);
7039 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/*
 * Ensure that @str, a fixed-size field of @len bytes read from flash,
 * is a valid NUL-terminated C string.
 *
 * If a terminator already exists within the field, nothing is changed;
 * otherwise the last byte is overwritten with '\0' so later str*
 * operations cannot run off the end of the field.
 */
static void __end_str(char *str, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (*(str + i) == '\0')
			return;
	}

	/* no terminator found: force one at the last byte */
	*(str + len - 1) = '\0';
}
7058 static int ssd_init_label(struct ssd_device
*dev
)
7064 /* label location */
7065 off
= dev
->rom_info
.label_base
;
7067 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7068 size
= sizeof(struct ssd_label
);
7071 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
7073 memset(&dev
->label
, 0, size
);
7077 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
7078 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
7079 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
7080 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
7081 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
7082 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
7084 size
= sizeof(struct ssd_labelv3
);
7087 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
7089 memset(&dev
->labelv3
, 0, size
);
7093 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7094 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7095 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
7096 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
7097 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7098 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7099 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
7100 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
7101 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
7105 /* skip error if not in standard mode */
7106 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7112 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
7114 struct ssd_device
*dev
;
7116 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
7120 dev
= bdev
->bd_disk
->private_data
;
7122 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7123 memset(label
, 0, sizeof(struct ssd_label
));
7124 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
7125 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
7126 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
7127 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
7129 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
7135 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
7137 uint16_t bm_ver
= 0;
7140 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7141 ret
= ssd_bm_get_version(dev
, &bm_ver
);
7147 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
7148 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
7149 ver
->bm_ver
= bm_ver
;
7150 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
7151 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
7158 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
7160 struct ssd_device
*dev
;
7163 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
7167 dev
= bdev
->bd_disk
->private_data
;
7169 mutex_lock(&dev
->fw_mutex
);
7170 ret
= __ssd_get_version(dev
, ver
);
7171 mutex_unlock(&dev
->fw_mutex
);
7176 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
7184 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7190 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
7191 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
7192 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
7193 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
7194 *temp
= (int)dev
->db_info
.data
.log
.extra
;
7199 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7200 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
7202 val
= ssd_reg_read(dev
->ctrlp
+ off
);
7203 if (val
== 0xffffffffffffffffull
) {
7207 cur
= (int)CUR_TEMP(val
);
7218 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
7220 struct ssd_device
*dev
;
7223 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
7227 dev
= bdev
->bd_disk
->private_data
;
7230 mutex_lock(&dev
->fw_mutex
);
7231 ret
= __ssd_get_temperature(dev
, temp
);
7232 mutex_unlock(&dev
->fw_mutex
);
7237 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
7239 struct ssd_device
*dev
;
7241 if (!bdev
|| !(bdev
->bd_disk
)) {
7245 dev
= bdev
->bd_disk
->private_data
;
7246 ssd_set_ot_protect(dev
, !!otprotect
);
7251 int ssd_bm_status(struct block_device
*bdev
, int *status
)
7253 struct ssd_device
*dev
;
7256 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7260 dev
= bdev
->bd_disk
->private_data
;
7262 mutex_lock(&dev
->fw_mutex
);
7263 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7264 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7265 *status
= SSD_BMSTATUS_WARNING
;
7267 *status
= SSD_BMSTATUS_OK
;
7269 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7270 ret
= __ssd_bm_status(dev
, status
);
7272 *status
= SSD_BMSTATUS_OK
;
7274 mutex_unlock(&dev
->fw_mutex
);
7279 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7281 struct ssd_device
*dev
;
7283 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7287 dev
= bdev
->bd_disk
->private_data
;
7289 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7290 paddr
->bus
= dev
->pdev
->bus
->number
;
7291 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7292 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7298 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7303 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7307 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7308 if (0xffffffffull
== acc
->threshold_l1
) {
7311 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7312 if (0xffffffffull
== acc
->threshold_l2
) {
7317 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7318 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7319 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7320 if (0xffffffffull
== acc
->val
) {
7323 if (val
> acc
->val
) {
7332 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7337 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7341 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7342 if (0xffffffffull
== acc
->threshold_l1
) {
7345 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7346 if (0xffffffffull
== acc
->threshold_l2
) {
7351 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7352 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7353 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7354 if (0xffffffffull
== acc
->val
) {
7358 if (val
> acc
->val
) {
7369 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7371 struct ssd_ram_op_msg
*msg
;
7373 size_t len
= length
;
7377 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7378 || !length
|| length
> dev
->hw_info
.ram_max_len
7379 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7383 len
/= dev
->hw_info
.ram_align
;
7384 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7386 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7387 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7388 ret
= dma_mapping_error(buf_dma
);
7390 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7393 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7394 goto out_dma_mapping
;
7397 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7399 msg
->fun
= SSD_FUNC_RAM_READ
;
7400 msg
->ctrl_idx
= ctrl_idx
;
7401 msg
->start
= (uint32_t)ofs_w
;
7405 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7408 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7414 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7416 struct ssd_ram_op_msg
*msg
;
7418 size_t len
= length
;
7422 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7423 || !length
|| length
> dev
->hw_info
.ram_max_len
7424 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7428 len
/= dev
->hw_info
.ram_align
;
7429 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7431 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7432 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7433 ret
= dma_mapping_error(buf_dma
);
7435 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7438 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7439 goto out_dma_mapping
;
7442 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7444 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7445 msg
->ctrl_idx
= ctrl_idx
;
7446 msg
->start
= (uint32_t)ofs_w
;
7450 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7453 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7460 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7467 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7468 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7473 len
= dev
->hw_info
.ram_max_len
;
7474 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7478 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7491 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7498 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7499 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7504 len
= dev
->hw_info
.ram_max_len
;
7505 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7509 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7524 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7526 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7527 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7529 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7533 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7537 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7543 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7545 struct ssd_nand_op_msg
*msg
;
7552 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7553 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7554 ret
= dma_mapping_error(buf_dma
);
7556 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7559 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7560 goto out_dma_mapping
;
7563 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7564 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7568 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7570 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7571 msg
->chip_no
= flash
;
7572 msg
->chip_ce
= chip
;
7573 msg
->ctrl_idx
= ctrl_idx
;
7576 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7579 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7586 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7587 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7589 struct ssd_nand_op_msg
*msg
;
7598 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7602 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7607 length
= page_count
* dev
->hw_info
.page_size
;
7609 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7610 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7611 ret
= dma_mapping_error(buf_dma
);
7613 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7616 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7617 goto out_dma_mapping
;
7620 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7621 flash
= (flash
<< 1) | chip
;
7625 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7627 msg
->fun
= SSD_FUNC_NAND_READ
;
7628 msg
->ctrl_idx
= ctrl_idx
;
7629 msg
->chip_no
= flash
;
7630 msg
->chip_ce
= chip
;
7631 msg
->page_no
= page
;
7632 msg
->page_count
= page_count
;
7635 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7638 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7645 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7646 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7648 struct ssd_nand_op_msg
*msg
;
7657 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7661 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7666 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7668 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7669 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7670 ret
= dma_mapping_error(buf_dma
);
7672 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7675 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7676 goto out_dma_mapping
;
7679 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7680 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7684 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7686 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7687 msg
->ctrl_idx
= ctrl_idx
;
7688 msg
->chip_no
= flash
;
7689 msg
->chip_ce
= chip
;
7690 msg
->page_no
= page
;
7691 msg
->page_count
= count
;
7694 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7697 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7704 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7705 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7707 struct ssd_nand_op_msg
*msg
;
7712 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7724 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7729 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7731 /* write data to ram */
7732 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7737 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7738 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7739 ret
= dma_mapping_error(buf_dma
);
7741 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7744 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7745 goto out_dma_mapping
;
7748 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7749 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7753 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7755 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7756 msg
->ctrl_idx
= ctrl_idx
;
7757 msg
->chip_no
= flash
;
7758 msg
->chip_ce
= chip
;
7760 msg
->page_no
= page
;
7761 msg
->page_count
= count
;
7764 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7767 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7773 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7775 struct ssd_nand_op_msg
*msg
;
7778 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7783 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7784 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7788 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7790 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7791 msg
->ctrl_idx
= ctrl_idx
;
7792 msg
->chip_no
= flash
;
7793 msg
->chip_ce
= chip
;
7794 msg
->page_no
= page
;
7796 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7802 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7804 struct ssd_nand_op_msg
*msg
;
7805 struct ssd_flush_msg
*fmsg
;
7808 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7813 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7815 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7816 fmsg
= (struct ssd_flush_msg
*)msg
;
7818 fmsg
->fun
= SSD_FUNC_FLUSH
;
7820 fmsg
->flash
= flash
;
7821 fmsg
->ctrl_idx
= ctrl_idx
;
7823 msg
->fun
= SSD_FUNC_FLUSH
;
7825 msg
->chip_no
= flash
;
7826 msg
->ctrl_idx
= ctrl_idx
;
7829 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7835 /* flash controller init state */
7836 static int __ssd_check_init_state(struct ssd_device
*dev
)
7838 uint32_t *init_state
= NULL
;
7839 int reg_base
, reg_sz
;
7840 int max_wait
= SSD_INIT_MAX_WAIT
;
7846 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7847 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7848 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7849 if (read_data == ~test_data) {
7850 //dev->hw_info.nr_ctrl++;
7851 dev->hw_info.nr_ctrl_map |= 1<<i;
7857 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7859 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7860 if (((read_data>>i) & 0x1) == 0) {
7865 if (dev->hw_info.nr_ctrl != j) {
7866 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
7872 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7873 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7874 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7875 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7881 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7882 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7883 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7884 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7889 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7890 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7891 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7892 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
7898 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7899 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7902 reg_base
= dev
->protocol_info
.init_state_reg
;
7903 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
7905 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7910 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7912 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7913 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7916 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7917 /* just check the last bit, no need to check all channel */
7918 ch_start
= dev
->hw_info
.max_ch
- 1;
7923 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7924 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7925 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7930 if (init_wait
<= max_wait
) {
7931 msleep(SSD_INIT_WAIT
);
7934 if (k
< dev
->hw_info
.nr_ch
) {
7935 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7936 dev
->name
, i
, j
, k
);
7938 hio_warn("%s: controller %d chip %d init failed\n",
7949 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
7955 static int ssd_check_init_state(struct ssd_device
*dev
)
7957 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7961 return __ssd_check_init_state(dev
);
7964 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7966 /* reset flash controller etc */
7967 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7970 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7974 mutex_lock(&dev
->fw_mutex
);
7976 if (type
== SSD_RST_NOINIT
) { //no init
7977 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7978 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7979 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7980 } else { // full reset
7981 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7982 mutex_unlock(&dev
->fw_mutex
);
7986 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7989 ssd_reset_resp_ptr(dev
);
7992 #ifdef SSD_OT_PROTECT
7999 ssd_set_flush_timeout(dev
, dev
->wmode
);
8001 mutex_unlock(&dev
->fw_mutex
);
8002 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
8003 do_gettimeofday(&tv
);
8004 dev
->reset_time
= tv
.tv_sec
;
8006 return __ssd_check_init_state(dev
);
8009 static int ssd_save_md(struct ssd_device
*dev
)
8011 struct ssd_nand_op_msg
*msg
;
8014 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8017 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8021 if (!dev
->save_md
) {
8025 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8027 msg
->fun
= SSD_FUNC_FLUSH
;
8032 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8038 static int ssd_barrier_save_md(struct ssd_device
*dev
)
8040 struct ssd_nand_op_msg
*msg
;
8043 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8046 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
8050 if (!dev
->save_md
) {
8054 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8056 msg
->fun
= SSD_FUNC_FLUSH
;
8061 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8067 static int ssd_flush(struct ssd_device
*dev
)
8069 struct ssd_nand_op_msg
*msg
;
8070 struct ssd_flush_msg
*fmsg
;
8073 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8076 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8078 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8079 fmsg
= (struct ssd_flush_msg
*)msg
;
8081 fmsg
->fun
= SSD_FUNC_FLUSH
;
8086 msg
->fun
= SSD_FUNC_FLUSH
;
8092 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
8098 static int ssd_barrier_flush(struct ssd_device
*dev
)
8100 struct ssd_nand_op_msg
*msg
;
8101 struct ssd_flush_msg
*fmsg
;
8104 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
8107 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
8109 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
8110 fmsg
= (struct ssd_flush_msg
*)msg
;
8112 fmsg
->fun
= SSD_FUNC_FLUSH
;
8117 msg
->fun
= SSD_FUNC_FLUSH
;
8123 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
8129 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
8130 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
8131 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
8132 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
8137 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8142 case SSD_WMODE_BUFFER
:
8143 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8145 case SSD_WMODE_BUFFER_EX
:
8146 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
8147 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
8149 to
= SSD_WMODE_BUFFER_TIMEOUT
;
8153 to
= SSD_WMODE_FUA_TIMEOUT
;
8159 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
8161 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
8164 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
8168 ret
= ssd_barrier_start(dev
);
8173 ret
= ssd_barrier_flush(dev
);
8175 goto out_barrier_end
;
8178 /* set contoller flush timeout */
8179 ssd_set_flush_timeout(dev
, m
);
8185 ssd_barrier_end(dev
);
8190 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
8196 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8200 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8201 default_wmode
= SSD_WMODE_BUFFER
;
8203 default_wmode
= SSD_WMODE_BUFFER_EX
;
8206 if (SSD_WMODE_AUTO
== m
) {
8207 /* battery fault ? */
8208 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8209 next_wmode
= SSD_WMODE_FUA
;
8211 next_wmode
= default_wmode
;
8213 } else if (SSD_WMODE_DEFAULT
== m
) {
8214 next_wmode
= default_wmode
;
8219 if (next_wmode
!= dev
->wmode
) {
8220 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8221 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
8223 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8230 static int ssd_init_wmode(struct ssd_device
*dev
)
8235 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8236 default_wmode
= SSD_WMODE_BUFFER
;
8238 default_wmode
= SSD_WMODE_BUFFER_EX
;
8242 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
8243 /* battery fault ? */
8244 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8245 dev
->wmode
= SSD_WMODE_FUA
;
8247 dev
->wmode
= default_wmode
;
8249 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
8250 dev
->wmode
= default_wmode
;
8252 dev
->wmode
= dev
->user_wmode
;
8254 ssd_set_flush_timeout(dev
, dev
->wmode
);
8259 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8263 /* not support old fw*/
8264 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8269 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8274 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8276 dev
->user_wmode
= m
;
8278 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
8287 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8289 struct ssd_device
*dev
;
8291 if (!bdev
|| !(bdev
->bd_disk
)) {
8295 dev
= bdev
->bd_disk
->private_data
;
8297 return __ssd_set_wmode(dev
, m
);
8300 static int ssd_do_reset(struct ssd_device
*dev
)
8304 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8308 ssd_stop_workq(dev
);
8310 ret
= ssd_barrier_start(dev
);
8315 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8317 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8320 //ret = __ssd_reset(dev, SSD_RST_FULL);
8321 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8324 goto out_barrier_end
;
8328 ssd_barrier_end(dev
);
8330 ssd_start_workq(dev
);
8331 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8335 static int ssd_full_reset(struct ssd_device
*dev
)
8339 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8343 ssd_stop_workq(dev
);
8345 ret
= ssd_barrier_start(dev
);
8350 ret
= ssd_barrier_flush(dev
);
8352 goto out_barrier_end
;
8355 ret
= ssd_barrier_save_md(dev
);
8357 goto out_barrier_end
;
8360 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8362 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8365 //ret = __ssd_reset(dev, SSD_RST_FULL);
8366 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8369 goto out_barrier_end
;
8373 ssd_barrier_end(dev
);
8375 ssd_start_workq(dev
);
8376 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8380 int ssd_reset(struct block_device
*bdev
)
8383 struct ssd_device
*dev
;
8385 if (!bdev
|| !(bdev
->bd_disk
)) {
8389 dev
= bdev
->bd_disk
->private_data
;
8391 ret
= ssd_full_reset(dev
);
8393 if (!dev
->has_non_0x98_reg_access
) {
8394 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, 0);
8401 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
8402 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8403 sector_t
*error_sector
)
8405 struct ssd_device
*dev
= q
->queuedata
;
8407 return ssd_flush(dev
);
8411 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8413 struct ssd_device
*dev
= q
->queuedata
;
8414 #ifdef SSD_QUEUE_PBIO
8418 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8419 ssd_bio_endio(bio
, -ENODEV
);
8423 #ifdef SSD_DEBUG_ERR
8424 if (atomic_read(&dev
->tocnt
)) {
8425 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8426 ssd_bio_endio(bio
, -EIO
);
8431 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8432 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8436 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8437 ssd_bio_endio(bio
, -EROFS
);
8441 #ifdef SSD_QUEUE_PBIO
8442 if (0 == atomic_read(&dev
->in_sendq
)) {
8443 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8447 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8448 ssd_queue_bio(dev
, bio
);
8451 __ssd_submit_pbio(dev
, bio
, 1);
8458 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8459 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8460 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8461 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8463 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8466 struct ssd_device
*dev
= q
->queuedata
;
8469 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8470 ssd_bio_endio(bio
, -ENODEV
);
8474 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
8475 blk_queue_split(q
, &bio
);
8476 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
8477 blk_queue_split(q
, &bio
, q
->bio_split
);
8480 #ifdef SSD_DEBUG_ERR
8481 if (atomic_read(&dev
->tocnt
)) {
8482 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8483 ssd_bio_endio(bio
, -EIO
);
8488 if (unlikely(ssd_bio_has_barrier_or_fua(bio
))) {
8489 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8493 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8494 if (unlikely(ssd_bio_has_flush(bio
) && !bio_sectors(bio
))) {
8495 ssd_bio_endio(bio
, 0);
8499 if (0 == atomic_read(&dev
->in_sendq
)) {
8500 ret
= ssd_submit_bio(dev
, bio
, 0);
8504 ssd_queue_bio(dev
, bio
);
8508 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8509 return BLK_QC_T_NONE
;
8510 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8517 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
8518 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8520 struct ssd_device
*dev
;
8526 dev
= bdev
->bd_disk
->private_data
;
8533 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8538 static int ssd_init_queue(struct ssd_device
*dev
);
8539 static void ssd_cleanup_queue(struct ssd_device
*dev
);
8540 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8541 static int ssd_init_blkdev(struct ssd_device
*dev
);
8542 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8544 void __user
*argp
= (void __user
*)arg
;
8545 void __user
*buf
= NULL
;
8550 case SSD_CMD_GET_PROTOCOL_INFO
:
8551 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8552 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8558 case SSD_CMD_GET_HW_INFO
:
8559 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8560 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8566 case SSD_CMD_GET_ROM_INFO
:
8567 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8568 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8574 case SSD_CMD_GET_SMART
: {
8575 struct ssd_smart smart
;
8578 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8580 mutex_lock(&dev
->gd_mutex
);
8581 ssd_update_smart(dev
, &smart
);
8582 mutex_unlock(&dev
->gd_mutex
);
8584 /* combine the volatile log info */
8585 if (dev
->log_info
.nr_log
) {
8586 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8587 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8591 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8592 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8600 case SSD_CMD_GET_IDX
:
8601 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8602 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8608 case SSD_CMD_GET_AMOUNT
: {
8609 int nr_ssd
= atomic_read(&ssd_nr
);
8610 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8611 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8618 case SSD_CMD_GET_TO_INFO
: {
8619 int tocnt
= atomic_read(&dev
->tocnt
);
8621 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8622 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8629 case SSD_CMD_GET_DRV_VER
: {
8630 char ver
[] = DRIVER_VERSION
;
8631 int len
= sizeof(ver
);
8633 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8634 len
= (DRIVER_VERSION_LEN
- 1);
8636 if (copy_to_user(argp
, ver
, len
)) {
8637 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8644 case SSD_CMD_GET_BBACC_INFO
: {
8645 struct ssd_acc_info acc
;
8647 mutex_lock(&dev
->fw_mutex
);
8648 ret
= ssd_bb_acc(dev
, &acc
);
8649 mutex_unlock(&dev
->fw_mutex
);
8654 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8655 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8662 case SSD_CMD_GET_ECACC_INFO
: {
8663 struct ssd_acc_info acc
;
8665 mutex_lock(&dev
->fw_mutex
);
8666 ret
= ssd_ec_acc(dev
, &acc
);
8667 mutex_unlock(&dev
->fw_mutex
);
8672 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8673 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8680 case SSD_CMD_GET_HW_INFO_EXT
:
8681 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8682 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8688 case SSD_CMD_REG_READ
: {
8689 struct ssd_reg_op_info reg_info
;
8691 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8692 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8697 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8702 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8703 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8704 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8712 case SSD_CMD_REG_WRITE
: {
8713 struct ssd_reg_op_info reg_info
;
8715 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8716 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8721 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8726 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8731 case SSD_CMD_SPI_READ
: {
8732 struct ssd_spi_op_info spi_info
;
8735 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8736 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8742 size
= spi_info
.len
;
8745 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8750 kbuf
= kmalloc(size
, GFP_KERNEL
);
8756 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8762 if (copy_to_user(buf
, kbuf
, size
)) {
8763 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8774 case SSD_CMD_SPI_WRITE
: {
8775 struct ssd_spi_op_info spi_info
;
8778 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8779 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8785 size
= spi_info
.len
;
8788 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8793 kbuf
= kmalloc(size
, GFP_KERNEL
);
8799 if (copy_from_user(kbuf
, buf
, size
)) {
8800 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8806 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8817 case SSD_CMD_SPI_ERASE
: {
8818 struct ssd_spi_op_info spi_info
;
8821 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8822 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8829 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8834 ret
= ssd_spi_block_erase(dev
, off
);
8842 case SSD_CMD_I2C_READ
: {
8843 struct ssd_i2c_op_info i2c_info
;
8847 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8848 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8853 saddr
= i2c_info
.saddr
;
8854 rsize
= i2c_info
.rsize
;
8855 buf
= i2c_info
.rbuf
;
8857 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8862 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8868 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8874 if (copy_to_user(buf
, kbuf
, rsize
)) {
8875 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8886 case SSD_CMD_I2C_WRITE
: {
8887 struct ssd_i2c_op_info i2c_info
;
8891 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8892 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8897 saddr
= i2c_info
.saddr
;
8898 wsize
= i2c_info
.wsize
;
8899 buf
= i2c_info
.wbuf
;
8901 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8906 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8912 if (copy_from_user(kbuf
, buf
, wsize
)) {
8913 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8919 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8930 case SSD_CMD_I2C_WRITE_READ
: {
8931 struct ssd_i2c_op_info i2c_info
;
8937 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8938 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8943 saddr
= i2c_info
.saddr
;
8944 wsize
= i2c_info
.wsize
;
8945 rsize
= i2c_info
.rsize
;
8946 buf
= i2c_info
.wbuf
;
8948 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8953 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8958 size
= wsize
+ rsize
;
8960 kbuf
= kmalloc(size
, GFP_KERNEL
);
8966 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8967 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8973 buf
= i2c_info
.rbuf
;
8975 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8981 if (copy_to_user(buf
, kbuf
, rsize
)) {
8982 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8993 case SSD_CMD_SMBUS_SEND_BYTE
: {
8994 struct ssd_smbus_op_info smbus_info
;
8995 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8999 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9000 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9005 saddr
= smbus_info
.saddr
;
9006 buf
= smbus_info
.buf
;
9009 if (copy_from_user(smb_data
, buf
, size
)) {
9010 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9015 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
9023 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
9024 struct ssd_smbus_op_info smbus_info
;
9025 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9029 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9030 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9035 saddr
= smbus_info
.saddr
;
9036 buf
= smbus_info
.buf
;
9039 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
9044 if (copy_to_user(buf
, smb_data
, size
)) {
9045 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9053 case SSD_CMD_SMBUS_WRITE_BYTE
: {
9054 struct ssd_smbus_op_info smbus_info
;
9055 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9060 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9061 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9066 saddr
= smbus_info
.saddr
;
9067 command
= smbus_info
.cmd
;
9068 buf
= smbus_info
.buf
;
9071 if (copy_from_user(smb_data
, buf
, size
)) {
9072 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9077 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
9085 case SSD_CMD_SMBUS_READ_BYTE
: {
9086 struct ssd_smbus_op_info smbus_info
;
9087 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9092 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9093 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9098 saddr
= smbus_info
.saddr
;
9099 command
= smbus_info
.cmd
;
9100 buf
= smbus_info
.buf
;
9103 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
9108 if (copy_to_user(buf
, smb_data
, size
)) {
9109 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9117 case SSD_CMD_SMBUS_WRITE_WORD
: {
9118 struct ssd_smbus_op_info smbus_info
;
9119 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9124 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9125 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9130 saddr
= smbus_info
.saddr
;
9131 command
= smbus_info
.cmd
;
9132 buf
= smbus_info
.buf
;
9135 if (copy_from_user(smb_data
, buf
, size
)) {
9136 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9141 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
9149 case SSD_CMD_SMBUS_READ_WORD
: {
9150 struct ssd_smbus_op_info smbus_info
;
9151 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9156 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9157 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9162 saddr
= smbus_info
.saddr
;
9163 command
= smbus_info
.cmd
;
9164 buf
= smbus_info
.buf
;
9167 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
9172 if (copy_to_user(buf
, smb_data
, size
)) {
9173 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9181 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
9182 struct ssd_smbus_op_info smbus_info
;
9183 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9188 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9189 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9194 saddr
= smbus_info
.saddr
;
9195 command
= smbus_info
.cmd
;
9196 buf
= smbus_info
.buf
;
9197 size
= smbus_info
.size
;
9199 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9204 if (copy_from_user(smb_data
, buf
, size
)) {
9205 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9210 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9218 case SSD_CMD_SMBUS_READ_BLOCK
: {
9219 struct ssd_smbus_op_info smbus_info
;
9220 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9225 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9226 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9231 saddr
= smbus_info
.saddr
;
9232 command
= smbus_info
.cmd
;
9233 buf
= smbus_info
.buf
;
9234 size
= smbus_info
.size
;
9236 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9241 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9246 if (copy_to_user(buf
, smb_data
, size
)) {
9247 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9255 case SSD_CMD_BM_GET_VER
: {
9258 ret
= ssd_bm_get_version(dev
, &ver
);
9263 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9264 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9272 case SSD_CMD_BM_GET_NR_CAP
: {
9275 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9280 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9281 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9289 case SSD_CMD_BM_CAP_LEARNING
: {
9290 ret
= ssd_bm_enter_cap_learning(dev
);
9299 case SSD_CMD_CAP_LEARN
: {
9302 ret
= ssd_cap_learn(dev
, &cap
);
9307 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9308 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9316 case SSD_CMD_GET_CAP_STATUS
: {
9319 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9323 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9324 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9332 case SSD_CMD_RAM_READ
: {
9333 struct ssd_ram_op_info ram_info
;
9336 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9339 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9340 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9345 ofs
= ram_info
.start
;
9346 length
= ram_info
.length
;
9348 ctrl_idx
= ram_info
.ctrl_idx
;
9350 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9355 kbuf
= kmalloc(len
, GFP_KERNEL
);
9361 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9362 if ((length
- rlen
) < len
) {
9363 len
= length
- rlen
;
9366 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9371 if (copy_to_user(buf
, kbuf
, len
)) {
9382 case SSD_CMD_RAM_WRITE
: {
9383 struct ssd_ram_op_info ram_info
;
9386 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9389 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9390 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9394 ofs
= ram_info
.start
;
9395 length
= ram_info
.length
;
9397 ctrl_idx
= ram_info
.ctrl_idx
;
9399 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9404 kbuf
= kmalloc(len
, GFP_KERNEL
);
9410 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9411 if ((length
- wlen
) < len
) {
9412 len
= length
- wlen
;
9415 if (copy_from_user(kbuf
, buf
, len
)) {
9420 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9431 case SSD_CMD_NAND_READ_ID
: {
9432 struct ssd_flash_op_info flash_info
;
9433 int chip_no
, chip_ce
, length
, ctrl_idx
;
9435 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9436 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9441 chip_no
= flash_info
.flash
;
9442 chip_ce
= flash_info
.chip
;
9443 ctrl_idx
= flash_info
.ctrl_idx
;
9444 buf
= flash_info
.buf
;
9445 length
= dev
->hw_info
.id_size
;
9447 //kbuf = kmalloc(length, GFP_KERNEL);
9448 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9453 memset(kbuf
, 0, length
);
9455 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9461 if (copy_to_user(buf
, kbuf
, length
)) {
9472 case SSD_CMD_NAND_READ
: { //with oob
9473 struct ssd_flash_op_info flash_info
;
9475 int flash
, chip
, page
, ctrl_idx
;
9478 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9479 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9484 flash
= flash_info
.flash
;
9485 chip
= flash_info
.chip
;
9486 page
= flash_info
.page
;
9487 buf
= flash_info
.buf
;
9488 ctrl_idx
= flash_info
.ctrl_idx
;
9490 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9492 kbuf
= kmalloc(length
, GFP_KERNEL
);
9498 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9499 if (ret
&& (-EIO
!= ret
)) {
9504 if (copy_to_user(buf
, kbuf
, length
)) {
9516 case SSD_CMD_NAND_WRITE
: {
9517 struct ssd_flash_op_info flash_info
;
9518 int flash
, chip
, page
, ctrl_idx
;
9521 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9522 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9527 flash
= flash_info
.flash
;
9528 chip
= flash_info
.chip
;
9529 page
= flash_info
.page
;
9530 buf
= flash_info
.buf
;
9531 ctrl_idx
= flash_info
.ctrl_idx
;
9533 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9535 kbuf
= kmalloc(length
, GFP_KERNEL
);
9541 if (copy_from_user(kbuf
, buf
, length
)) {
9547 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9557 case SSD_CMD_NAND_ERASE
: {
9558 struct ssd_flash_op_info flash_info
;
9559 int flash
, chip
, page
, ctrl_idx
;
9561 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9562 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9567 flash
= flash_info
.flash
;
9568 chip
= flash_info
.chip
;
9569 page
= flash_info
.page
;
9570 ctrl_idx
= flash_info
.ctrl_idx
;
9572 if ((page
% dev
->hw_info
.page_count
) != 0) {
9577 //hio_warn("erase fs = %llx\n", ofs);
9578 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9586 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9587 struct ssd_flash_op_info flash_info
;
9589 int flash
, chip
, page
, ctrl_idx
;
9591 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9592 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9597 flash
= flash_info
.flash
;
9598 chip
= flash_info
.chip
;
9599 page
= flash_info
.page
;
9600 buf
= flash_info
.buf
;
9601 ctrl_idx
= flash_info
.ctrl_idx
;
9603 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9605 kbuf
= kmalloc(length
, GFP_KERNEL
);
9611 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9612 if (-EIO
== ret
) { //ingore EIO
9620 if (copy_to_user(buf
, kbuf
, length
)) {
9630 case SSD_CMD_UPDATE_BBT
: {
9631 struct ssd_flash_op_info flash_info
;
9632 int ctrl_idx
, flash
;
9634 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9635 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9640 ctrl_idx
= flash_info
.ctrl_idx
;
9641 flash
= flash_info
.flash
;
9642 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9650 case SSD_CMD_CLEAR_ALARM
:
9651 ssd_clear_alarm(dev
);
9654 case SSD_CMD_SET_ALARM
:
9659 ret
= ssd_do_reset(dev
);
9662 case SSD_CMD_RELOAD_FW
:
9664 dev
->has_non_0x98_reg_access
= 1;
9665 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9666 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9667 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9668 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9673 case SSD_CMD_UNLOAD_DEV
: {
9674 if (atomic_read(&dev
->refcnt
)) {
9680 ssd_save_smart(dev
);
9682 ret
= ssd_flush(dev
);
9687 /* cleanup the block device */
9688 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9689 mutex_lock(&dev
->gd_mutex
);
9690 ssd_cleanup_blkdev(dev
);
9691 ssd_cleanup_queue(dev
);
9692 mutex_unlock(&dev
->gd_mutex
);
9698 case SSD_CMD_LOAD_DEV
: {
9700 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9705 ret
= ssd_init_smart(dev
);
9707 hio_warn("%s: init info: failed\n", dev
->name
);
9711 ret
= ssd_init_queue(dev
);
9713 hio_warn("%s: init queue failed\n", dev
->name
);
9716 ret
= ssd_init_blkdev(dev
);
9718 hio_warn("%s: register block device: failed\n", dev
->name
);
9721 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9726 case SSD_CMD_UPDATE_VP
: {
9728 uint32_t new_vp
, new_vp1
= 0;
9730 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9735 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9736 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9741 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9746 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9747 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9749 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9750 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9751 new_vp1
= val
& 0x3FF;
9753 new_vp1
= val
& 0x7FFF;
9756 if (new_vp1
== new_vp
) {
9761 /*if (new_vp == dev->hw_info.valid_pages) {
9766 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9768 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9773 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9774 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9775 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9781 dev
->hw_info
.valid_pages
= new_vp
;
9782 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9783 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9784 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9789 case SSD_CMD_FULL_RESET
: {
9790 ret
= ssd_full_reset(dev
);
9794 case SSD_CMD_GET_NR_LOG
: {
9795 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9802 case SSD_CMD_GET_LOG
: {
9803 uint32_t length
= dev
->rom_info
.log_sz
;
9807 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9815 case SSD_CMD_LOG_LEVEL
: {
9817 if (copy_from_user(&level
, argp
, sizeof(int))) {
9818 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9823 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9824 level
= SSD_LOG_LEVEL_ERR
;
9827 //just for showing log, no need to protect
9832 case SSD_CMD_OT_PROTECT
: {
9835 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9836 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9841 ssd_set_ot_protect(dev
, !!protect
);
9845 case SSD_CMD_GET_OT_STATUS
: {
9846 int status
= ssd_get_ot_status(dev
, &status
);
9848 if (copy_to_user(argp
, &status
, sizeof(int))) {
9849 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9856 case SSD_CMD_CLEAR_LOG
: {
9857 ret
= ssd_clear_log(dev
);
9861 case SSD_CMD_CLEAR_SMART
: {
9862 ret
= ssd_clear_smart(dev
);
9866 case SSD_CMD_CLEAR_WARNING
: {
9867 ret
= ssd_clear_warning(dev
);
9871 case SSD_CMD_SW_LOG
: {
9872 struct ssd_sw_log_info sw_log
;
9874 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9875 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9880 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9884 case SSD_CMD_GET_LABEL
: {
9886 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9891 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9892 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9899 case SSD_CMD_GET_VERSION
: {
9900 struct ssd_version_info ver
;
9902 mutex_lock(&dev
->fw_mutex
);
9903 ret
= __ssd_get_version(dev
, &ver
);
9904 mutex_unlock(&dev
->fw_mutex
);
9909 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9910 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9917 case SSD_CMD_GET_TEMPERATURE
: {
9920 mutex_lock(&dev
->fw_mutex
);
9921 ret
= __ssd_get_temperature(dev
, &temp
);
9922 mutex_unlock(&dev
->fw_mutex
);
9927 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9928 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9935 case SSD_CMD_GET_BMSTATUS
: {
9938 mutex_lock(&dev
->fw_mutex
);
9939 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9940 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9941 status
= SSD_BMSTATUS_WARNING
;
9943 status
= SSD_BMSTATUS_OK
;
9945 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9946 ret
= __ssd_bm_status(dev
, &status
);
9948 status
= SSD_BMSTATUS_OK
;
9950 mutex_unlock(&dev
->fw_mutex
);
9955 if (copy_to_user(argp
, &status
, sizeof(int))) {
9956 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9963 case SSD_CMD_GET_LABEL2
: {
9967 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9968 label
= &dev
->label
;
9969 length
= sizeof(struct ssd_label
);
9971 label
= &dev
->labelv3
;
9972 length
= sizeof(struct ssd_labelv3
);
9975 if (copy_to_user(argp
, label
, length
)) {
9983 ret
= ssd_flush(dev
);
9985 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9991 case SSD_CMD_SAVE_MD
: {
9994 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9995 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10000 dev
->save_md
= !!save_md
;
10004 case SSD_CMD_SET_WMODE
: {
10007 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
10008 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10013 ret
= __ssd_set_wmode(dev
, new_wmode
);
10021 case SSD_CMD_GET_WMODE
: {
10022 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
10023 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10031 case SSD_CMD_GET_USER_WMODE
: {
10032 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
10033 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10041 case SSD_CMD_DEBUG
: {
10042 struct ssd_debug_info db_info
;
10049 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
10050 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
10055 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
10061 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
10062 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
10067 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
10069 #ifdef SSD_OT_PROTECT
10071 if (db_info
.type
== SSD_DEBUG_NONE
) {
10072 ssd_check_temperature(dev
, SSD_OT_TEMP
);
10073 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
10074 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
10075 dev
->ot_delay
= SSD_OT_DELAY
;
10076 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
10083 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
10084 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
10085 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
10086 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
10090 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
10091 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
10097 case SSD_CMD_DRV_PARAM_INFO
: {
10098 struct ssd_drv_param_info drv_param
;
10100 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
10102 drv_param
.mode
= mode
;
10103 drv_param
.status_mask
= status_mask
;
10104 drv_param
.int_mode
= int_mode
;
10105 drv_param
.threaded_irq
= threaded_irq
;
10106 drv_param
.log_level
= log_level
;
10107 drv_param
.wmode
= wmode
;
10108 drv_param
.ot_protect
= ot_protect
;
10109 drv_param
.finject
= finject
;
10111 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
10112 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
10128 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10129 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
10130 unsigned int cmd
, unsigned long arg
)
10132 struct ssd_device
*dev
;
10133 void __user
*argp
= (void __user
*)arg
;
10139 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10144 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
10145 unsigned int cmd
, unsigned long arg
)
10147 struct ssd_device
*dev
;
10148 void __user
*argp
= (void __user
*)arg
;
10155 dev
= bdev
->bd_disk
->private_data
;
10162 case HDIO_GETGEO
: {
10163 struct hd_geometry geo
;
10164 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
10167 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10168 geo
.start
= get_start_sect(inode
->i_bdev
);
10170 geo
.start
= get_start_sect(bdev
);
10172 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
10181 ret
= ssd_flush(dev
);
10183 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
10191 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
/* kref release callback: invoked by the kref core (via ssd_put) when the
 * last reference to an ssd_device is dropped.
 * NOTE(review): interior source lines are elided in this extraction
 * (braces, any NULL-kref guard, and presumably the final kfree(dev)) -
 * confirm against the complete file. */
10202 static void ssd_free_dev(struct kref
*kref
)
10204 struct ssd_device
*dev
;
/* Recover the enclosing ssd_device from its embedded kref member. */
10210 dev
= container_of(kref
, struct ssd_device
, kref
);
/* Give the device index back to the allocator before the device goes away. */
10214 ssd_put_index(dev
->slave
, dev
->idx
);
10219 static void ssd_put(struct ssd_device
*dev
)
10221 kref_put(&dev
->kref
, ssd_free_dev
);
/* Take an additional reference on @dev (paired with ssd_put).
 * NOTE(review): the closing lines are elided in this extraction; the int
 * return value (presumably 'return 0;') is not visible - confirm against
 * the complete file. */
10224 static int ssd_get(struct ssd_device
*dev
)
10226 kref_get(&dev
->kref
);
10231 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10232 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10234 struct ssd_device
*dev
;
10240 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10245 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10247 struct ssd_device
*dev
;
10253 dev
= bdev
->bd_disk
->private_data
;
10259 /*if (!try_module_get(dev->owner))
10265 atomic_inc(&dev
->refcnt
);
10270 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10271 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10273 struct ssd_device
*dev
;
10279 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10283 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10284 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10286 struct ssd_device
*dev
;
10292 dev
= disk
->private_data
;
10297 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10299 struct ssd_device
*dev
;
10305 dev
= disk
->private_data
;
10311 atomic_dec(&dev
->refcnt
);
10315 //module_put(dev->owner);
10316 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10321 static struct block_device_operations ssd_fops
= {
10322 .owner
= THIS_MODULE
,
10323 .open
= ssd_block_open
,
10324 .release
= ssd_block_release
,
10325 .ioctl
= ssd_block_ioctl
,
10326 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10327 .getgeo
= ssd_block_getgeo
,
10331 static void ssd_init_trim(ssd_device_t
*dev
)
10333 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10334 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10337 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10339 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10340 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
10341 dev
->rq
->limits
.discard_zeroes_data
= 1;
10343 dev
->rq
->limits
.discard_alignment
= 4096;
10344 dev
->rq
->limits
.discard_granularity
= 4096;
10346 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10347 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10349 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
/* Tear down the block request queue created by ssd_init_queue().
 * NOTE(review): interior lines are elided in this extraction (e.g. any
 * I/O-quiesce step before the teardown and clearing of dev->rq) - confirm
 * against the complete file. */
10354 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10358 blk_cleanup_queue(dev
->rq
);
10362 static int ssd_init_queue(struct ssd_device
*dev
)
10364 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10365 if (dev
->rq
== NULL
) {
10366 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10367 goto out_init_queue
;
10370 /* must be first */
10371 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10373 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10374 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10375 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10376 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10378 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10379 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10382 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10383 blk_queue_hardsect_size(dev
->rq
, 512);
10385 blk_queue_logical_block_size(dev
->rq
, 512);
10387 /* not work for make_request based drivers(bio) */
10388 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10390 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10392 dev
->rq
->queuedata
= dev
;
10394 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10395 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10398 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10399 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10402 ssd_init_trim(dev
);
/* Unregister the gendisk from the block layer; counterpart of
 * ssd_init_blkdev().
 * NOTE(review): elided lines likely include the matching put_disk(dev->gd)
 * - confirm against the complete file. */
10410 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10412 del_gendisk(dev
->gd
);
10415 static int ssd_init_blkdev(struct ssd_device
*dev
)
10421 dev
->gd
= alloc_disk(ssd_minors
);
10423 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10426 dev
->gd
->major
= dev
->major
;
10427 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10428 dev
->gd
->fops
= &ssd_fops
;
10429 dev
->gd
->queue
= dev
->rq
;
10430 dev
->gd
->private_data
= dev
;
10432 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10434 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10436 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
10437 device_add_disk(&dev
->pdev
->dev
, dev
->gd
);
10439 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
10449 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10450 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10451 unsigned int cmd
, unsigned long arg
)
10453 static long ssd_ioctl(struct file
*file
,
10454 unsigned int cmd
, unsigned long arg
)
10457 struct ssd_device
*dev
;
10463 dev
= file
->private_data
;
10468 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
10471 static int ssd_open(struct inode
*inode
, struct file
*file
)
10473 struct ssd_device
*dev
= NULL
;
10474 struct ssd_device
*n
= NULL
;
10478 if (!inode
|| !file
) {
10482 idx
= iminor(inode
);
10484 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10485 if (dev
->idx
== idx
) {
10495 file
->private_data
= dev
;
10502 static int ssd_release(struct inode
*inode
, struct file
*file
)
10504 struct ssd_device
*dev
;
10510 dev
= file
->private_data
;
10517 file
->private_data
= NULL
;
/* Re-program the controller's message/response base registers with the
 * driver's DMA addresses (used after a firmware reload so the hardware
 * sees the driver buffers again).
 * NOTE(review): braces and the final return are elided in this extraction. */
10522 static int ssd_reload_ssd_ptr(struct ssd_device
*dev
)
/* Reset the driver-side response pointer state first. */
10524 ssd_reset_resp_ptr(dev
);
10526 //update base reg address
/* The message base register only exists from protocol V3 onwards. */
10527 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10529 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
10532 //update response base reg address
10533 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
10534 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
/* Character-device (control node) file operations.  Kernels <= 2.6.10 use
 * the BKL-held .ioctl entry; newer kernels use .unlocked_ioctl.
 * NOTE(review): elided lines include the .open initializer (presumably
 * ssd_open), the #else/#endif pair, and the closing brace - confirm
 * against the complete file. */
10539 static struct file_operations ssd_cfops
= {
10540 .owner
= THIS_MODULE
,
10542 .release
= ssd_release
,
10543 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10544 .ioctl
= ssd_ioctl
,
10546 .unlocked_ioctl
= ssd_ioctl
,
10550 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10556 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10557 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10558 devfs_remove("c%s", dev
->name
);
10559 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10560 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10561 devfs_remove("c%s", dev
->name
);
10562 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10563 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10564 devfs_remove("c%s", dev
->name
);
10565 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10566 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10568 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10572 static int ssd_init_chardev(struct ssd_device
*dev
)
10580 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10581 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10585 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10587 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10588 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10592 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10594 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10595 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10599 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10601 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10602 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10603 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10604 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10605 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10606 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10608 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
/* Sanity-check the bridge FPGA: write a known test pattern to the bridge
 * test register and expect the hardware to echo back its bitwise
 * complement.
 * NOTE(review): the return statements are elided in this extraction. */
10614 static int ssd_check_hw(struct ssd_device
*dev
)
10616 uint32_t test_data
= 0x55AA5AA5;
10617 uint32_t read_data
;
10619 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10620 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
/* A healthy bridge returns the complement of what was written. */
10621 if (read_data
!= ~(test_data
)) {
10622 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
/* Poll the hardware status register until the controller firmware reports
 * loaded (bit 0) and the controller state is OK (bit 8), then check the
 * reload-firmware register.  Protocols older than V3_1_3 skip the check.
 * NOTE(review): loop braces, returns, and the reload-flag test around line
 * 10658 are elided in this extraction - confirm against the complete file. */
10629 static int ssd_check_fw(struct ssd_device
*dev
)
10634 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
/* Bounded wait: poll up to SSD_CONTROLLER_WAIT times, sleeping in between. */
10638 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10639 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10640 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10644 msleep(SSD_INIT_WAIT
);
10647 if (!(val
& 0x1)) {
10648 /* controller fw status */
10649 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10651 } else if (!((val
>> 8) & 0x1)) {
10652 /* controller state */
10653 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
/* Check whether a firmware reload was flagged by a previous run. */
10657 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10659 dev
->reload_fw
= 1;
/* Read and validate the bridge firmware version (low 12 bits of the
 * bridge version register), then run ssd_check_fw().  A check failure is
 * only fatal in standard driver mode.
 * NOTE(review): returns and braces are elided in this extraction. */
10665 static int ssd_init_fw_info(struct ssd_device
*dev
)
10670 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10671 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
/* Reject bridge firmware older than the driver's supported minimum. */
10672 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10673 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10676 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10678 ret
= ssd_check_fw(dev
);
10684 /* skip error if not in standard mode */
10685 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10691 static int ssd_check_clock(struct ssd_device
*dev
)
10696 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10700 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10703 if (!((val
>> 4 ) & 0x1)) {
10704 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10705 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10706 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10711 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10712 if (!((val
>> 5 ) & 0x1)) {
10713 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10714 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10715 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10719 if (!((val
>> 6 ) & 0x1)) {
10720 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10721 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10722 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10726 if (!((val
>> 7 ) & 0x1)) {
10727 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10728 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10729 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10738 static int ssd_check_volt(struct ssd_device
*dev
)
10745 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10749 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10751 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10752 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10753 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10754 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10755 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10756 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10757 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10761 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10762 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10763 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10764 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10765 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10771 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10772 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10773 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10774 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10775 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10776 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10777 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10781 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10782 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10783 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10784 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10785 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
/* Verify controller state after a reset.  Protocols older than V3_1_3
 * have no such status and skip the check; on protocols >= V3_2, status
 * bit 9 set indicates the controllers reset asynchronously, which is
 * logged via the software event log.
 * NOTE(review): return statements are elided in this extraction. */
10794 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10798 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10802 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
/* Bit 8: controller state OK. */
10803 if (!((val
>> 8) & 0x1)) {
10804 /* controller state */
10805 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
/* The async-reset indication (bit 9) only exists from protocol V3_2 on. */
10809 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10813 if (((val
>> 9 ) & 0x1)) {
10814 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10815 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
/* Bottom-half hardware check: runs the clock check on protocols >=
 * V3_1_3; a failure is only fatal in standard driver mode.
 * NOTE(review): returns and braces are elided in this extraction. */
10822 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10826 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10831 ret
= ssd_check_clock(dev
);
10837 /* skip error if not in standard mode */
10838 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/* Controller check: runs the reset-sync check on protocols >= V3_1_3;
 * a failure is only fatal in standard driver mode.
 * NOTE(review): returns and braces are elided in this extraction. */
10844 static int ssd_check_controller(struct ssd_device
*dev
)
10848 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10853 ret
= ssd_check_reset_sync(dev
);
10859 /* skip error if not in standard mode */
10860 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10866 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10868 uint32_t test_data
= 0x55AA5AA5;
10870 int reg_base
, reg_sz
;
10875 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10880 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10882 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10886 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10887 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10888 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10889 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10890 if (val
!= ~(test_data
)) {
10891 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10897 ret
= ssd_check_volt(dev
);
10903 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10904 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10905 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10907 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10909 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10911 if (!((val
>> 1) & 0x1)) {
10913 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10914 msleep(SSD_INIT_WAIT
);
10915 goto check_ram_status
;
10917 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10918 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10923 reg_base
+= reg_sz
;
10928 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10929 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10930 if (!((val
>> 31) & 0x1)) {
10934 msleep(SSD_INIT_WAIT
);
10936 if ((val
>> 31) & 0x1) {
10937 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10944 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10948 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10949 if (val
== (uint32_t)-1) {
10950 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10953 dev
->protocol_info
.ver
= val
;
10955 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10956 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10957 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10959 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10960 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10962 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10963 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10965 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10966 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10972 static int ssd_init_hw_info(struct ssd_device
*dev
)
10980 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10981 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10982 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10984 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10985 hio_warn("%s: response info error\n", dev
->name
);
10990 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10991 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10992 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10993 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10994 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10996 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10997 hio_warn("%s: cmd info error\n", dev
->name
);
11003 if (ssd_check_hw_bh(dev
)) {
11004 hio_warn("%s: check hardware status failed\n", dev
->name
);
11009 if (ssd_check_controller(dev
)) {
11010 hio_warn("%s: check controller state failed\n", dev
->name
);
11015 /* nr controller : read again*/
11016 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
11017 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
11019 /* nr ctrl configured */
11020 nr_ctrl
= (val
>> 20) & 0xF;
11021 if (0 == dev
->hw_info
.nr_ctrl
) {
11022 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
11025 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
11026 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
11027 if (mode
<= SSD_DRV_MODE_STANDARD
) {
11033 if (ssd_check_controller_bh(dev
)) {
11034 hio_warn("%s: check controller failed\n", dev
->name
);
11039 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11040 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
11041 if ((val
& 0xF) != 0xF) {
11042 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
11045 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
11046 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
11052 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11053 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11054 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
11055 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
11056 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
11058 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11059 dev
->hw_info
.max_ch
= 1;
11060 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
11062 /* set max channel 32 */
11063 dev
->hw_info
.max_ch
= 32;
11066 if (0 == dev
->hw_info
.nr_chip
) {
11068 dev
->hw_info
.nr_chip
= 1;
11072 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
11073 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
11075 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
11076 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
11083 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11084 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
11085 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
11086 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
11087 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
11088 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11089 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
11091 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
11096 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
11098 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
11099 hio_warn("%s: ram info error\n", dev
->name
);
11104 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11105 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
11107 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
11108 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
11110 if (0 == dev
->hw_info
.log_sz
) {
11111 hio_warn("%s: log size error\n", dev
->name
);
11116 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
11117 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
11118 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11119 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11120 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
11121 hio_warn("%s: bbt info error\n", dev
->name
);
11127 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
11128 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
11129 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11130 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
11132 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
11134 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
11135 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
11136 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
11137 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
11138 hio_warn("%s: md info error\n", dev
->name
);
11144 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11145 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
11147 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
11148 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
11153 if (mode
<= SSD_DRV_MODE_DEBUG
) {
11154 if (dev
->hw_info
.nr_ctrl
> 1) {
11155 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
11156 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
11157 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
11160 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
11161 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
11162 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
11164 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
11165 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
11166 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
11168 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
11169 dev
->hw_info
.bbf_pages
= val
& 0xFF;
11170 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
11172 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
11173 hio_warn("%s: flash info error\n", dev
->name
);
11179 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
11181 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
11182 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11183 dev
->hw_info
.valid_pages
= val
& 0x3FF;
11184 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
11186 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
11187 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
11189 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
11190 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
11191 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
11196 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
11197 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
11198 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
11199 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11200 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
11202 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
11203 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
11210 if (mode
< SSD_DRV_MODE_DEBUG
) {
11211 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
11212 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
11213 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
11216 /* extend hardware info */
11217 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11218 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
11220 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11221 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11222 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11225 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11226 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11227 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11230 /* power loss protect */
11231 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11232 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11233 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11235 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11239 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11240 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11243 /* skip error if not in standard mode */
11244 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11250 static void ssd_cleanup_response(struct ssd_device
*dev
)
11252 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11253 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11255 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11256 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11259 static int ssd_init_response(struct ssd_device
*dev
)
11261 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11262 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11264 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11265 if (!dev
->resp_msg_base
) {
11266 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11267 goto out_alloc_resp_msg
;
11269 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11271 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11272 if (!dev
->resp_ptr_base
){
11273 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11274 goto out_alloc_resp_ptr
;
11276 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11277 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11279 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11280 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11284 out_alloc_resp_ptr
:
11285 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11286 out_alloc_resp_msg
:
11290 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11292 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11295 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11296 kfree(dev
->cmd
[i
].sgl
);
11299 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11303 static int ssd_init_cmd(struct ssd_device
*dev
)
11305 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11306 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11307 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11310 spin_lock_init(&dev
->cmd_lock
);
11312 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11313 if (!dev
->msg_base
) {
11314 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11315 goto out_alloc_msg
;
11318 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11320 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11321 goto out_alloc_cmd
;
11323 memset(dev
->cmd
, 0, cmd_sz
);
11325 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11326 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11327 if (!dev
->cmd
[i
].sgl
) {
11328 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11329 goto out_alloc_sgl
;
11332 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11333 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11335 dev
->cmd
[i
].dev
= dev
;
11336 dev
->cmd
[i
].tag
= i
;
11337 dev
->cmd
[i
].flag
= 0;
11339 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11342 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11343 dev
->scmd
= ssd_dispatch_cmd
;
11345 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11347 dev
->scmd
= ssd_send_cmd_db
;
11349 dev
->scmd
= ssd_send_cmd
;
11356 for (i
--; i
>=0; i
--) {
11357 kfree(dev
->cmd
[i
].sgl
);
11361 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11366 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11367 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11369 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11371 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11375 return IRQ_WAKE_THREAD
;
11378 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11380 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11381 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11382 struct ssd_cmd
*cmd
;
11383 union ssd_response_msq __msg
;
11384 union ssd_response_msq
*msg
= &__msg
;
11386 uint32_t resp_idx
= queue
->resp_idx
;
11387 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11388 uint32_t end_resp_idx
;
11390 if (unlikely(resp_idx
== new_resp_idx
)) {
11394 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11397 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11400 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11401 msg
->u64_msg
= *u64_msg
;
11403 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11404 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11407 /* clear the resp msg */
11408 *u64_msg
= (uint64_t)(-1);
11410 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11411 /*if (unlikely(!cmd->bio)) {
11412 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11413 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11417 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11418 cmd
->errors
= -EIO
;
11422 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11426 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11427 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11428 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11429 queue_work(dev
->workq
, &dev
->log_work
);
11433 if (unlikely(msg
->resp_msg
.status
)) {
11434 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11435 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11436 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11439 ssd_set_alarm(dev
);
11440 queue
->io_stat
.nr_rwerr
++;
11441 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11443 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11444 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11446 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11448 queue
->io_stat
.nr_ioerr
++;
11451 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11452 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11453 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11455 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11457 }while (resp_idx
!= end_resp_idx
);
11459 queue
->resp_idx
= new_resp_idx
;
11461 return IRQ_HANDLED
;
11465 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11466 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11468 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11471 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11472 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11473 struct ssd_cmd
*cmd
;
11474 union ssd_response_msq __msg
;
11475 union ssd_response_msq
*msg
= &__msg
;
11477 uint32_t resp_idx
= queue
->resp_idx
;
11478 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11479 uint32_t end_resp_idx
;
11481 if (unlikely(resp_idx
== new_resp_idx
)) {
11485 #if (defined SSD_ESCAPE_IRQ)
11486 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11487 dev
->irq_cpu
= smp_processor_id();
11491 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11494 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11497 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11498 msg
->u64_msg
= *u64_msg
;
11500 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11501 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11504 /* clear the resp msg */
11505 *u64_msg
= (uint64_t)(-1);
11507 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11508 /*if (unlikely(!cmd->bio)) {
11509 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11510 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11514 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11515 cmd
->errors
= -EIO
;
11519 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11523 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11524 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11525 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11526 queue_work(dev
->workq
, &dev
->log_work
);
11530 if (unlikely(msg
->resp_msg
.status
)) {
11531 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11532 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11533 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11536 ssd_set_alarm(dev
);
11537 queue
->io_stat
.nr_rwerr
++;
11538 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11540 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11541 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11543 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11545 queue
->io_stat
.nr_ioerr
++;
11548 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11549 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11550 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11552 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11554 }while (resp_idx
!= end_resp_idx
);
11556 queue
->resp_idx
= new_resp_idx
;
11558 return IRQ_HANDLED
;
11561 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11562 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11564 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11568 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11569 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11571 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11572 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11574 ret
= ssd_interrupt(irq
, dev_id
);
11578 if (IRQ_HANDLED
== ret
) {
11579 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11585 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11589 for (i
=0; i
<dev
->nr_queue
; i
++) {
11590 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11594 static void ssd_free_irq(struct ssd_device
*dev
)
11598 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11599 if (SSD_INT_MSIX
== dev
->int_mode
) {
11600 for (i
=0; i
<dev
->nr_queue
; i
++) {
11601 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11606 for (i
=0; i
<dev
->nr_queue
; i
++) {
11607 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11608 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11610 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11614 if (SSD_INT_MSIX
== dev
->int_mode
) {
11615 pci_disable_msix(dev
->pdev
);
11616 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11617 pci_disable_msi(dev
->pdev
);
11622 static int ssd_init_irq(struct ssd_device
*dev
)
11624 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11625 const struct cpumask
*cpu_mask
= NULL
;
11626 static int cpu_affinity
= 0;
11628 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11629 const struct cpumask
*mask
= NULL
;
11630 static int cpu
= 0;
11634 unsigned long flags
= 0;
11637 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11639 #ifdef SSD_ESCAPE_IRQ
11643 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11644 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11645 dev
->nr_queue
= SSD_MSIX_VEC
;
11647 for (i
=0; i
<dev
->nr_queue
; i
++) {
11648 dev
->entry
[i
].entry
= i
;
11651 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11654 } else if (ret
> 0) {
11655 dev
->nr_queue
= ret
;
11657 hio_warn("%s: can not enable msix\n", dev
->name
);
11659 ssd_set_alarm(dev
);
11664 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11665 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11666 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11667 cpu
= cpumask_first(mask
);
11669 for (i
=0; i
<dev
->nr_queue
; i
++) {
11670 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11671 cpu
= cpumask_next(cpu
, mask
);
11672 if (cpu
>= nr_cpu_ids
) {
11673 cpu
= cpumask_first(mask
);
11678 dev
->int_mode
= SSD_INT_MSIX
;
11679 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11680 ret
= pci_enable_msi(dev
->pdev
);
11682 hio_warn("%s: can not enable msi\n", dev
->name
);
11684 ssd_set_alarm(dev
);
11689 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11691 dev
->int_mode
= SSD_INT_MSI
;
11694 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11696 dev
->int_mode
= SSD_INT_LEGACY
;
11699 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11700 dev
->nr_queue
= SSD_MSIX_VEC
;
11702 dev
->nr_queue
= pci_alloc_irq_vectors(dev
->pdev
, 1, dev
->nr_queue
, PCI_IRQ_MSIX
| PCI_IRQ_AFFINITY
);
11703 if (dev
->nr_queue
<= 0) {
11705 hio_warn("%s: can not enable msix\n", dev
->name
);
11706 ssd_set_alarm(dev
);
11710 dev
->int_mode
= SSD_INT_MSIX
;
11711 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11713 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_MSI
| PCI_IRQ_AFFINITY
);
11716 hio_warn("%s: can not enable msi\n", dev
->name
);
11718 ssd_set_alarm(dev
);
11723 dev
->int_mode
= SSD_INT_MSI
;
11725 ret
= pci_alloc_irq_vectors(dev
->pdev
, 1, 1, PCI_IRQ_LEGACY
);
11729 hio_warn("%s: can not enable msi\n", dev
->name
);
11731 ssd_set_alarm(dev
);
11736 dev
->int_mode
= SSD_INT_LEGACY
;
11740 for (i
=0; i
<dev
->nr_queue
; i
++) {
11741 if (dev
->nr_queue
> 1) {
11742 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11744 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11747 dev
->queue
[i
].dev
= dev
;
11748 dev
->queue
[i
].idx
= i
;
11750 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11751 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11753 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11754 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11755 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11756 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11758 dev
->queue
[i
].cmd
= dev
->cmd
;
11761 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11762 flags
= IRQF_SHARED
;
11767 for (i
=0; i
<dev
->nr_queue
; i
++) {
11768 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
11769 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11770 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11772 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11774 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11775 if (threaded_irq
) {
11776 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11777 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11778 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11780 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11783 if (threaded_irq
) {
11784 ret
= request_threaded_irq(pci_irq_vector(dev
->pdev
, i
), ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11785 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11786 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11788 ret
= request_irq(pci_irq_vector(dev
->pdev
, i
), &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11792 hio_warn("%s: request irq failed\n", dev
->name
);
11794 ssd_set_alarm(dev
);
11795 goto out_request_irq
;
11798 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11799 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11800 if (SSD_INT_MSIX
== dev
->int_mode
) {
11801 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11802 cpu_affinity
= cpumask_first(cpu_mask
);
11805 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11806 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11807 if (cpu_affinity
>= nr_cpu_ids
) {
11808 cpu_affinity
= cpumask_first(cpu_mask
);
11817 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11818 if (SSD_INT_MSIX
== dev
->int_mode
) {
11819 for (j
=0; j
<dev
->nr_queue
; j
++) {
11820 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11825 for (i
--; i
>=0; i
--) {
11826 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
11827 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11829 free_irq(pci_irq_vector(dev
->pdev
, i
), &dev
->queue
[i
]);
11833 if (SSD_INT_MSIX
== dev
->int_mode
) {
11834 pci_disable_msix(dev
->pdev
);
11835 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11836 pci_disable_msi(dev
->pdev
);
11843 static void ssd_initial_log(struct ssd_device
*dev
)
11846 uint32_t speed
, width
;
11848 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11852 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11854 // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart'
11855 //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
11858 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11860 width
= (val
>> 4)& 0x3F;
11861 if (0x1 == speed
) {
11862 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11863 } else if (0x2 == speed
) {
11864 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11866 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11868 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11873 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11874 static void ssd_hwmon_worker(void *data
)
11876 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11878 static void ssd_hwmon_worker(struct work_struct
*work
)
11880 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11883 if (ssd_check_hw(dev
)) {
11884 //hio_err("%s: check hardware failed\n", dev->name);
11888 ssd_check_clock(dev
);
11889 ssd_check_volt(dev
);
11891 ssd_mon_boardvolt(dev
);
11894 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11895 static void ssd_tempmon_worker(void *data
)
11897 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11899 static void ssd_tempmon_worker(struct work_struct
*work
)
11901 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11904 if (ssd_check_hw(dev
)) {
11905 //hio_err("%s: check hardware failed\n", dev->name);
/*
 * ssd_capmon_worker(): deferred-work handler that checks the power-loss
 * protection capacitor.  It runs a capacitance measurement
 * (ssd_do_cap_learn), logs the result, and raises/clears a "battery"
 * fault bit in dev->hwmon with hysteresis around a threshold.
 * NOTE(review): extraction gaps — several interior lines (early returns,
 * closing braces) are missing from this view; comments are hedged.
 */
11913 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11914 static void ssd_capmon_worker(void *data
)
11916 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11918 static void ssd_capmon_worker(struct work_struct
*work
)
11920 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
/* Default threshold; replaced below for CP-type capacitors. */
11923 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
/* Old protocol versions apparently skip this check — TODO confirm the
 * elided branch body. */
11926 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
/* FHHL form factor with early PCB revision (< 'B') is special-cased;
 * the elided body is not visible here. */
11930 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11934 /* fault before? */
11935 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
/* A previously-faulted cap gets the fast re-check first. */
11936 ret
= ssd_check_pl_cap_fast(dev
);
/* Full capacitance learning; failure is logged both to the console and
 * the device software log. */
11943 ret
= ssd_do_cap_learn(dev
, &cap
);
11945 hio_err("%s: cap learn failed\n", dev
->name
);
11946 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11950 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11952 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11953 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11956 //use the fw event id?
/* Below threshold: log a battery fault on the 0 -> 1 transition only
 * (bit not yet set).  The bit-set itself is in an elided line. */
11957 if (cap
< cap_threshold
) {
11958 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11959 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
/* Recovery uses hysteresis: only above threshold + HYST, and only on
 * the 1 -> 0 transition, is BATTERY_OK logged. */
11961 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11962 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11963 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
/*
 * ssd_routine_start(): periodic timer callback.  Each tick it queues the
 * monitoring work items (log, hwmon, capmon, tempmon) on dev->workq at
 * their respective intervals, then re-arms dev->routine_timer.
 * All queueing is gated on SSD_INIT_WORKQ so nothing is queued after the
 * workqueue has been torn down.
 * NOTE(review): extraction gaps — dev's recovery from `data` and several
 * braces are in elided lines.
 */
11968 static void ssd_routine_start(void *data
)
11970 struct ssd_device
*dev
;
11977 dev
->routine_tick
++;
/* Hardware log collection: every tick, but only while the device is
 * not busy. */
11979 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11980 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11981 queue_work(dev
->workq
, &dev
->log_work
);
/* Hardware monitor: every SSD_HWMON_ROUTINE_TICK ticks. */
11984 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11985 queue_work(dev
->workq
, &dev
->hwmon_work
);
/* Capacitor monitor: every SSD_CAPMON_ROUTINE_TICK ticks. */
11988 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11989 queue_work(dev
->workq
, &dev
->capmon_work
);
/* Faster re-check cadence while a cap fault bit is set in dev->hwmon. */
11992 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11993 /* CAP fault? check again */
11994 queue_work(dev
->workq
, &dev
->capmon_work
);
/* Temperature monitor: every tick. */
11997 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11998 queue_work(dev
->workq
, &dev
->tempmon_work
);
12001 /* schedule routine */
12002 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
/*
 * ssd_cleanup_routine(): stop the periodic routine and battery-manager
 * timers.  A no-op (early return in an elided line, presumably) when the
 * driver is not in standard mode — TODO confirm the elided branch body.
 */
12005 static void ssd_cleanup_routine(struct ssd_device
*dev
)
12007 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
12010 (void)ssd_del_timer(&dev
->routine_timer
);
12012 (void)ssd_del_timer(&dev
->bm_timer
);
/*
 * ssd_init_routine(): set up the periodic monitoring machinery — the
 * four work items, the initial log pass, the battery-manager timer and
 * the main routine timer.  Skipped (early return in an elided line,
 * presumably) outside standard driver mode — TODO confirm.
 * The INIT_WORK() variants differ because pre-2.6.20 kernels took an
 * explicit data pointer; newer kernels recover it via container_of().
 */
12015 static int ssd_init_routine(struct ssd_device
*dev
)
12017 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
12020 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12021 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
12022 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
12023 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
12024 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
12026 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
12027 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
12028 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
12029 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
12033 ssd_initial_log(dev
);
12035 /* schedule bm routine */
12036 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
12038 /* schedule routine */
12039 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
/*
 * ssd_remove_one(): PCI remove callback.  Tears down one device in the
 * reverse order of ssd_init_one(): unlink from the global list, drop
 * sysfs, mark offline, stop the workqueue, flush/save metadata and
 * SMART, then dismantle block/char devices, routine timers, queue, tags,
 * threads, command state and logs; optionally trigger a firmware reload
 * before unmapping BAR0 and disabling the PCI device.
 * NOTE(review): extraction gaps — locking around the list, NULL checks
 * and several braces sit in elided lines.
 */
12045 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12048 ssd_remove_one (struct pci_dev
*pdev
)
12050 struct ssd_device
*dev
;
12056 dev
= pci_get_drvdata(pdev
);
12061 list_del_init(&dev
->list
);
12063 ssd_unregister_sysfs(dev
);
12065 /* offline firstly */
12066 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12068 /* clean work queue first */
12070 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12071 ssd_cleanup_workq(dev
);
/* Persist dirty data and device metadata before teardown. */
12075 (void)ssd_flush(dev
);
12076 (void)ssd_save_md(dev
);
12080 ssd_save_smart(dev
);
12083 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
12084 ssd_cleanup_blkdev(dev
);
12088 ssd_cleanup_chardev(dev
);
12091 /* clean routine */
12093 ssd_cleanup_routine(dev
);
12096 ssd_cleanup_queue(dev
);
12098 ssd_cleanup_tag(dev
);
12099 ssd_cleanup_thread(dev
);
12103 ssd_cleanup_dcmd(dev
);
12104 ssd_cleanup_cmd(dev
);
12105 ssd_cleanup_response(dev
);
12108 ssd_cleanup_log(dev
);
/* Optionally kick the controller into a firmware reload on the way out. */
12111 if (dev
->reload_fw
) { //reload fw
12112 dev
->has_non_0x98_reg_access
= 1;
12113 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12116 /* unmap physical address */
12117 #ifdef LINUX_SUSE_OS
12118 iounmap(dev
->ctrlp
);
12120 pci_iounmap(pdev
, dev
->ctrlp
);
12123 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12125 pci_disable_device(pdev
);
12127 pci_set_drvdata(pdev
, NULL
);
/*
 * ssd_init_one(): PCI probe callback.  Allocates and initializes one
 * ssd_device, enables/maps the PCI device, and brings the controller up
 * through a long sequence of ssd_init_*() phases; on any failure it
 * unwinds through the goto-cleanup ladder at the bottom.
 * NOTE(review): extraction gaps — error-return checks (`if (ret)`),
 * several labels, braces and `return` statements sit in elided lines, so
 * only the visible skeleton is annotated here.
 */
12133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12136 ssd_init_one(struct pci_dev
*pdev
,
12137 const struct pci_device_id
*ent
)
12139 struct ssd_device
*dev
;
12143 if (!pdev
|| !ent
) {
/* --- phase: allocate and zero the per-device structure --- */
12148 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
12151 goto out_alloc_dev
;
12153 memset(dev
, 0, sizeof(struct ssd_device
));
12155 dev
->owner
= THIS_MODULE
;
/* Slave-port devices get a distinct name/major; dev->slave is set in an
 * elided line — TODO confirm. */
12157 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
12161 dev
->idx
= ssd_get_index(dev
->slave
);
12162 if (dev
->idx
< 0) {
12164 goto out_get_index
;
/* Device name = base name + index suffix (e.g. "hio" + letter). */
12168 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
12169 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
12171 dev
->major
= ssd_major
;
12172 dev
->cmajor
= ssd_cmajor
;
12174 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
12175 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
12176 dev
->major
= ssd_major_sl
;
/* Record probe time as the reset timestamp. */
12180 do_gettimeofday(&tv
);
12181 dev
->reset_time
= tv
.tv_sec
;
12183 atomic_set(&(dev
->refcnt
), 0);
12184 atomic_set(&(dev
->tocnt
), 0);
12186 mutex_init(&dev
->fw_mutex
);
12189 mutex_init(&dev
->gd_mutex
);
12190 dev
->has_non_0x98_reg_access
= 0;
12192 //init in_flight lock
12193 spin_lock_init(&dev
->in_flight_lock
);
12196 pci_set_drvdata(pdev
, dev
);
12198 kref_init(&dev
->kref
);
/* --- phase: PCI bring-up (enable, bus-mastering, 64-bit DMA masks) --- */
12200 ret
= pci_enable_device(pdev
);
12202 hio_warn("%s: can not enable device\n", dev
->name
);
12203 goto out_enable_device
;
12206 pci_set_master(pdev
);
12208 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12209 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12211 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12214 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12215 goto out_set_dma_mask
;
12218 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12219 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12221 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12224 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12225 goto out_set_dma_mask
;
/* --- phase: reserve and map BAR0 (control registers) --- */
12228 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12229 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12231 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12232 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12234 goto out_request_mem_region
;
12237 /* 2.6.9 kernel bug */
12238 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12240 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12242 goto out_pci_iomap
;
/* --- phase: controller/firmware initialization ladder.  Each
 * ssd_init_*() below is followed by an (elided) error check that jumps
 * to the matching out_* label; hard hardware errors also raise the
 * alarm LED via ssd_set_alarm(). --- */
12245 ret
= ssd_check_hw(dev
);
12247 hio_err("%s: check hardware failed\n", dev
->name
);
12251 ret
= ssd_init_protocol_info(dev
);
12253 hio_err("%s: init protocol info failed\n", dev
->name
);
12254 goto out_init_protocol_info
;
12258 ssd_clear_alarm(dev
);
12260 ret
= ssd_init_fw_info(dev
);
12262 hio_err("%s: init firmware info failed\n", dev
->name
);
12264 ssd_set_alarm(dev
);
12265 goto out_init_fw_info
;
12273 ret
= ssd_init_rom_info(dev
);
12275 hio_err("%s: init rom info failed\n", dev
->name
);
12277 ssd_set_alarm(dev
);
12278 goto out_init_rom_info
;
12281 ret
= ssd_init_label(dev
);
12283 hio_err("%s: init label failed\n", dev
->name
);
12285 ssd_set_alarm(dev
);
12286 goto out_init_label
;
12289 ret
= ssd_init_workq(dev
);
12291 hio_warn("%s: init workq failed\n", dev
->name
);
12292 goto out_init_workq
;
12294 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12296 ret
= ssd_init_log(dev
);
12298 hio_err("%s: init log failed\n", dev
->name
);
12300 ssd_set_alarm(dev
);
12304 ret
= ssd_init_smart(dev
);
12306 hio_err("%s: init info failed\n", dev
->name
);
12308 ssd_set_alarm(dev
);
12309 goto out_init_smart
;
12313 ret
= ssd_init_hw_info(dev
);
12315 hio_err("%s: init hardware info failed\n", dev
->name
);
12317 ssd_set_alarm(dev
);
12318 goto out_init_hw_info
;
12326 ret
= ssd_init_sensor(dev
);
12328 hio_err("%s: init sensor failed\n", dev
->name
);
12330 ssd_set_alarm(dev
);
12331 goto out_init_sensor
;
12334 ret
= ssd_init_pl_cap(dev
);
12336 hio_err("%s: int pl_cap failed\n", dev
->name
);
12338 ssd_set_alarm(dev
);
12339 goto out_init_pl_cap
;
12343 ret
= ssd_check_init_state(dev
);
12345 hio_err("%s: check init state failed\n", dev
->name
);
12347 ssd_set_alarm(dev
);
12348 goto out_check_init_state
;
/* --- phase: I/O machinery (response ring, commands, IRQ, threads,
 * tags, request queue) --- */
12351 ret
= ssd_init_response(dev
);
12353 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12354 goto out_init_response
;
12357 ret
= ssd_init_cmd(dev
);
12359 hio_warn("%s: init msg failed\n", dev
->name
);
12363 ret
= ssd_init_dcmd(dev
);
12365 hio_warn("%s: init cmd failed\n", dev
->name
);
12366 goto out_init_dcmd
;
12369 ret
= ssd_init_irq(dev
);
12371 hio_warn("%s: init irq failed\n", dev
->name
);
12375 ret
= ssd_init_thread(dev
);
12377 hio_warn("%s: init thread failed\n", dev
->name
);
12378 goto out_init_thread
;
12381 ret
= ssd_init_tag(dev
);
12383 hio_warn("%s: init tags failed\n", dev
->name
);
12384 goto out_init_tags
;
12388 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12390 ret
= ssd_init_queue(dev
);
12392 hio_warn("%s: init queue failed\n", dev
->name
);
12393 goto out_init_queue
;
12401 ret
= ssd_init_ot_protect(dev
);
12403 hio_err("%s: int ot_protect failed\n", dev
->name
);
12405 ssd_set_alarm(dev
);
12406 goto out_int_ot_protect
;
12409 ret
= ssd_init_wmode(dev
);
12411 hio_warn("%s: init write mode\n", dev
->name
);
12412 goto out_init_wmode
;
12415 /* init routine after hw is ready */
12416 ret
= ssd_init_routine(dev
);
12418 hio_warn("%s: init routine\n", dev
->name
);
12419 goto out_init_routine
;
/* --- phase: user-visible devices (char + block) and sysfs --- */
12422 ret
= ssd_init_chardev(dev
);
12424 hio_warn("%s: register char device failed\n", dev
->name
);
12425 goto out_init_chardev
;
12429 ret
= ssd_init_blkdev(dev
);
12431 hio_warn("%s: register block device failed\n", dev
->name
);
12432 goto out_init_blkdev
;
12434 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12436 ret
= ssd_register_sysfs(dev
);
12438 hio_warn("%s: register sysfs failed\n", dev
->name
);
12439 goto out_register_sysfs
;
12444 list_add_tail(&dev
->list
, &ssd_list
);
/* --- error-unwind ladder: reverse order of the phases above --- */
12448 out_register_sysfs
:
12449 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12450 ssd_cleanup_blkdev(dev
);
12454 ssd_cleanup_chardev(dev
);
12459 ssd_cleanup_routine(dev
);
12463 out_int_ot_protect
:
12464 ssd_cleanup_queue(dev
);
12466 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12467 ssd_cleanup_tag(dev
);
12469 ssd_cleanup_thread(dev
);
12473 ssd_cleanup_dcmd(dev
);
12475 ssd_cleanup_cmd(dev
);
12477 ssd_cleanup_response(dev
);
12479 out_check_init_state
:
12486 ssd_cleanup_log(dev
);
12491 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12492 ssd_cleanup_workq(dev
);
12498 out_init_protocol_info
:
12500 #ifdef LINUX_SUSE_OS
12501 iounmap(dev
->ctrlp
);
12503 pci_iounmap(pdev
, dev
->ctrlp
);
12506 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12507 out_request_mem_region
:
12509 pci_disable_device(pdev
);
12511 pci_set_drvdata(pdev
, NULL
);
/*
 * ssd_cleanup_tasklet(): kill the per-CPU completion tasklet on every
 * online CPU.  Counterpart of ssd_init_tasklet().
 */
12519 static void ssd_cleanup_tasklet(void)
12522 for_each_online_cpu(i
) {
12523 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
/*
 * ssd_init_tasklet(): per-CPU setup of the completion path — an empty
 * done-queue list head plus a tasklet per online CPU.  Two alternative
 * tasklet handlers (__ssd_done_db vs __ssd_done) are selected by a
 * preprocessor condition whose line is elided here — TODO confirm which
 * configuration picks which handler.
 */
12527 static int ssd_init_tasklet(void)
12531 for_each_online_cpu(i
) {
12532 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12535 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12537 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
/*
 * PCI ID table: one Xilinx-vendor entry (g3 generation) and three
 * HiSilicon/Huawei-vendor entries (v1, v2, v2 dual-port slave).  The v1
 * "sp" entry is intentionally disabled.
 */
12544 static struct pci_device_id ssd_pci_tbl
[] = {
12545 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12546 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12547 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12548 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12549 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12553 /*driver power management handler for pm_ops*/
/*
 * ssd_hio_suspend(): PM suspend path.  Mirrors the first half of
 * ssd_remove_one(): take the device offline, stop work, flush and save
 * metadata/SMART, tear down timers/threads/logs, optionally trigger a
 * firmware reload, then unmap BAR0 and disable the PCI device.  The
 * legacy (pre-2.6.32) variant receives the pci_dev directly; the pm_ops
 * variant derives it from the struct device.
 * NOTE(review): extraction gaps — NULL checks, braces and the return
 * statement are in elided lines.
 */
12554 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12555 static int ssd_hio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
12558 static int ssd_hio_suspend(struct device
*ddev
)
12560 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12562 struct ssd_device
*dev
;
12569 dev
= pci_get_drvdata(pdev
);
12574 hio_warn("%s: suspend disk start.\n", dev
->name
);
12575 ssd_unregister_sysfs(dev
);
12577 /* offline firstly */
12578 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12580 /* clean work queue first */
12582 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12583 ssd_cleanup_workq(dev
);
/* Persist dirty data and device metadata before powering down. */
12587 (void)ssd_flush(dev
);
12588 (void)ssd_save_md(dev
);
12592 ssd_save_smart(dev
);
12595 /* clean routine */
12597 ssd_cleanup_routine(dev
);
12600 ssd_cleanup_thread(dev
);
12605 ssd_cleanup_log(dev
);
12608 if (dev
->reload_fw
) { //reload fw
12609 dev
->has_non_0x98_reg_access
= 1;
12610 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12613 /* unmap physical address */
12615 #ifdef LINUX_SUSE_OS
12616 iounmap(dev
->ctrlp
);
12618 pci_iounmap(pdev
, dev
->ctrlp
);
/* mmio_base doubles as an "is mapped" flag; cleared so a later resume
 * re-requests the region. */
12623 if (dev
->mmio_base
) {
12624 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12625 dev
->mmio_base
= 0;
12628 pci_disable_device(pdev
);
12630 hio_warn("%s: suspend disk finish.\n", dev
->name
);
/*
 * ssd_hio_resume(): PM resume path.  Re-runs most of the probe sequence
 * from ssd_init_one() on the already-allocated ssd_device: PCI enable,
 * DMA masks, BAR0 map, then the ssd_init_*() ladder, finally flushing
 * base pointers back to the controller (ssd_reload_ssd_ptr) and marking
 * the block device initialized again.
 * NOTE(review): extraction gaps — error checks (`if (ret)`), labels,
 * braces and returns are in elided lines; only the visible skeleton is
 * annotated.
 */
12636 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12637 static int ssd_hio_resume(struct pci_dev
*pdev
)
12640 static int ssd_hio_resume(struct device
*ddev
)
12642 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12644 struct ssd_device
*dev
= NULL
;
12652 dev
= pci_get_drvdata(pdev
);
12655 goto out_alloc_dev
;
12658 hio_warn("%s: resume disk start.\n", dev
->name
);
/* --- phase: PCI re-enable and DMA masks --- */
12659 ret
= pci_enable_device(pdev
);
12661 hio_warn("%s: can not enable device\n", dev
->name
);
12662 goto out_enable_device
;
12665 pci_set_master(pdev
);
12667 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12668 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12670 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12673 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12674 goto out_set_dma_mask
;
12677 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12678 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12680 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12683 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12684 goto out_set_dma_mask
;
/* --- phase: re-map BAR0 --- */
12687 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12688 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12690 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12691 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12693 goto out_request_mem_region
;
12696 /* 2.6.9 kernel bug */
12697 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12699 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12701 goto out_pci_iomap
;
/* --- phase: controller re-initialization ladder (same pattern as
 * probe: each call followed by an elided error check) --- */
12704 ret
= ssd_check_hw(dev
);
12706 hio_err("%s: check hardware failed\n", dev
->name
);
12711 ssd_clear_alarm(dev
);
12713 ret
= ssd_init_fw_info(dev
);
12715 hio_err("%s: init firmware info failed\n", dev
->name
);
12717 ssd_set_alarm(dev
);
12718 goto out_init_fw_info
;
12726 ret
= ssd_init_rom_info(dev
);
12728 hio_err("%s: init rom info failed\n", dev
->name
);
12730 ssd_set_alarm(dev
);
12731 goto out_init_rom_info
;
12734 ret
= ssd_init_label(dev
);
12736 hio_err("%s: init label failed\n", dev
->name
);
12738 ssd_set_alarm(dev
);
12739 goto out_init_label
;
12742 ret
= ssd_init_workq(dev
);
12744 hio_warn("%s: init workq failed\n", dev
->name
);
12745 goto out_init_workq
;
12747 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12749 ret
= ssd_init_log(dev
);
12751 hio_err("%s: init log failed\n", dev
->name
);
12753 ssd_set_alarm(dev
);
12757 ret
= ssd_init_smart(dev
);
12759 hio_err("%s: init info failed\n", dev
->name
);
12761 ssd_set_alarm(dev
);
12762 goto out_init_smart
;
12766 ret
= ssd_init_hw_info(dev
);
12768 hio_err("%s: init hardware info failed\n", dev
->name
);
12770 ssd_set_alarm(dev
);
12771 goto out_init_hw_info
;
12779 ret
= ssd_init_sensor(dev
);
12781 hio_err("%s: init sensor failed\n", dev
->name
);
12783 ssd_set_alarm(dev
);
12784 goto out_init_sensor
;
12787 ret
= ssd_init_pl_cap(dev
);
12789 hio_err("%s: int pl_cap failed\n", dev
->name
);
12791 ssd_set_alarm(dev
);
12792 goto out_init_pl_cap
;
12796 ret
= ssd_check_init_state(dev
);
12798 hio_err("%s: check init state failed\n", dev
->name
);
12800 ssd_set_alarm(dev
);
12801 goto out_check_init_state
;
12804 //flush all base pointer to ssd
12805 (void)ssd_reload_ssd_ptr(dev
);
12807 ret
= ssd_init_irq(dev
);
12809 hio_warn("%s: init irq failed\n", dev
->name
);
12813 ret
= ssd_init_thread(dev
);
12815 hio_warn("%s: init thread failed\n", dev
->name
);
12816 goto out_init_thread
;
12820 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12827 ret
= ssd_init_ot_protect(dev
);
12829 hio_err("%s: int ot_protect failed\n", dev
->name
);
12831 ssd_set_alarm(dev
);
12832 goto out_int_ot_protect
;
12835 ret
= ssd_init_wmode(dev
);
12837 hio_warn("%s: init write mode\n", dev
->name
);
12838 goto out_init_wmode
;
12841 /* init routine after hw is ready */
12842 ret
= ssd_init_routine(dev
);
12844 hio_warn("%s: init routine\n", dev
->name
);
12845 goto out_init_routine
;
12849 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12853 hio_warn("%s: resume disk finish.\n", dev
->name
);
/* --- error-unwind ladder --- */
12859 out_int_ot_protect
:
12860 ssd_cleanup_thread(dev
);
12864 out_check_init_state
:
12871 ssd_cleanup_log(dev
);
12876 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12877 ssd_cleanup_workq(dev
);
12884 #ifdef LINUX_SUSE_OS
12885 iounmap(dev
->ctrlp
);
12887 pci_iounmap(pdev
, dev
->ctrlp
);
12890 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12891 out_request_mem_region
:
12893 pci_disable_device(pdev
);
12898 hio_warn("%s: resume disk fail.\n", dev
->name
);
/*
 * Module device table plus PM ops: on >= 2.6.32 kernels suspend/resume
 * are wired through a SIMPLE_DEV_PM_OPS structure (hio_pm_ops); the
 * duplicated MODULE_DEVICE_TABLE lines come from the two preprocessor
 * branches of this version split.
 */
12903 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12905 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12907 SIMPLE_DEV_PM_OPS(hio_pm_ops
, ssd_hio_suspend
, ssd_hio_resume
);
12910 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
/*
 * PCI driver descriptor.  remove() needs __devexit_p() wrapping before
 * 2.6.38; before 2.6.32 suspend/resume are legacy pci_driver fields
 * instead of pm_ops (the .driver.pm assignment is in an elided line —
 * TODO confirm).
 */
12911 struct pci_driver ssd_driver
= {
12912 .name
= MODULE_NAME
,
12913 .id_table
= ssd_pci_tbl
,
12914 .probe
= ssd_init_one
,
12915 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12916 .remove
= __devexit_p(ssd_remove_one
),
12918 .remove
= ssd_remove_one
,
12921 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12922 .suspend
= ssd_hio_suspend
,
12923 .resume
= ssd_hio_resume
,
12931 /* notifier block to get a notify on system shutdown/halt/reboot */
/*
 * ssd_notify_reboot(): for every registered device, log the power-off
 * event, flush data and metadata, save SMART, stop the workqueue, and
 * optionally trigger a firmware reload so the controller restarts clean
 * on the next boot.  The notifier return value is in an elided line.
 */
12932 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12934 struct ssd_device
*dev
= NULL
;
12935 struct ssd_device
*n
= NULL
;
12937 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12938 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12940 (void)ssd_flush(dev
);
12941 (void)ssd_save_md(dev
);
12945 ssd_save_smart(dev
);
12947 ssd_stop_workq(dev
);
12949 if (dev
->reload_fw
) {
12950 dev
->has_non_0x98_reg_access
= 1;
12951 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
/*
 * Reboot-notifier block, initialized positionally:
 * .notifier_call = ssd_notify_reboot, .next = NULL, .priority = 0.
 */
12959 static struct notifier_block ssd_notifier
= {
12960 ssd_notify_reboot
, NULL
, 0
/*
 * ssd_init_module(): module entry point.  Sets up global facilities in
 * order — index allocator, /proc, sysfs, per-CPU tasklets, device class,
 * char major, two block majors (normal + slave) — then validates the
 * module parameters (mode, int_mode, log_level, wmode), registers the
 * PCI driver and the reboot notifier.  Failures unwind through the goto
 * ladder at the bottom in reverse order.
 * NOTE(review): extraction gaps — the `if (ret)` checks, some labels and
 * the final returns are in elided lines.
 */
12963 static int __init
ssd_init_module(void)
12967 hio_info("driver version: %s\n", DRIVER_VERSION
);
12969 ret
= ssd_init_index();
12971 hio_warn("init index failed\n");
12972 goto out_init_index
;
12975 ret
= ssd_init_proc();
12977 hio_warn("init proc failed\n");
12978 goto out_init_proc
;
12981 ret
= ssd_init_sysfs();
12983 hio_warn("init sysfs failed\n");
12984 goto out_init_sysfs
;
12987 ret
= ssd_init_tasklet();
12989 hio_warn("init tasklet failed\n");
12990 goto out_init_tasklet
;
/* class_simple_* API predates class_create (<= 2.6.12). */
12993 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12994 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12996 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12998 if (IS_ERR(ssd_class
)) {
12999 ret
= PTR_ERR(ssd_class
);
13000 goto out_class_create
;
/* Char/block majors: a positive module parameter requests that fixed
 * major; otherwise the kernel assigns one dynamically and it is stored
 * back into the parameter variable. */
13003 if (ssd_cmajor
> 0) {
13004 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
13006 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
13009 hio_warn("unable to register chardev major number\n");
13010 goto out_register_chardev
;
13013 if (ssd_major
> 0) {
13014 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13016 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
13019 hio_warn("unable to register major number\n");
13020 goto out_register_blkdev
;
13023 if (ssd_major_sl
> 0) {
13024 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13026 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13029 hio_warn("unable to register slave major number\n");
13030 goto out_register_blkdev_sl
;
/* Clamp out-of-range module parameters to safe defaults. */
13033 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
13034 mode
= SSD_DRV_MODE_STANDARD
;
13038 if (mode
!= SSD_DRV_MODE_STANDARD
) {
13042 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
13043 int_mode
= SSD_INT_MODE_DEFAULT
;
13046 if (threaded_irq
) {
13047 int_mode
= SSD_INT_MSI
;
13050 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
13051 log_level
= SSD_LOG_LEVEL_ERR
;
13054 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
13055 wmode
= SSD_WMODE_DEFAULT
;
/* pci_module_init was the pre-2.6.20 spelling of pci_register_driver. */
13058 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
13059 ret
= pci_module_init(&ssd_driver
);
13061 ret
= pci_register_driver(&ssd_driver
);
13064 hio_warn("pci init failed\n");
13068 ret
= register_reboot_notifier(&ssd_notifier
);
13070 hio_warn("register reboot notifier failed\n");
13071 goto out_register_reboot_notifier
;
/* --- error-unwind ladder, reverse order of the setup above --- */
13076 out_register_reboot_notifier
:
13078 pci_unregister_driver(&ssd_driver
);
13079 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13080 out_register_blkdev_sl
:
13081 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13082 out_register_blkdev
:
13083 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13084 out_register_chardev
:
13085 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13086 class_simple_destroy(ssd_class
);
13088 class_destroy(ssd_class
);
13091 ssd_cleanup_tasklet();
13093 ssd_cleanup_sysfs();
13095 ssd_cleanup_proc();
13097 ssd_cleanup_index();
/*
 * ssd_cleanup_module(): module exit point.  Unregisters everything
 * ssd_init_module() registered, in reverse order: reboot notifier, PCI
 * driver, block/char majors, device class, then the global tasklet,
 * sysfs, /proc and index facilities.
 */
13103 static void __exit
ssd_cleanup_module(void)
13106 hio_info("unload driver: %s\n", DRIVER_VERSION
);
13110 unregister_reboot_notifier(&ssd_notifier
);
13112 pci_unregister_driver(&ssd_driver
);
13114 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
13115 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
13116 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
13117 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
13118 class_simple_destroy(ssd_class
);
13120 class_destroy(ssd_class
);
13123 ssd_cleanup_tasklet();
13124 ssd_cleanup_sysfs();
13125 ssd_cleanup_proc();
13126 ssd_cleanup_index();
/*
 * ssd_register_event_notifier(): exported API.  Installs `event_call` as
 * the per-device event callback, then replays relevant entries from the
 * device's internal log (bounded by dev->uptime and the current time):
 * non-temperature events are delivered immediately; for the temperature
 * family (OVER/WARN/NORMAL) only the newest entry is kept and it is
 * re-delivered at the end if the current temperature is still at or
 * above SSD_OT_TEMP_HYST.  SEU_FAULT1 entries older than the last reset
 * are skipped.
 * NOTE(review): extraction gaps — the loop header over log_nr entries,
 * `cur` computation from tv, return values and braces are in elided
 * lines; comments above are hedged accordingly.
 */
13129 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
13131 struct ssd_device
*dev
;
13133 struct ssd_log
*le
, *temp_le
= NULL
;
/* Reject NULL bdev/callback or a bdev with no gendisk. */
13138 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
13142 dev
= bdev
->bd_disk
->private_data
;
13143 dev
->event_call
= event_call
;
13145 do_gettimeofday(&tv
);
13148 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
13149 log_nr
= dev
->internal_log
.nr_log
;
/* Only replay entries timestamped within [dev->uptime, now]. */
13152 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
13153 if ((le
->le
.event
== SSD_LOG_SEU_FAULT1
) && (le
->time
< dev
->reset_time
)) {
13157 if (le
->le
.event
== SSD_LOG_OVER_TEMP
|| le
->le
.event
== SSD_LOG_NORMAL_TEMP
|| le
->le
.event
== SSD_LOG_WARN_TEMP
) {
/* Track only the most recent temperature event. */
13158 if (!temp_le
|| le
->time
>= temp_le
->time
) {
13164 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
/* Re-deliver the latest temperature event only if still hot. */
13169 ssd_get_temperature(bdev
, &temp
);
13170 if (temp_le
&& (temp
>= SSD_OT_TEMP_HYST
)) {
13171 (void)dev
->event_call(dev
->gd
, temp_le
->le
.event
, ssd_parse_log(dev
, temp_le
, 0));
/*
 * ssd_unregister_event_notifier(): exported API.  Clears the per-device
 * event callback installed by ssd_register_event_notifier().  Rejects a
 * NULL bdev or a bdev without a gendisk (error return is in an elided
 * line).
 */
13177 int ssd_unregister_event_notifier(struct block_device
*bdev
)
13179 struct ssd_device
*dev
;
13181 if (!bdev
|| !(bdev
->bd_disk
)) {
13185 dev
= bdev
->bd_disk
->private_data
;
13186 dev
->event_call
= NULL
;
/*
 * Exported kernel API of the driver (consumed by out-of-tree users),
 * followed by the standard module entry/exit wiring and metadata.
 */
13191 EXPORT_SYMBOL(ssd_get_label
);
13192 EXPORT_SYMBOL(ssd_get_version
);
13193 EXPORT_SYMBOL(ssd_set_otprotect
);
13194 EXPORT_SYMBOL(ssd_bm_status
);
13195 EXPORT_SYMBOL(ssd_submit_pbio
);
13196 EXPORT_SYMBOL(ssd_get_pciaddr
);
13197 EXPORT_SYMBOL(ssd_get_temperature
);
13198 EXPORT_SYMBOL(ssd_register_event_notifier
);
13199 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
13200 EXPORT_SYMBOL(ssd_reset
);
13201 EXPORT_SYMBOL(ssd_set_wmode
);
13205 module_init(ssd_init_module
);
13206 module_exit(ssd_cleanup_module
);
13207 MODULE_VERSION(DRIVER_VERSION
);
13208 MODULE_LICENSE("GPL");
13209 MODULE_AUTHOR("Huawei SSD DEV Team");
13210 MODULE_DESCRIPTION("Huawei SSD driver");