/*
 * Huawei SSD device driver
 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
65 #define MODULE_NAME "hio"
66 #define DRIVER_VERSION "2.1.0.28"
67 #define DRIVER_VERSION_LEN 16
69 #define SSD_FW_MIN 0x1
71 #define SSD_DEV_NAME MODULE_NAME
72 #define SSD_DEV_NAME_LEN 16
73 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
74 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
79 #define SSD_MAJOR_SL 0
82 #define SSD_MAX_DEV 702
83 #define SSD_ALPHABET_NUM 26
85 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
86 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
87 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
88 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
91 #define SSD_SLAVE_PORT_DEVID 0x000a
95 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
97 #define SSD_ESCAPE_IRQ
103 #define SSD_MSIX_VEC 8
106 //#undef SSD_ESCAPE_IRQ
107 #define SSD_MSIX_AFFINITY_FORCE
112 /* Over temperature protect */
113 #define SSD_OT_PROTECT
115 #ifdef SSD_QUEUE_PBIO
116 #define BIO_SSD_PBIO 20
120 //#define SSD_DEBUG_ERR
123 #define SSD_CMD_TIMEOUT (60*HZ)
126 #define SSD_SPI_TIMEOUT (5*HZ)
127 #define SSD_I2C_TIMEOUT (5*HZ)
129 #define SSD_I2C_MAX_DATA (127)
130 #define SSD_SMBUS_BLOCK_MAX (32)
131 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
134 #define SSD_INIT_WAIT (1000) //1s
135 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
136 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
137 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
138 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
139 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
141 /* blkdev busy wait */
142 #define SSD_DEV_BUSY_WAIT 1000 //ms
143 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
146 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
147 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
149 #define SSD_BM_RETRY_MAX 7
151 /* bm routine interval */
152 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
154 /* routine interval */
155 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
156 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
157 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
158 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
161 #define SSD_DMA_ALIGN (16)
163 /* some hw defalut */
164 #define SSD_LOG_MAX_SZ 4096
166 #define SSD_NAND_OOB_SZ 1024
167 #define SSD_NAND_ID_SZ 8
168 #define SSD_NAND_ID_BUFF_SZ 1024
169 #define SSD_NAND_MAX_CE 2
171 #define SSD_BBT_RESERVED 8
173 #define SSD_ECC_MAX_FLIP (64+1)
175 #define SSD_RAM_ALIGN 16
178 #define SSD_RELOAD_FLAG 0x3333CCCC
179 #define SSD_RELOAD_FW 0xAA5555AA
180 #define SSD_RESET_NOINIT 0xAA5555AA
181 #define SSD_RESET 0x55AAAA55
182 #define SSD_RESET_FULL 0x5A
183 //#define SSD_RESET_WAIT 1000 //1s
184 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
188 #define SSD_PROTOCOL_V1 0x0
190 #define SSD_ROM_SIZE (16*1024*1024)
191 #define SSD_ROM_BLK_SIZE (256*1024)
192 #define SSD_ROM_PAGE_SIZE (256)
193 #define SSD_ROM_NR_BRIDGE_FW 2
194 #define SSD_ROM_NR_CTRL_FW 2
195 #define SSD_ROM_BRIDGE_FW_BASE 0
196 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
197 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
198 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
199 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
200 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
203 #define SSD_PROTOCOL_V3 0x3000000
204 #define SSD_PROTOCOL_V3_1_1 0x3010001
205 #define SSD_PROTOCOL_V3_1_3 0x3010003
206 #define SSD_PROTOCOL_V3_2 0x3020000
207 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
208 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
209 #define SSD_PROTOCOL_V3_2_4 0x3020004
212 #define SSD_PV3_ROM_NR_BM_FW 1
213 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
215 #define SSD_ROM_LOG_SZ (64*1024*4)
217 #define SSD_ROM_NR_SMART_MAX 2
218 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
219 #define SSD_PV3_ROM_SMART_SZ (64*1024)
222 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
223 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
227 #define SSD_REQ_FIFO_REG 0x0000
228 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
229 #define SSD_RESP_PTR_REG 0x0010 //0x0018
230 #define SSD_INTR_INTERVAL_REG 0x0018
231 #define SSD_READY_REG 0x001C
232 #define SSD_BRIDGE_TEST_REG 0x0020
233 #define SSD_STRIPE_SIZE_REG 0x0028
234 #define SSD_CTRL_VER_REG 0x0030 //controller
235 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
236 #define SSD_PCB_VER_REG 0x0038
237 #define SSD_BURN_FLAG_REG 0x0040
238 #define SSD_BRIDGE_INFO_REG 0x0044
240 #define SSD_WL_VAL_REG 0x0048 //32-bit
242 #define SSD_BB_INFO_REG 0x004C
244 #define SSD_ECC_TEST_REG 0x0050 //test only
245 #define SSD_ERASE_TEST_REG 0x0058 //test only
246 #define SSD_WRITE_TEST_REG 0x0060 //test only
248 #define SSD_RESET_REG 0x0068
249 #define SSD_RELOAD_FW_REG 0x0070
251 #define SSD_RESERVED_BLKS_REG 0x0074
252 #define SSD_VALID_PAGES_REG 0x0078
253 #define SSD_CH_INFO_REG 0x007C
255 #define SSD_CTRL_TEST_REG_SZ 0x8
256 #define SSD_CTRL_TEST_REG0 0x0080
257 #define SSD_CTRL_TEST_REG1 0x0088
258 #define SSD_CTRL_TEST_REG2 0x0090
259 #define SSD_CTRL_TEST_REG3 0x0098
260 #define SSD_CTRL_TEST_REG4 0x00A0
261 #define SSD_CTRL_TEST_REG5 0x00A8
262 #define SSD_CTRL_TEST_REG6 0x00B0
263 #define SSD_CTRL_TEST_REG7 0x00B8
265 #define SSD_FLASH_INFO_REG0 0x00C0
266 #define SSD_FLASH_INFO_REG1 0x00C8
267 #define SSD_FLASH_INFO_REG2 0x00D0
268 #define SSD_FLASH_INFO_REG3 0x00D8
269 #define SSD_FLASH_INFO_REG4 0x00E0
270 #define SSD_FLASH_INFO_REG5 0x00E8
271 #define SSD_FLASH_INFO_REG6 0x00F0
272 #define SSD_FLASH_INFO_REG7 0x00F8
274 #define SSD_RESP_INFO_REG 0x01B8
275 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
277 #define SSD_CHIP_INFO_REG_SZ 0x10
278 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
279 #define SSD_CHIP_INFO_REG1 0x0110
280 #define SSD_CHIP_INFO_REG2 0x0120
281 #define SSD_CHIP_INFO_REG3 0x0130
282 #define SSD_CHIP_INFO_REG4 0x0140
283 #define SSD_CHIP_INFO_REG5 0x0150
284 #define SSD_CHIP_INFO_REG6 0x0160
285 #define SSD_CHIP_INFO_REG7 0x0170
287 #define SSD_RAM_INFO_REG 0x01C4
289 #define SSD_BBT_BASE_REG 0x01C8
290 #define SSD_ECT_BASE_REG 0x01CC
292 #define SSD_CLEAR_INTR_REG 0x01F0
294 #define SSD_INIT_STATE_REG_SZ 0x8
295 #define SSD_INIT_STATE_REG0 0x0200
296 #define SSD_INIT_STATE_REG1 0x0208
297 #define SSD_INIT_STATE_REG2 0x0210
298 #define SSD_INIT_STATE_REG3 0x0218
299 #define SSD_INIT_STATE_REG4 0x0220
300 #define SSD_INIT_STATE_REG5 0x0228
301 #define SSD_INIT_STATE_REG6 0x0230
302 #define SSD_INIT_STATE_REG7 0x0238
304 #define SSD_ROM_INFO_REG 0x0600
305 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
306 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
307 #define SSD_ROM_VP_INFO_REG 0x060C
309 #define SSD_LOG_INFO_REG 0x0610
310 #define SSD_LED_REG 0x0614
311 #define SSD_MSG_BASE_REG 0x06F8
314 #define SSD_SPI_REG_CMD 0x0180
315 #define SSD_SPI_REG_CMD_HI 0x0184
316 #define SSD_SPI_REG_WDATA 0x0188
317 #define SSD_SPI_REG_ID 0x0190
318 #define SSD_SPI_REG_STATUS 0x0198
319 #define SSD_SPI_REG_RDATA 0x01A0
320 #define SSD_SPI_REG_READY 0x01A8
323 #define SSD_I2C_CTRL_REG 0x06F0
324 #define SSD_I2C_RDATA_REG 0x06F4
326 /* temperature reg */
327 #define SSD_BRIGE_TEMP_REG 0x0618
329 #define SSD_CTRL_TEMP_REG0 0x0700
330 #define SSD_CTRL_TEMP_REG1 0x0708
331 #define SSD_CTRL_TEMP_REG2 0x0710
332 #define SSD_CTRL_TEMP_REG3 0x0718
333 #define SSD_CTRL_TEMP_REG4 0x0720
334 #define SSD_CTRL_TEMP_REG5 0x0728
335 #define SSD_CTRL_TEMP_REG6 0x0730
336 #define SSD_CTRL_TEMP_REG7 0x0738
338 /* reversion 3 reg */
339 #define SSD_PROTOCOL_VER_REG 0x01B4
341 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
342 #define SSD_BM_FAULT_REG 0x0660
344 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
345 #define SSD_PV3_RAM_STATUS_REG0 0x0260
346 #define SSD_PV3_RAM_STATUS_REG1 0x0264
347 #define SSD_PV3_RAM_STATUS_REG2 0x0268
348 #define SSD_PV3_RAM_STATUS_REG3 0x026C
349 #define SSD_PV3_RAM_STATUS_REG4 0x0270
350 #define SSD_PV3_RAM_STATUS_REG5 0x0274
351 #define SSD_PV3_RAM_STATUS_REG6 0x0278
352 #define SSD_PV3_RAM_STATUS_REG7 0x027C
354 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
355 #define SSD_PV3_CHIP_INFO_REG0 0x0300
356 #define SSD_PV3_CHIP_INFO_REG1 0x0340
357 #define SSD_PV3_CHIP_INFO_REG2 0x0380
358 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
359 #define SSD_PV3_CHIP_INFO_REG4 0x0400
360 #define SSD_PV3_CHIP_INFO_REG5 0x0440
361 #define SSD_PV3_CHIP_INFO_REG6 0x0480
362 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
364 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
365 #define SSD_PV3_INIT_STATE_REG0 0x0500
366 #define SSD_PV3_INIT_STATE_REG1 0x0520
367 #define SSD_PV3_INIT_STATE_REG2 0x0540
368 #define SSD_PV3_INIT_STATE_REG3 0x0560
369 #define SSD_PV3_INIT_STATE_REG4 0x0580
370 #define SSD_PV3_INIT_STATE_REG5 0x05A0
371 #define SSD_PV3_INIT_STATE_REG6 0x05C0
372 #define SSD_PV3_INIT_STATE_REG7 0x05E0
374 /* reversion 3.1.1 reg */
375 #define SSD_FULL_RESET_REG 0x01B0
377 #define SSD_CTRL_REG_ZONE_SZ 0x800
379 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
380 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
382 #define SSD_BB_ACC_REG_SZ 0x4
383 #define SSD_BB_ACC_REG0 0x21C0
384 #define SSD_BB_ACC_REG1 0x29C0
385 #define SSD_BB_ACC_REG2 0x31C0
387 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
388 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
390 #define SSD_EC_ACC_REG_SZ 0x4
391 #define SSD_EC_ACC_REG0 0x21E0
392 #define SSD_EC_ACC_REG1 0x29E0
393 #define SSD_EC_ACC_REG2 0x31E0
395 /* reversion 3.1.2 & 3.1.3 reg */
396 #define SSD_HW_STATUS_REG 0x02AC
398 #define SSD_PLP_INFO_REG 0x0664
400 /*reversion 3.2 reg*/
401 #define SSD_POWER_ON_REG 0x01EC
402 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
403 #define SSD_PL_CAP_LEARN_REG 0x01FC
405 #define SSD_FPGA_1V0_REG0 0x2070
406 #define SSD_FPGA_1V8_REG0 0x2078
407 #define SSD_FPGA_1V0_REG1 0x2870
408 #define SSD_FPGA_1V8_REG1 0x2878
410 /*reversion 3.2 reg*/
411 #define SSD_READ_OT_REG0 0x2260
412 #define SSD_WRITE_OT_REG0 0x2264
413 #define SSD_READ_OT_REG1 0x2A60
414 #define SSD_WRITE_OT_REG1 0x2A64
418 #define SSD_FUNC_READ 0x01
419 #define SSD_FUNC_WRITE 0x02
420 #define SSD_FUNC_NAND_READ_WOOB 0x03
421 #define SSD_FUNC_NAND_READ 0x04
422 #define SSD_FUNC_NAND_WRITE 0x05
423 #define SSD_FUNC_NAND_ERASE 0x06
424 #define SSD_FUNC_NAND_READ_ID 0x07
425 #define SSD_FUNC_READ_LOG 0x08
426 #define SSD_FUNC_TRIM 0x09
427 #define SSD_FUNC_RAM_READ 0x10
428 #define SSD_FUNC_RAM_WRITE 0x11
429 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
432 #define SSD_SPI_CMD_PROGRAM 0x02
433 #define SSD_SPI_CMD_READ 0x03
434 #define SSD_SPI_CMD_W_DISABLE 0x04
435 #define SSD_SPI_CMD_READ_STATUS 0x05
436 #define SSD_SPI_CMD_W_ENABLE 0x06
437 #define SSD_SPI_CMD_ERASE 0xd8
438 #define SSD_SPI_CMD_CLSR 0x30
439 #define SSD_SPI_CMD_READ_ID 0x9f
442 #define SSD_I2C_CTRL_READ 0x00
443 #define SSD_I2C_CTRL_WRITE 0x01
445 /* i2c internal register */
446 #define SSD_I2C_CFG_REG 0x00
447 #define SSD_I2C_DATA_REG 0x01
448 #define SSD_I2C_CMD_REG 0x02
449 #define SSD_I2C_STATUS_REG 0x03
450 #define SSD_I2C_SADDR_REG 0x04
451 #define SSD_I2C_LEN_REG 0x05
452 #define SSD_I2C_RLEN_REG 0x06
453 #define SSD_I2C_WLEN_REG 0x07
454 #define SSD_I2C_RESET_REG 0x08 //write for reset
455 #define SSD_I2C_PRER_REG 0x09
459 /* FPGA volt = ADC_value / 4096 * 3v */
460 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
461 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
462 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
463 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
466 #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
467 #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4)
468 #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4)
469 #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12)
471 #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt))
482 SSD_CLOCK_166M_LOST
= 0,
490 #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
491 #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)
493 #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8))
495 #define SSD_INLET_OT_TEMP (55) //55 DegC
496 #define SSD_INLET_OT_HYST (50) //50 DegC
497 #define SSD_FLASH_OT_TEMP (70) //70 DegC
498 #define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
518 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
519 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
520 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
522 #define SSD_LM80_REG_FAN1 0x28
523 #define SSD_LM80_REG_FAN2 0x29
524 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
526 #define SSD_LM80_REG_TEMP 0x27
527 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
528 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
529 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
530 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
532 #define SSD_LM80_REG_CONFIG 0x00
533 #define SSD_LM80_REG_ALARM1 0x01
534 #define SSD_LM80_REG_ALARM2 0x02
535 #define SSD_LM80_REG_MASK1 0x03
536 #define SSD_LM80_REG_MASK2 0x04
537 #define SSD_LM80_REG_FANDIV 0x05
538 #define SSD_LM80_REG_RES 0x06
540 #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8)
542 #define SSD_LM80_3V3_VOLT(val) ((val)*33/19)
544 #define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
585 #ifdef SSD_OT_PROTECT
586 #define SSD_OT_DELAY (60) //ms
588 #define SSD_OT_TEMP (90) //90 DegC
590 #define SSD_OT_TEMP_HYST (85) //85 DegC
593 /* fpga temperature */
594 //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
595 #define CONVERT_TEMP(val) ((val)*504/4096-273)
597 #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4))
598 #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4))
599 #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4))
603 #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
604 #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
605 #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2)))
606 #define SSD_PL_CAP_LEARN_WAIT (20) //20ms
607 #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s
609 #define SSD_PL_CAP_CHARGE_WAIT (1000)
610 #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s
612 #define SSD_PL_CAP_VOLT(val) (val*7)
614 #define SSD_PL_CAP_VOLT_FULL (13700)
615 #define SSD_PL_CAP_VOLT_READY (12880)
617 #define SSD_PL_CAP_THRESHOLD (8900)
618 #define SSD_PL_CAP_CP_THRESHOLD (5800)
619 #define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
635 #define SSD_HWMON_OFFS_TEMP (0)
636 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
637 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
638 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
639 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
640 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
642 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
643 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
644 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
645 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
646 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
647 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
810 #define ssd_blist bio_list
811 #define ssd_blist_init bio_list_init
812 #define ssd_blist_get bio_list_get
813 #define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
823 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
824 #define mutex_lock down
825 #define mutex_unlock up
826 #define mutex semaphore
827 #define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1401 uint64_t reset_time
;
1406 typedef struct ssd_acc_info
{
1407 uint32_t threshold_l1
;
1408 uint32_t threshold_l2
;
1412 typedef struct ssd_reg_op_info
1416 } ssd_reg_op_info_t
;
1418 typedef struct ssd_spi_op_info
1423 } ssd_spi_op_info_t
;
1425 typedef struct ssd_i2c_op_info
1432 } ssd_i2c_op_info_t
;
1434 typedef struct ssd_smbus_op_info
1440 } ssd_smbus_op_info_t
;
1442 typedef struct ssd_ram_op_info
{
1446 uint8_t __user
*buf
;
1447 } ssd_ram_op_info_t
;
1449 typedef struct ssd_flash_op_info
{
1454 uint8_t __user
*buf
;
1455 } ssd_flash_op_info_t
;
1457 typedef struct ssd_sw_log_info
{
1461 } ssd_sw_log_info_t
;
1463 typedef struct ssd_version_info
1465 uint32_t bridge_ver
; /* bridge fw version */
1466 uint32_t ctrl_ver
; /* controller fw version */
1467 uint32_t bm_ver
; /* battery manager fw version */
1468 uint8_t pcb_ver
; /* main pcb version */
1469 uint8_t upper_pcb_ver
;
1472 } ssd_version_info_t
;
1474 typedef struct pci_addr
1482 typedef struct ssd_drv_param_info
{
1492 } ssd_drv_param_info_t
;
1496 enum ssd_form_factor
1498 SSD_FORM_FACTOR_HHHL
= 0,
1499 SSD_FORM_FACTOR_FHHL
1503 /* ssd power loss protect */
1512 #define SSD_BM_SLAVE_ADDRESS 0x16
1513 #define SSD_BM_CAP 5
1516 #define SSD_BM_SAFETYSTATUS 0x51
1517 #define SSD_BM_OPERATIONSTATUS 0x54
1519 /* ManufacturerAccess */
1520 #define SSD_BM_MANUFACTURERACCESS 0x00
1521 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1523 /* Data flash access */
1524 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1525 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1526 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1527 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1529 /* min cap voltage */
1530 #define SSD_BM_CAP_VOLT_MIN 500
1535 SSD_BM_CAP_VINA = 1,
1541 SSD_BMSTATUS_OK
= 0,
1542 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1543 SSD_BMSTATUS_WARNING
1548 SBS_UNIT_TEMPERATURE
,
1553 SBS_UNIT_CAPACITANCE
1581 uint16_t cap_volt
[SSD_BM_CAP
];
1588 struct ssd_bm_manufacturer_data
1590 uint16_t pack_lot_code
;
1591 uint16_t pcb_lot_code
;
1592 uint16_t firmware_ver
;
1593 uint16_t hardware_ver
;
1596 struct ssd_bm_configuration_registers
1609 uint16_t fet_action
;
1614 #define SBS_VALUE_MASK 0xffff
1616 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1617 #define bm_var(start, offset) ((void *) start + (offset))
1619 static struct sbs_cmd ssd_bm_sbs
[] = {
1620 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1621 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1622 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1623 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1624 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1625 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1626 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1627 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1628 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1629 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1630 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1631 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1632 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1633 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1634 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1635 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1636 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1637 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1638 {0, 0, 0, 0, 0, NULL
},
/*
 * Character-device ioctl command set (magic 'H').
 * Direction and payload size are encoded via _IOR/_IOW/_IOWR; the payload
 * struct types are declared earlier in this file.
 */

/* device / firmware information queries */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)

/* raw register access */
#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)

/* SPI flash access */
#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)

/* I2C access */
#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)

/* SMBus access */
#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)

/* battery/capacitor module */
#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)

/* on-board RAM access */
#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)

/* raw NAND access */
#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) // with oob
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) // ignore EIO

#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)

/* alarm LED control */
#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
#define SSD_CMD_SET_ALARM _IOW('H', 191, int)

/* device lifecycle */
#define SSD_CMD_RESET _IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET _IOW('H', 206, int)

/* device log retrieval */
#define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG _IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)

/* over-temperature protection */
#define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)

#define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)

#define SSD_CMD_FLUSH _IOW('H', 240, int)
#define SSD_CMD_SAVE_MD _IOW('H', 241, int)

/* write-mode control */
#define SSD_CMD_SET_WMODE _IOW('H', 242, int)
#define SSD_CMD_GET_WMODE _IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)

/* debug / fault injection */
#define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING _IOW('H', 260, int)
/* maximum size in bytes of one log transfer buffer */
#define SSD_LOG_MAX_SZ 4096
/* default log level reported by the firmware log path */
#define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
1741 SSD_LOG_DATA_NONE
= 0,
1746 typedef struct ssd_log_entry
1764 }__attribute__((packed
))ssd_log_entry_t
;
1766 typedef struct ssd_log
1769 uint64_t ctrl_idx
:8;
1771 } __attribute__((packed
)) ssd_log_t
;
1773 typedef struct ssd_log_desc
1781 } __attribute__((packed
)) ssd_log_desc_t
;
/* controller index value used to tag software-generated log entries */
#define SSD_LOG_SW_IDX 0xF
/* catch-all event id; also used as the terminator of ssd_log_desc[] */
#define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1785 static struct ssd_log_desc ssd_log_desc
[] = {
1786 /* event, level, show flash, show block, show page, desc */
1787 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1788 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1789 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1790 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1791 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1792 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1793 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1794 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1795 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1796 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1797 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1798 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1799 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1800 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1801 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1802 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1803 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1804 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1805 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1806 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1807 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1808 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1809 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1810 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1811 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1812 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1813 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1814 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1815 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1816 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1817 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1818 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Data inspection failure"},
1819 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1822 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1823 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1824 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1825 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1826 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1827 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1828 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1829 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1830 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1831 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1832 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1833 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1834 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1835 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1836 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1837 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1838 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1839 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1840 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1841 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1842 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1843 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1844 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1845 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1846 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1848 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1849 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1850 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1851 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1853 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1855 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1856 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1857 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1858 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1859 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1860 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1861 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1862 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1863 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1864 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1865 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1866 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1867 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1868 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1869 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1870 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1872 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1873 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1874 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1875 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1876 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1878 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1879 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1880 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1881 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1882 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1883 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1885 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Over temperature"}, //90
1886 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature is OK"}, //80
1887 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1888 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1889 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1890 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1891 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1892 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1893 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1894 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1895 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Temperature close to limit"}, //85
1897 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1898 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1899 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1900 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1901 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1903 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1904 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1905 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1906 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1907 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1908 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1909 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1910 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1911 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1912 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1913 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1914 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1915 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1916 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 95 degrees centigrade"},
1917 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Temperature is 100 degrees centigrade"},
1919 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1920 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1921 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1922 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1923 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1924 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1925 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1926 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1927 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1928 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1929 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1930 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1931 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1932 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1933 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1934 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1935 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1936 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1937 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1938 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1939 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1940 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1941 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 55 degrees centigrade"}, //55
1942 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Inlet temperature is 50 degrees centigrade"}, //50
1943 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash over temperature"}, //70
1944 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Info: Flash temperature is OK"}, //65
1945 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1946 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1947 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1948 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1950 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
/* Event codes the driver itself inspects or generates (values match the
 * ssd_log_desc table entries defined earlier in this file). */
#define SSD_LOG_OVER_TEMP 0x90
#define SSD_LOG_NORMAL_TEMP 0x91
#define SSD_LOG_WARN_TEMP 0x9a
#define SSD_LOG_SEU_FAULT 0x93
#define SSD_LOG_SEU_FAULT1 0x98
#define SSD_LOG_BATTERY_FAULT 0x92
#define SSD_LOG_BATTERY_OK 0x99
#define SSD_LOG_BOARD_VOLT_FAULT 0x9f

/* software-generated events (0x300 range) */
#define SSD_LOG_TIMEOUT 0x300
#define SSD_LOG_POWER_ON 0x301
#define SSD_LOG_POWER_OFF 0x302
#define SSD_LOG_CLEAR_LOG 0x303
#define SSD_LOG_SET_CAPACITY 0x304
#define SSD_LOG_CLEAR_DATA 0x305
#define SSD_LOG_BM_SFSTATUS 0x306
#define SSD_LOG_EIO 0x307
#define SSD_LOG_ECMD 0x308
#define SSD_LOG_SET_WMODE 0x309
#define SSD_LOG_DDR_INIT_ERR 0x30a
#define SSD_LOG_PCIE_LINK_STATUS 0x30b
#define SSD_LOG_CTRL_RST_SYNC 0x30c
#define SSD_LOG_CLK_FAULT 0x30d
#define SSD_LOG_VOLT_FAULT 0x30e
#define SSD_LOG_SET_CAPACITY_END 0x30F
#define SSD_LOG_CLEAR_DATA_END 0x310
#define SSD_LOG_RESET 0x311
#define SSD_LOG_CAP_VOLT_FAULT 0x312
#define SSD_LOG_CAP_LEARN_FAULT 0x313
#define SSD_LOG_CAP_STATUS 0x314
#define SSD_LOG_VOLT_STATUS 0x315
#define SSD_LOG_INLET_OVER_TEMP 0x316
#define SSD_LOG_INLET_NORMAL_TEMP 0x317
#define SSD_LOG_FLASH_OVER_TEMP 0x318
#define SSD_LOG_FLASH_NORMAL_TEMP 0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
#define SSD_LOG_SENSOR_FAULT 0x31b
#define SSD_LOG_ERASE_ALL 0x31c
#define SSD_LOG_ERASE_ALL_END 0x31d

/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ 1024
/* Per-CPU bottom-half state: ssd_doneq presumably queues completed commands
 * for the per-CPU tasklet to process — confirm against the IRQ handler. */
static DEFINE_PER_CPU(struct list_head, ssd_doneq);
static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet);
/* unloading driver */
/* Set non-zero while the module is being removed so other paths can bail out.
 * NOTE(review): volatile is not a synchronization primitive — consider an
 * atomic or proper barriers if this is read across CPUs. */
static volatile int ssd_exiting = 0;
/* Device class: old kernels (<= 2.6.12) use class_simple, newer use class.
 * NOTE(review): the #else/#endif between the two declarations appears to
 * have been lost in extraction — restore from upstream source. */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
static struct class_simple *ssd_class;
static struct class *ssd_class;

/* character device major number */
static int ssd_cmajor = SSD_CMAJOR;

/* ssd block device major, minors */
static int ssd_major = SSD_MAJOR;
static int ssd_major_sl = SSD_MAJOR_SL;
static int ssd_minors = SSD_MINORS;

/* ssd device list */
static struct list_head ssd_list;
/* bitmap of allocated device indexes (master and slave name spaces) */
static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1];
static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1];
/* number of devices currently registered */
static atomic_t ssd_nr;
2029 SSD_DRV_MODE_STANDARD
= 0, /* full */
2030 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2031 SSD_DRV_MODE_BASE
/* base only */
/* Compile-time selection of the default interrupt mode.
 * NOTE(review): the closing #else/#endif lines of these conditionals appear
 * to have been lost in extraction — restore from upstream source. */
#if (defined SSD_MSIX)
#define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
#elif (defined SSD_MSI)
#define SSD_INT_MODE_DEFAULT SSD_INT_MSI
/* auto select the default int mode according to the kernel version */
/* suse 11 sp1 irqbalance bug: use msi instead */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
#define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
#define SSD_INT_MODE_DEFAULT SSD_INT_MSI
/* Module parameters — load-time tunables; semantics documented in the
 * MODULE_PARM_DESC strings below. */
static int mode = SSD_DRV_MODE_STANDARD;    /* driver run mode */
static int status_mask = 0xFF;              /* command status bits reported to callers */
static int int_mode = SSD_INT_MODE_DEFAULT; /* legacy / MSI / MSI-X */
static int threaded_irq = 0;                /* use threaded irq handler */
static int log_level = SSD_LOG_LEVEL_WARNING; /* minimum level to display */
static int ot_protect = 1;                  /* over-temperature protection on */
static int wmode = SSD_WMODE_DEFAULT;       /* write mode */
static int finject = 0;                     /* fault injection (debug only) */

module_param(mode, int, 0);
module_param(status_mask, int, 0);
module_param(int_mode, int, 0);
module_param(threaded_irq, int, 0);
module_param(log_level, int, 0);
module_param(ot_protect, int, 0);
module_param(wmode, int, 0);
module_param(finject, int, 0);

MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error");
MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq");
MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable");
MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
// API adaption layer
/* Complete a bio with the given error code across kernel-version API changes.
 * NOTE(review): the function braces, the 4.4+ bio_endio(bio) call and the
 * #else/#endif markers appear to have been lost in extraction — restore from
 * upstream source. */
static inline void ssd_bio_endio(struct bio *bio, int error)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
/* >= 4.4: error travels inside the bio itself */
bio->bi_error = error;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
bio_endio(bio, error);
/* pre-2.6.24 three-argument form */
bio_endio(bio, bio->bi_size, error);
/* Whether the bio is a discard request; the test varies by kernel version.
 * NOTE(review): the leading #if branch(es) and trailing #else/#endif were
 * lost in extraction. Also, REQ_OP_DISCARD is an opcode, not a flag bit —
 * upstream kernels compare with '=='; the bitwise & below looks suspect,
 * verify against the original driver source. */
static inline int ssd_bio_has_discard(struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
return bio_op(bio) & REQ_OP_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
return bio->bi_rw & REQ_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
return bio_rw_flagged(bio, BIO_RW_DISCARD);
/* Whether the bio carries a flush request (API varies by kernel version).
 * NOTE(review): braces and the fallback #else/#endif lines were lost in
 * extraction — restore from upstream source. */
static inline int ssd_bio_has_flush(struct bio *bio)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
return bio_op(bio) & REQ_OP_FLUSH;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
return bio->bi_rw & REQ_FLUSH;
/* Whether the bio requests forced unit access (FUA).
 * NOTE(review): braces and #else/#endif lines were lost in extraction. */
static inline int ssd_bio_has_fua(struct bio *bio)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
/* >= 4.8: flags moved to bi_opf */
return bio->bi_opf & REQ_FUA;
return bio->bi_rw & REQ_FUA;
/* Boot-command-line parsers for the "hio_*=" options (built-in case).
 * Each handler parses its value with simple_strtoul into the matching module
 * variable. NOTE(review): the function braces and the trailing `return 1;`
 * (consume the option) were lost in extraction — restore from upstream. */
static int __init ssd_drv_mode(char *str)
mode = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_status_mask(char *str)
/* parsed as hexadecimal, unlike the other options */
status_mask = (int)simple_strtoul(str, NULL, 16);

static int __init ssd_int_mode(char *str)
int_mode = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_threaded_irq(char *str)
threaded_irq = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_log_level(char *str)
log_level = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_ot_protect(char *str)
ot_protect = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_wmode(char *str)
wmode = (int)simple_strtoul(str, NULL, 0);

static int __init ssd_finject(char *str)
finject = (int)simple_strtoul(str, NULL, 0);

/* register the handlers for the corresponding kernel command-line keys */
__setup(MODULE_NAME "_mode=", ssd_drv_mode);
__setup(MODULE_NAME "_status_mask=", ssd_status_mask);
__setup(MODULE_NAME "_int_mode=", ssd_int_mode);
__setup(MODULE_NAME "_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME "_log_level=", ssd_log_level);
__setup(MODULE_NAME "_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME "_wmode=", ssd_wmode);
__setup(MODULE_NAME "_finject=", ssd_finject);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

/* /proc/<SSD_PROC_DIR>/<SSD_PROC_INFO> — driver/device summary */
#define SSD_PROC_DIR MODULE_NAME
#define SSD_PROC_INFO "info"

static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
/* Legacy (< 3.2) read_proc handler: prints the driver version followed by a
 * per-device summary (size, firmware/PCB versions, device name).
 * NOTE(review): extraction dropped several lines here (braces, the len/size/idx
 * declarations and the return statement) — restore from upstream source. */
static int ssd_proc_read(char *page, char **start,
off_t off, int count, int *eof, void *data)
struct ssd_device *dev = NULL;
struct ssd_device *n = NULL;

/* single-shot read: nothing to output while unloading or past offset 0 */
if (ssd_exiting || off != 0) {

len += snprintf((page + len), (count - len), "Driver Version:\t%s\n", DRIVER_VERSION);

/* _safe variant: presumably devices may be removed while iterating —
 * confirm the locking against the rest of the driver */
list_for_each_entry_safe(dev, n, &ssd_list, list) {
/* convert byte count to decimal GB */
size = dev->hw_info.size;
do_div(size, 1000000000);

len += snprintf((page + len), (count - len), "\n");
len += snprintf((page + len), (count - len), "HIO %d Size:\t%uGB\n", idx, (uint32_t)size);
len += snprintf((page + len), (count - len), "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver);
if (dev->hw_info.ctrl_ver != 0) {
len += snprintf((page + len), (count - len), "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver);
len += snprintf((page + len), (count - len), "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver);
if (dev->hw_info.upper_pcb_ver >= 'A') {
len += snprintf((page + len), (count - len), "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver);
len += snprintf((page + len), (count - len), "HIO %d Device:\t%s\n", idx, dev->name);
/* seq_file show handler (>= 3.2): same report as ssd_proc_read but via
 * seq_printf. NOTE(review): extraction dropped braces, the size/idx
 * declarations and the return statement — restore from upstream source. */
static int ssd_proc_show(struct seq_file *m, void *v)
struct ssd_device *dev = NULL;
struct ssd_device *n = NULL;

seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION);

list_for_each_entry_safe(dev, n, &ssd_list, list) {
/* convert byte count to decimal GB */
size = dev->hw_info.size;
do_div(size, 1000000000);

seq_printf(m, "\n");
seq_printf(m, "HIO %d Size:\t%uGB\n", idx, (uint32_t)size);
seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver);
if (dev->hw_info.ctrl_ver != 0) {
seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver);
seq_printf(m, "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver);
if (dev->hw_info.upper_pcb_ver >= 'A') {
seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver);
seq_printf(m, "HIO %d Device:\t%s\n", idx, dev->name);
/* proc open: wire up the seq_file single_open with the per-entry private data
 * (PDE()->data on <= 3.9, PDE_DATA() afterwards).
 * NOTE(review): braces and #else/#endif lost in extraction. */
static int ssd_proc_open(struct inode *inode, struct file *file)
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
return single_open(file, ssd_proc_show, PDE(inode)->data);
return single_open(file, ssd_proc_show, PDE_DATA(inode));
/* file_operations for the seq_file-based proc entry.
 * NOTE(review): the `.read = seq_read,` member and the closing `};` appear to
 * have been lost in extraction — restore from upstream source. */
static const struct file_operations ssd_proc_fops = {
.open = ssd_proc_open,
.llseek = seq_lseek,
.release = single_release,
/* Remove the proc entries created by ssd_init_proc (info file, then dir).
 * NOTE(review): braces and the `if (ssd_proc_dir)` guard around the directory
 * removal were lost in extraction — restore from upstream source. */
static void ssd_cleanup_proc(void)
if (ssd_proc_info) {
remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir);
ssd_proc_info = NULL;

remove_proc_entry(SSD_PROC_DIR, NULL);
ssd_proc_dir = NULL;
/* Create /proc/<dir>/info; legacy create_proc_entry + read_proc on < 3.2,
 * proc_create + file_operations otherwise. Unwinds the directory on failure.
 * NOTE(review): braces, NULL-checks and the return statements were lost in
 * extraction — restore from upstream source. */
static int ssd_init_proc(void)
ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL);
goto out_proc_mkdir;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir);
goto out_create_proc_entry;

ssd_proc_info->read_proc = ssd_proc_read;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
/* pre-2.6.30 proc entries track an owning module */
ssd_proc_info->owner = THIS_MODULE;

ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops);
goto out_create_proc_entry;

/* error unwind */
out_create_proc_entry:
remove_proc_entry(SSD_PROC_DIR, NULL);
/* !CONFIG_PROC_FS stubs and sysfs placeholders.
 * NOTE(review): the (empty / trivial-return) bodies were lost in extraction —
 * restore from upstream source. */
static void ssd_cleanup_proc(void)
static int ssd_init_proc(void)
#endif /* CONFIG_PROC_FS */

static void ssd_unregister_sysfs(struct ssd_device *dev)
static int ssd_register_sysfs(struct ssd_device *dev)
static void ssd_cleanup_sysfs(void)
static int ssd_init_sysfs(void)
/* Release a device index back to the proper bitmap (slave selects the
 * _sl bitmap) and decrement the device count if it was actually set.
 * NOTE(review): braces and the `if (slave)` guard before the bitmap switch
 * were lost in extraction — restore from upstream source. */
static inline void ssd_put_index(int slave, int index)
unsigned long *index_bits = ssd_index_bits;

index_bits = ssd_index_bits_sl;

if (test_and_clear_bit(index, index_bits)) {
atomic_dec(&ssd_nr);
/* Allocate the lowest free device index from the proper bitmap (slave selects
 * the _sl bitmap); bumps ssd_nr on success.
 * NOTE(review): braces, the `if (slave)` guard, the retry/return paths and the
 * final `return index;` were lost in extraction — restore from upstream. */
static inline int ssd_get_index(int slave)
unsigned long *index_bits = ssd_index_bits;

index_bits = ssd_index_bits_sl;

/* no free index available */
if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) {

/* lost the race for this bit */
if (test_and_set_bit(index, index_bits)) {

atomic_inc(&ssd_nr);
2422 static void ssd_cleanup_index(void)
2427 static int ssd_init_index(void)
2429 INIT_LIST_HEAD(&ssd_list
);
2430 atomic_set(&ssd_nr
, 0);
2431 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2432 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2437 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2439 if(idx
< SSD_ALPHABET_NUM
) {
2440 snprintf(name
, size
, "%c", 'a'+idx
);
2442 idx
-= SSD_ALPHABET_NUM
;
2443 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
/* pci register r&w */
/* Write a 64-bit value to a BAR register as two 32-bit MMIO writes,
 * low dword first.
 * NOTE(review): the function braces (and possibly a write barrier after the
 * second store) were lost in extraction — restore from upstream source. */
static inline void ssd_reg_write(void *addr, uint64_t val)
iowrite32((uint32_t)val, addr);
iowrite32((uint32_t)(val >> 32), addr + 4);
/* Read a 64-bit register value as two 32-bit MMIO reads, low dword first.
 * NOTE(review): the braces, the declaration of `val` and the final
 * `return val;` were lost in extraction — restore from upstream source. */
static inline uint64_t ssd_reg_read(void *addr)
uint32_t val_lo, val_hi;

val_lo = ioread32(addr);
val_hi = ioread32(addr + 4);

/* combine dwords into the 64-bit result */
val = val_lo | ((uint64_t)val_hi << 32);
/* 32-bit MMIO register accessors. Fix: arguments are now parenthesized in the
 * expansion so the macros remain correct when callers pass compound
 * expressions (standard function-like macro hygiene). */
#define ssd_reg32_write(addr, val) writel((val), (addr))
#define ssd_reg32_read(addr) readl((addr))
/* Return the alarm LED to firmware control (read-modify-write of the LED
 * register); no-op for protocols <= V3.
 * NOTE(review): braces, the declaration of `val` and the bit-manipulation
 * line(s) between read and write were lost in extraction — restore from
 * upstream source. */
static void ssd_clear_alarm(struct ssd_device *dev)

if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {

val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG);

/* firmware control */

ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val);
/* Put the alarm LED under software control (read-modify-write of the LED
 * register); no-op for protocols <= V3.
 * NOTE(review): braces, the declaration of `val` and the bit-manipulation
 * line(s) between read and write were lost in extraction — restore from
 * upstream source. */
static void ssd_set_alarm(struct ssd_device *dev)

if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {

val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG);

/* software control */

ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val);
/* Endianness helpers: byte-swap a 32-bit / 16-bit value. Each source byte is
 * masked out and shifted to its mirrored position, then the pieces are OR'd
 * together and the whole expansion is cast back to the fixed-width type. */
#define u32_swap(x) \
	((uint32_t)( \
		(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
		(((uint32_t)(x) & (uint32_t)0x0000ff00UL) <<  8) | \
		(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >>  8) | \
		(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define u16_swap(x) \
	((uint16_t)( \
		(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
		(((uint16_t)(x) & (uint16_t)0xff00) >> 8)))
/* No lock, for init only*/
/* Read the SPI flash ID: issue READ_ID, poll the READY register until it
 * reports 0x1000000 (bounded by SSD_SPI_TIMEOUT), then read the ID register.
 * NOTE(review): braces, local declarations (val, st = jiffies), loop
 * structure, the *id store and return statements were lost in extraction —
 * restore from upstream source. */
static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id)

ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID);

/* four reads of READY — presumably dummy reads to flush the interface;
 * confirm against the hardware reference */
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

/* poll for completion */
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
if (val == 0x1000000) {

if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {

val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID);
/* Initialize the SPI flash interface: under spi_mutex, poll READ_STATUS until
 * the controller is ready (bounded by SSD_SPI_TIMEOUT), clear status on
 * protocol >= V3.2, then disable writes.
 * NOTE(review): braces, local declarations (val, st), the inner status check
 * and the return statements were lost in extraction — restore from upstream
 * source. */
static int ssd_init_spi(struct ssd_device *dev)

mutex_lock(&dev->spi_mutex);

ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

/* wait for the command interface to become ready */
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {

} while (val != 0x1000000);

val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);

if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {

/* newer protocol: clear any latched status/error first */
if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);

ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);
mutex_unlock(&dev->spi_mutex);
2610 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2621 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2622 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2626 mutex_lock(&dev
->spi_mutex
);
2627 while (rlen
< size
) {
2628 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2630 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2632 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2633 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2634 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2635 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2639 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2640 if (val
== 0x1000000) {
2644 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2651 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2652 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2654 rlen
+= sizeof(uint32_t);
2658 mutex_unlock(&dev
->spi_mutex
);
2662 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2674 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2675 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2676 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2680 mutex_lock(&dev
->spi_mutex
);
2682 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2684 wlen
= size
/ sizeof(uint32_t);
2685 for (i
=0; i
<(int)wlen
; i
++) {
2686 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2690 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2692 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2698 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2700 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2702 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2707 } while (val
!= 0x1000000);
2709 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2714 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2721 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2722 if ((val
>> 6) & 0x1) {
2729 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2731 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2734 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2736 mutex_unlock(&dev
->spi_mutex
);
2741 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2751 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2755 mutex_lock(&dev
->spi_mutex
);
2757 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2758 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2761 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2763 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2767 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2770 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2772 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2777 } while (val
!= 0x1000000);
2779 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2784 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2791 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2792 if ((val
>> 5) & 0x1) {
2799 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2801 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2804 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2806 mutex_unlock(&dev
->spi_mutex
);
2811 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2822 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2823 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2827 while (len
< size
) {
2828 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2829 rsize
= dev
->rom_info
.page_size
- roff
;
2830 if ((size
- len
) < rsize
) {
2831 rsize
= (size
- len
);
2835 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2849 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2860 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2861 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2865 while (len
< size
) {
2866 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2867 wsize
= dev
->rom_info
.page_size
- woff
;
2868 if ((size
- len
) < wsize
) {
2869 wsize
= (size
- len
);
2873 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2887 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2897 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2898 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2902 while (len
< size
) {
2905 ret
= ssd_spi_block_erase(dev
, eoff
);
2910 len
+= dev
->rom_info
.block_size
;
2920 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2922 return ssd_reg32_read(addr
);
2925 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2927 ssd_reg32_write(addr
, val
);
2928 ssd_reg32_read(addr
);
2931 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2933 ssd_i2c_ctrl_t ctrl
;
2934 ssd_i2c_data_t data
;
2941 ctrl
.bits
.wdata
= 0;
2942 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2943 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2944 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2948 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2949 if (data
.bits
.valid
== 0) {
2954 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2960 status
= data
.bits
.rdata
;
2962 if (!(status
& 0x4)) {
2963 /* clear read fifo data */
2964 ctrl
.bits
.wdata
= 0;
2965 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2966 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2967 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2971 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2972 if (data
.bits
.valid
== 0) {
2977 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2985 if (nr_data
<= SSD_I2C_MAX_DATA
) {
2994 ctrl
.bits
.wdata
= 0x04;
2995 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2996 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2997 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3000 if (!(status
& 0x8)) {
3002 /* reset i2c controller */
3003 ctrl
.bits
.wdata
= 0x0;
3004 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
3005 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3006 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3013 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3015 ssd_i2c_ctrl_t ctrl
;
3016 ssd_i2c_data_t data
;
3022 mutex_lock(&dev
->i2c_mutex
);
3027 ctrl
.bits
.wdata
= saddr
;
3028 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3029 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3030 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3033 while (off
< size
) {
3034 ctrl
.bits
.wdata
= buf
[off
];
3035 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3036 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3037 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3043 ctrl
.bits
.wdata
= 0x01;
3044 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3045 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3046 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3051 ctrl
.bits
.wdata
= 0;
3052 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3053 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3054 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3057 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3058 if (data
.bits
.valid
== 0) {
3063 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3070 status
= data
.bits
.rdata
;
3075 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3082 if (!(status
& 0x1)) {
3088 if (status
& 0x20) {
3094 if (status
& 0x10) {
3101 if (__ssd_i2c_clear(dev
, saddr
)) {
3105 mutex_unlock(&dev
->i2c_mutex
);
3110 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3112 ssd_i2c_ctrl_t ctrl
;
3113 ssd_i2c_data_t data
;
3119 mutex_lock(&dev
->i2c_mutex
);
3124 ctrl
.bits
.wdata
= saddr
;
3125 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3126 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3127 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3130 ctrl
.bits
.wdata
= size
;
3131 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3132 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3133 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3136 ctrl
.bits
.wdata
= 0x02;
3137 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3138 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3139 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3144 ctrl
.bits
.wdata
= 0;
3145 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3146 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3147 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3150 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3151 if (data
.bits
.valid
== 0) {
3156 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3163 status
= data
.bits
.rdata
;
3168 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3175 if (!(status
& 0x2)) {
3181 if (status
& 0x20) {
3187 if (status
& 0x10) {
3193 while (off
< size
) {
3194 ctrl
.bits
.wdata
= 0;
3195 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3196 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3197 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3201 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3202 if (data
.bits
.valid
== 0) {
3207 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3214 buf
[off
] = data
.bits
.rdata
;
3221 if (__ssd_i2c_clear(dev
, saddr
)) {
3225 mutex_unlock(&dev
->i2c_mutex
);
3230 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3232 ssd_i2c_ctrl_t ctrl
;
3233 ssd_i2c_data_t data
;
3239 mutex_lock(&dev
->i2c_mutex
);
3244 ctrl
.bits
.wdata
= saddr
;
3245 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3246 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3247 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3251 while (off
< wsize
) {
3252 ctrl
.bits
.wdata
= wbuf
[off
];
3253 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3254 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3255 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3261 ctrl
.bits
.wdata
= rsize
;
3262 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3263 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3264 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3267 ctrl
.bits
.wdata
= 0x03;
3268 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3269 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3270 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3275 ctrl
.bits
.wdata
= 0;
3276 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3277 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3278 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3281 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3282 if (data
.bits
.valid
== 0) {
3287 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3294 status
= data
.bits
.rdata
;
3299 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3306 if (!(status
& 0x2)) {
3312 if (status
& 0x20) {
3318 if (status
& 0x10) {
3325 while (off
< rsize
) {
3326 ctrl
.bits
.wdata
= 0;
3327 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3328 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3329 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3333 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3334 if (data
.bits
.valid
== 0) {
3339 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3346 rbuf
[off
] = data
.bits
.rdata
;
3353 if (__ssd_i2c_clear(dev
, saddr
)) {
3356 mutex_unlock(&dev
->i2c_mutex
);
3361 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3367 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3368 if (!ret
|| -ETIMEDOUT
== ret
) {
3373 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3376 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3382 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3388 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3389 if (!ret
|| -ETIMEDOUT
== ret
) {
3394 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3397 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3403 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3405 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3410 memcpy((smb_data
+ 1), buf
, 1);
3413 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3414 if (!ret
|| -ETIMEDOUT
== ret
) {
3419 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3422 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3428 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3430 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3437 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3438 if (!ret
|| -ETIMEDOUT
== ret
) {
3443 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3446 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3452 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3454 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3459 memcpy((smb_data
+ 1), buf
, 2);
3462 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3463 if (!ret
|| -ETIMEDOUT
== ret
) {
3468 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3471 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3477 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3479 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3486 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3487 if (!ret
|| -ETIMEDOUT
== ret
) {
3492 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3495 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3501 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3503 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3509 memcpy((smb_data
+ 2), buf
, size
);
3512 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3513 if (!ret
|| -ETIMEDOUT
== ret
) {
3518 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3521 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3527 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3529 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3537 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3538 if (!ret
|| -ETIMEDOUT
== ret
) {
3543 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3546 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3552 rsize
= smb_data
[1];
3554 if (rsize
> size
) {
3558 memcpy(buf
, (smb_data
+ 2), rsize
);
3564 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3567 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3572 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3577 conf
&= (uint8_t)(~1u);
3579 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3588 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3593 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3598 *data
= u16_swap(val
);
3603 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3612 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3619 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3624 /* set volt limit */
3625 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3626 high
= ssd_lm80_limit
[i
].high
;
3627 low
= ssd_lm80_limit
[i
].low
;
3629 if (SSD_LM80_IN_CAP
== i
) {
3633 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3639 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3645 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3651 /* set interrupt mask: allow volt in interrupt except cap in*/
3653 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3658 /* set interrupt mask: disable others */
3660 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3667 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3676 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3681 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3685 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3690 val
&= ~(1UL << (uint32_t)idx
);
3692 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3701 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3706 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3710 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3715 val
|= (1UL << (uint32_t)idx
);
3717 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3726 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3731 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3736 *data
= u16_swap(val
);
3741 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3744 uint16_t val
= 0, status
;
3745 uint8_t alarm1
= 0, alarm2
= 0;
3750 /* read interrupt status to clear interrupt */
3751 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3756 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3761 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3763 /* parse inetrrupt status */
3764 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3765 if (!((status
>> (uint32_t)i
) & 0x1)) {
3766 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3767 /* enable INx irq */
3768 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3777 /* disable INx irq */
3778 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3783 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3787 high
= (uint32_t)ssd_lm80_limit
[i
].high
* (uint32_t)10;
3788 low
= (uint32_t)ssd_lm80_limit
[i
].low
* (uint32_t)10;
3790 for (j
=0; j
<3; j
++) {
3791 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3795 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3796 if ((volt
>high
) || (volt
<=low
)) {
3798 msleep(SSD_LM80_CONV_INTERVAL
);
3810 case SSD_LM80_IN_CAP
: {
3812 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3814 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3819 case SSD_LM80_IN_1V2
:
3820 case SSD_LM80_IN_1V2a
:
3821 case SSD_LM80_IN_1V5
:
3822 case SSD_LM80_IN_1V8
: {
3823 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3826 case SSD_LM80_IN_FPGA_3V3
:
3827 case SSD_LM80_IN_3V3
: {
3828 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3838 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3839 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
);
3842 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3847 static int ssd_init_sensor(struct ssd_device
*dev
)
3851 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3855 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3857 hio_warn("%s: init lm75 failed\n", dev
->name
);
3858 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3859 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3864 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3865 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3867 hio_warn("%s: init lm80 failed\n", dev
->name
);
3868 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3869 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3876 /* skip error if not in standard mode */
3877 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3884 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3886 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3890 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3894 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3898 static int ssd_mon_temp(struct ssd_device
*dev
)
3904 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3908 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3913 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3915 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3916 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3920 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3922 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3923 if (cur
>= SSD_INLET_OT_TEMP
) {
3924 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3925 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3927 } else if(cur
< SSD_INLET_OT_HYST
) {
3928 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3929 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3934 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3936 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3937 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3941 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3943 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3944 if (cur
>= SSD_FLASH_OT_TEMP
) {
3945 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3946 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3948 } else if(cur
< SSD_FLASH_OT_HYST
) {
3949 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3950 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3959 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3961 test_and_clear_bit(tag
, dev
->tag_map
);
3962 wake_up(&dev
->tag_wq
);
3965 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3970 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3971 DEFINE_WAIT(__wait
);
3977 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
3980 finish_wait(&dev
->tag_wq
, &__wait
);
3983 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3990 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
3992 test_and_clear_bit(tag
, dev
->tag_map
);
3995 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
3999 if (test_and_set_bit(tag
, dev
->tag_map
)) {
4006 static void ssd_barrier_end(struct ssd_device
*dev
)
4008 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4009 wake_up_all(&dev
->tag_wq
);
4011 mutex_unlock(&dev
->barrier_mutex
);
4014 static int ssd_barrier_start(struct ssd_device
*dev
)
4018 mutex_lock(&dev
->barrier_mutex
);
4020 atomic_set(&dev
->queue_depth
, 0);
4022 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4023 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4027 __set_current_state(TASK_INTERRUPTIBLE
);
4028 schedule_timeout(1);
4031 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4032 wake_up_all(&dev
->tag_wq
);
4034 mutex_unlock(&dev
->barrier_mutex
);
4039 static int ssd_busy(struct ssd_device
*dev
)
4041 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4048 static int ssd_wait_io(struct ssd_device
*dev
)
4052 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
4053 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
4057 __set_current_state(TASK_INTERRUPTIBLE
);
4058 schedule_timeout(1);
4065 static int ssd_in_barrier(struct ssd_device
*dev
)
4067 return (0 == atomic_read(&dev
->queue_depth
));
4071 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4073 kfree(dev
->tag_map
);
4076 static int ssd_init_tag(struct ssd_device
*dev
)
4078 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4080 mutex_init(&dev
->barrier_mutex
);
4082 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4084 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4085 if (!dev
->tag_map
) {
4089 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4091 init_waitqueue_head(&dev
->tag_wq
);
4097 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4099 struct ssd_device
*dev
= cmd
->dev
;
4100 struct bio
*bio
= cmd
->bio
;
4101 unsigned long dur
= jiffies
- cmd
->start_time
;
4102 int rw
= bio_data_dir(bio
);
4104 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4105 int cpu
= part_stat_lock();
4106 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4107 part_round_stats(cpu
, part
);
4108 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4109 part_dec_in_flight(part
, rw
);
4111 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4112 int cpu
= part_stat_lock();
4113 struct hd_struct
*part
= &dev
->gd
->part0
;
4114 part_round_stats(cpu
, part
);
4115 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4117 part
->in_flight
[rw
] = atomic_dec_return(&dev
->in_flight
[rw
]);
4118 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4120 disk_round_stats(dev
->gd
);
4122 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4123 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4126 disk_round_stats(dev
->gd
);
4129 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4131 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4133 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4137 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4139 struct ssd_device
*dev
= cmd
->dev
;
4140 struct bio
*bio
= cmd
->bio
;
4141 int rw
= bio_data_dir(bio
);
4143 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4144 int cpu
= part_stat_lock();
4145 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4146 part_round_stats(cpu
, part
);
4147 part_stat_inc(cpu
, part
, ios
[rw
]);
4148 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4149 part_inc_in_flight(part
, rw
);
4151 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4152 int cpu
= part_stat_lock();
4153 struct hd_struct
*part
= &dev
->gd
->part0
;
4154 part_round_stats(cpu
, part
);
4155 part_stat_inc(cpu
, part
, ios
[rw
]);
4156 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4158 part
->in_flight
[rw
] = atomic_inc_return(&dev
->in_flight
[rw
]);
4159 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4161 disk_round_stats(dev
->gd
);
4163 disk_stat_inc(dev
->gd
, ios
[rw
]);
4164 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4165 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4168 disk_round_stats(dev
->gd
);
4171 disk_stat_inc(dev
->gd
, writes
);
4172 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4174 disk_stat_inc(dev
->gd
, reads
);
4175 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4177 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4180 cmd
->start_time
= jiffies
;
4184 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4186 spin_lock(&dev
->sendq_lock
);
4187 ssd_blist_add(&dev
->sendq
, bio
);
4188 spin_unlock(&dev
->sendq_lock
);
4190 atomic_inc(&dev
->in_sendq
);
4191 wake_up(&dev
->send_waitq
);
4194 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4196 struct ssd_device
*dev
= cmd
->dev
;
4197 struct bio
*bio
= cmd
->bio
;
4198 int errors
= cmd
->errors
;
4202 if (!ssd_bio_has_discard(bio
)) {
4203 ssd_end_io_acct(cmd
);
4205 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4206 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4211 ssd_put_tag(dev
, tag
);
4213 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4214 ssd_bio_endio(bio
, errors
);
4215 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4216 spin_lock(&dev
->doneq_lock
);
4217 ssd_blist_add(&dev
->doneq
, bio
);
4218 spin_unlock(&dev
->doneq_lock
);
4220 atomic_inc(&dev
->in_doneq
);
4221 wake_up(&dev
->done_waitq
);
4225 complete(cmd
->waiting
);
4230 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4232 struct ssd_device
*dev
= cmd
->dev
;
4233 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4236 for (i
=0; i
<dev
->nr_queue
; i
++) {
4237 disable_irq(dev
->entry
[i
].vector
);
4240 atomic_inc(&dev
->tocnt
);
4242 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4243 cmd
->errors
= -ETIMEDOUT
;
4244 ssd_end_request(cmd
);
4247 for (i
=0; i
<dev
->nr_queue
; i
++) {
4248 enable_irq(dev
->entry
[i
].vector
);
4256 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4258 init_timer(&cmd
->cmd_timer
);
4260 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4261 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4262 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4264 add_timer(&cmd
->cmd_timer
);
4267 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4269 return del_timer(&cmd
->cmd_timer
);
4272 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4276 timer
->data
= (unsigned long)data
;
4277 timer
->expires
= jiffies
+ timeout
;
4278 timer
->function
= (void (*)(unsigned long)) complt
;
/* Disarm a generic timer; nonzero return means it was still pending. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4288 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4290 struct ssd_device
*dev
= cmd
->dev
;
4291 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4293 ssd_end_timeout_request(cmd
);
4295 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4299 static void __ssd_done(unsigned long data
)
4301 struct ssd_cmd
*cmd
;
4304 local_irq_disable();
4305 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4306 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4308 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4312 while (!list_empty(&localq
)) {
4313 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4314 list_del_init(&cmd
->list
);
4316 ssd_end_request(cmd
);
4320 static void __ssd_done_db(unsigned long data
)
4322 struct ssd_cmd
*cmd
;
4323 struct ssd_device
*dev
;
4327 local_irq_disable();
4328 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4329 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4331 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4335 while (!list_empty(&localq
)) {
4336 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4337 list_del_init(&cmd
->list
);
4339 dev
= (struct ssd_device
*)cmd
->dev
;
4343 sector_t off
= dev
->db_info
.data
.loc
.off
;
4344 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4346 switch (dev
->db_info
.type
) {
4347 case SSD_DEBUG_READ_ERR
:
4348 if (bio_data_dir(bio
) == READ
&&
4349 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4353 case SSD_DEBUG_WRITE_ERR
:
4354 if (bio_data_dir(bio
) == WRITE
&&
4355 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4356 cmd
->errors
= -EROFS
;
4359 case SSD_DEBUG_RW_ERR
:
4360 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4361 if (bio_data_dir(bio
) == READ
) {
4364 cmd
->errors
= -EROFS
;
4373 ssd_end_request(cmd
);
4377 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4379 unsigned long flags
= 0;
4381 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4382 struct ssd_device
*dev
= cmd
->dev
;
4383 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4384 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4391 local_irq_save(flags
);
4392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4393 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4394 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4396 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4397 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4399 local_irq_restore(flags
);
4404 static inline void ssd_done(struct ssd_cmd
*cmd
)
4406 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4407 struct ssd_device
*dev
= cmd
->dev
;
4408 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4409 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4416 ssd_end_request(cmd
);
4421 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4423 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4425 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4427 spin_lock(&dev
->cmd_lock
);
4428 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4429 spin_unlock(&dev
->cmd_lock
);
4432 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4434 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4436 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4438 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4441 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4443 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4444 struct bio
*bio
= cmd
->bio
;
4446 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4449 switch (dev
->db_info
.type
) {
4450 case SSD_DEBUG_READ_TO
:
4451 if (bio_data_dir(bio
) == READ
) {
4455 case SSD_DEBUG_WRITE_TO
:
4456 if (bio_data_dir(bio
) == WRITE
) {
4460 case SSD_DEBUG_RW_TO
:
4468 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4472 /* fixed for BIOVEC_PHYS_MERGEABLE */
4473 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4474 #include <linux/bio.h>
4475 #include <linux/io.h>
4476 #include <xen/page.h>
4478 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4479 const struct bio_vec
*vec2
)
4481 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4482 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4484 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4485 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4488 #ifdef BIOVEC_PHYS_MERGEABLE
4489 #undef BIOVEC_PHYS_MERGEABLE
4491 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4492 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4493 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4497 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4499 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4500 struct bio_vec
*bvec
, *bvprv
= NULL
;
4501 struct scatterlist
*sg
= NULL
;
4502 int i
= 0, nsegs
= 0;
4504 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4505 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4509 * for each segment in bio
4511 bio_for_each_segment(bvec
, bio
, i
) {
4512 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4513 sg
->length
+= bvec
->bv_len
;
4515 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4519 sg
= sg
? (sg
+ 1) : sgl
;
4520 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4521 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4523 sg
->page
= bvec
->bv_page
;
4524 sg
->length
= bvec
->bv_len
;
4525 sg
->offset
= bvec
->bv_offset
;
4532 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4542 struct bio_vec bvec
, bvprv
;
4543 struct bvec_iter iter
;
4544 struct scatterlist
*sg
= NULL
;
4548 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4551 * for each segment in bio
4553 bio_for_each_segment(bvec
, bio
, iter
) {
4554 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4555 sg
->length
+= bvec
.bv_len
;
4557 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4561 sg
= sg
? (sg
+ 1) : sgl
;
4563 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4580 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4582 struct ssd_cmd
*cmd
;
4583 struct ssd_rw_msg
*msg
;
4584 struct ssd_sg_entry
*sge
;
4585 sector_t block
= bio_start(bio
);
4589 tag
= ssd_get_tag(dev
, wait
);
4594 cmd
= &dev
->cmd
[tag
];
4598 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4600 if (ssd_bio_has_discard(bio
)) {
4601 unsigned int length
= bio_sectors(bio
);
4603 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4605 msg
->fun
= SSD_FUNC_TRIM
;
4608 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4610 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4613 block
+= sge
->length
;
4614 length
-= sge
->length
;
4622 msg
->nsegs
= cmd
->nsegs
= i
;
4628 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4629 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4632 if (bio_data_dir(bio
) == READ
) {
4633 msg
->fun
= SSD_FUNC_READ
;
4636 msg
->fun
= SSD_FUNC_WRITE
;
4637 msg
->flag
= dev
->wmode
;
4641 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4643 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4644 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4646 block
+= sge
->length
;
4652 #ifdef SSD_OT_PROTECT
4653 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4654 msleep_interruptible(dev
->ot_delay
);
4658 ssd_start_io_acct(cmd
);
4664 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4666 struct ssd_cmd
*cmd
;
4667 struct ssd_rw_msg
*msg
;
4668 struct ssd_sg_entry
*sge
;
4669 struct scatterlist
*sgl
;
4670 sector_t block
= bio_start(bio
);
4674 tag
= ssd_get_tag(dev
, wait
);
4679 cmd
= &dev
->cmd
[tag
];
4683 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4687 if (ssd_bio_has_discard(bio
)) {
4688 unsigned int length
= bio_sectors(bio
);
4690 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4692 msg
->fun
= SSD_FUNC_TRIM
;
4695 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4697 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4700 block
+= sge
->length
;
4701 length
-= sge
->length
;
4709 msg
->nsegs
= cmd
->nsegs
= i
;
4715 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4718 if (bio_data_dir(bio
) == READ
) {
4719 msg
->fun
= SSD_FUNC_READ
;
4721 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4723 msg
->fun
= SSD_FUNC_WRITE
;
4724 msg
->flag
= dev
->wmode
;
4725 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4729 for (i
=0; i
<cmd
->nsegs
; i
++) {
4731 sge
->length
= sg_dma_len(sgl
) >> 9;
4732 sge
->buf
= sg_dma_address(sgl
);
4734 block
+= sge
->length
;
4741 #ifdef SSD_OT_PROTECT
4742 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4743 msleep_interruptible(dev
->ot_delay
);
4747 ssd_start_io_acct(cmd
);
4754 static int ssd_done_thread(void *data
)
4756 struct ssd_device
*dev
;
4765 current
->flags
|= PF_NOFREEZE
;
4766 //set_user_nice(current, -5);
4768 while (!kthread_should_stop()) {
4769 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4771 while (atomic_read(&dev
->in_doneq
)) {
4773 spin_lock(&dev
->doneq_lock
);
4774 bio
= ssd_blist_get(&dev
->doneq
);
4775 spin_unlock(&dev
->doneq_lock
);
4777 spin_lock_irq(&dev
->doneq_lock
);
4778 bio
= ssd_blist_get(&dev
->doneq
);
4779 spin_unlock_irq(&dev
->doneq_lock
);
4783 next
= bio
->bi_next
;
4784 bio
->bi_next
= NULL
;
4785 ssd_bio_endio(bio
, 0);
4786 atomic_dec(&dev
->in_doneq
);
4792 #ifdef SSD_ESCAPE_IRQ
4793 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4794 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4795 cpumask_var_t new_mask
;
4796 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4797 cpumask_setall(new_mask
);
4798 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4799 set_cpus_allowed_ptr(current
, new_mask
);
4800 free_cpumask_var(new_mask
);
4804 cpus_setall(new_mask
);
4805 cpu_clear(dev
->irq_cpu
, new_mask
);
4806 set_cpus_allowed(current
, new_mask
);
4815 static int ssd_send_thread(void *data
)
4817 struct ssd_device
*dev
;
4826 current
->flags
|= PF_NOFREEZE
;
4827 //set_user_nice(current, -5);
4829 while (!kthread_should_stop()) {
4830 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4832 while (atomic_read(&dev
->in_sendq
)) {
4833 spin_lock(&dev
->sendq_lock
);
4834 bio
= ssd_blist_get(&dev
->sendq
);
4835 spin_unlock(&dev
->sendq_lock
);
4838 next
= bio
->bi_next
;
4839 bio
->bi_next
= NULL
;
4840 #ifdef SSD_QUEUE_PBIO
4841 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4842 __ssd_submit_pbio(dev
, bio
, 1);
4844 ssd_submit_bio(dev
, bio
, 1);
4847 ssd_submit_bio(dev
, bio
, 1);
4849 atomic_dec(&dev
->in_sendq
);
4855 #ifdef SSD_ESCAPE_IRQ
4856 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4857 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4858 cpumask_var_t new_mask
;
4859 if (alloc_cpumask_var(&new_mask
, GFP_ATOMIC
)) {
4860 cpumask_setall(new_mask
);
4861 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4862 set_cpus_allowed_ptr(current
, new_mask
);
4863 free_cpumask_var(new_mask
);
4867 cpus_setall(new_mask
);
4868 cpu_clear(dev
->irq_cpu
, new_mask
);
4869 set_cpus_allowed(current
, new_mask
);
4879 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4881 kthread_stop(dev
->send_thread
);
4882 kthread_stop(dev
->done_thread
);
4885 static int ssd_init_thread(struct ssd_device
*dev
)
4889 atomic_set(&dev
->in_doneq
, 0);
4890 atomic_set(&dev
->in_sendq
, 0);
4892 spin_lock_init(&dev
->doneq_lock
);
4893 spin_lock_init(&dev
->sendq_lock
);
4895 ssd_blist_init(&dev
->doneq
);
4896 ssd_blist_init(&dev
->sendq
);
4898 init_waitqueue_head(&dev
->done_waitq
);
4899 init_waitqueue_head(&dev
->send_waitq
);
4901 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4902 if (IS_ERR(dev
->done_thread
)) {
4903 ret
= PTR_ERR(dev
->done_thread
);
4904 goto out_done_thread
;
4907 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4908 if (IS_ERR(dev
->send_thread
)) {
4909 ret
= PTR_ERR(dev
->send_thread
);
4910 goto out_send_thread
;
4916 kthread_stop(dev
->done_thread
);
4922 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4924 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4926 spin_lock(&dev
->dcmd_lock
);
4927 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4928 spin_unlock(&dev
->dcmd_lock
);
4931 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
4933 struct ssd_dcmd
*dcmd
= NULL
;
4935 spin_lock(&dev
->dcmd_lock
);
4936 if (!list_empty(&dev
->dcmd_list
)) {
4937 dcmd
= list_entry(dev
->dcmd_list
.next
,
4938 struct ssd_dcmd
, list
);
4939 list_del_init(&dcmd
->list
);
4941 spin_unlock(&dev
->dcmd_lock
);
4946 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
4951 static int ssd_init_dcmd(struct ssd_device
*dev
)
4953 struct ssd_dcmd
*dcmd
;
4954 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
4957 spin_lock_init(&dev
->dcmd_lock
);
4958 INIT_LIST_HEAD(&dev
->dcmd_list
);
4959 init_waitqueue_head(&dev
->dcmd_wq
);
4961 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
4963 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
4964 goto out_alloc_dcmd
;
4966 memset(dev
->dcmd
, 0, dcmd_sz
);
4968 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
4970 INIT_LIST_HEAD(&dcmd
->list
);
4971 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4980 static void ssd_put_dmsg(void *msg
)
4982 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
4983 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4985 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
4987 wake_up(&dev
->dcmd_wq
);
4990 static void *ssd_get_dmsg(struct ssd_device
*dev
)
4992 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
4996 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
4999 dcmd
= ssd_get_dcmd(dev
);
5001 finish_wait(&dev
->dcmd_wq
, &wait
);
5007 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5009 DECLARE_COMPLETION(wait
);
5010 struct ssd_cmd
*cmd
;
5014 tag
= ssd_get_tag(dev
, 1);
5019 cmd
= &dev
->cmd
[tag
];
5021 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5022 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5024 cmd
->waiting
= &wait
;
5028 wait_for_completion(cmd
->waiting
);
5029 cmd
->waiting
= NULL
;
5031 if (cmd
->errors
== -ETIMEDOUT
) {
5033 } else if (cmd
->errors
) {
5038 *done
= cmd
->nr_log
;
5040 ssd_put_tag(dev
, cmd
->tag
);
5045 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5047 DECLARE_COMPLETION(wait
);
5048 struct ssd_cmd
*cmd
;
5052 tag
= ssd_barrier_get_tag(dev
);
5057 cmd
= &dev
->cmd
[tag
];
5059 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5060 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5062 cmd
->waiting
= &wait
;
5066 wait_for_completion(cmd
->waiting
);
5067 cmd
->waiting
= NULL
;
5069 if (cmd
->errors
== -ETIMEDOUT
) {
5071 } else if (cmd
->errors
) {
5076 *done
= cmd
->nr_log
;
5078 ssd_barrier_put_tag(dev
, cmd
->tag
);
5083 #ifdef SSD_OT_PROTECT
5084 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5091 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5095 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5098 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5099 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5101 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5102 if (val
== 0xffffffffffffffffull
) {
5106 cur
= (int)CUR_TEMP(val
);
5108 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5109 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5110 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5111 dev
->ot_delay
= SSD_OT_DELAY
;
5118 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5119 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5120 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5127 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5133 if (!dev
|| !status
) {
5137 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5138 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5139 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5140 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5141 if ((val
>> 22) & 0x1) {
5147 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5148 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5149 if ((val
>> 22) & 0x1) {
5155 *status
= !!dev
->ot_delay
;
5162 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5168 mutex_lock(&dev
->fw_mutex
);
5170 dev
->ot_protect
= !!protect
;
5172 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5173 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5174 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5175 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5176 if (dev
->ot_protect
) {
5181 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5184 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5185 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5186 if (dev
->ot_protect
) {
5191 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5195 mutex_unlock(&dev
->fw_mutex
);
5198 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5200 ssd_set_ot_protect(dev
, ot_protect
);
5202 #ifdef SSD_OT_PROTECT
5203 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5210 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5212 struct ssd_log_op_msg
*msg
;
5213 struct ssd_log_msg
*lmsg
;
5215 size_t length
= dev
->hw_info
.log_sz
;
5218 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5222 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5223 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5224 ret
= dma_mapping_error(buf_dma
);
5226 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5229 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5230 goto out_dma_mapping
;
5233 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5235 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5236 lmsg
= (struct ssd_log_msg
*)msg
;
5237 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5238 lmsg
->ctrl_idx
= ctrl_idx
;
5239 lmsg
->buf
= buf_dma
;
5241 msg
->fun
= SSD_FUNC_READ_LOG
;
5242 msg
->ctrl_idx
= ctrl_idx
;
5246 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5249 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5255 #define SSD_LOG_PRINT_BUF_SZ 256
5256 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5258 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5259 struct ssd_log_entry
*le
;
5261 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5267 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5268 if (log_desc
->event
== le
->event
) {
5278 if (log_desc
->level
< log_level
) {
5283 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5286 sn
= dev
->labelv3
.barcode
;
5289 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5291 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5292 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5295 switch (log_desc
->data
) {
5296 case SSD_LOG_DATA_NONE
:
5298 case SSD_LOG_DATA_LOC
:
5299 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5300 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5301 if (log_desc
->sblock
) {
5302 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5304 if (log_desc
->spage
) {
5305 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5308 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5309 if (log_desc
->sblock
) {
5310 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5312 if (log_desc
->spage
) {
5313 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5317 case SSD_LOG_DATA_HEX
:
5318 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5323 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5325 switch (log_desc
->level
) {
5326 case SSD_LOG_LEVEL_INFO
:
5327 hio_info("%s\n", print_buf
);
5329 case SSD_LOG_LEVEL_NOTICE
:
5330 hio_note("%s\n", print_buf
);
5332 case SSD_LOG_LEVEL_WARNING
:
5333 hio_warn("%s\n", print_buf
);
5335 case SSD_LOG_LEVEL_ERR
:
5336 hio_err("%s\n", print_buf
);
5337 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5340 hio_warn("%s\n", print_buf
);
5345 return log_desc
->level
;
5348 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5349 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5352 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5357 case SSD_LOG_OVER_TEMP
: {
5358 #ifdef SSD_OT_PROTECT
5359 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5360 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5361 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5362 dev
->ot_delay
= SSD_OT_DELAY
;
5369 case SSD_LOG_NORMAL_TEMP
: {
5370 #ifdef SSD_OT_PROTECT
5371 /* need to check all controller's temperature */
5372 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5377 case SSD_LOG_BATTERY_FAULT
: {
5380 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5381 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5382 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5386 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5387 ssd_switch_wmode(dev
, dev
->user_wmode
);
5392 case SSD_LOG_BATTERY_OK
: {
5393 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5394 ssd_switch_wmode(dev
, dev
->user_wmode
);
5399 case SSD_LOG_BOARD_VOLT_FAULT
: {
5400 ssd_mon_boardvolt(dev
);
5404 case SSD_LOG_CLEAR_LOG
: {
5406 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5410 case SSD_LOG_CAP_VOLT_FAULT
:
5411 case SSD_LOG_CAP_LEARN_FAULT
:
5412 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5413 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5414 ssd_switch_wmode(dev
, dev
->user_wmode
);
5423 /* ssd event call */
5424 if (dev
->event_call
) {
5425 dev
->event_call(dev
->gd
, event
, level
);
5428 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5429 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5436 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5442 mutex_lock(&dev
->internal_log_mutex
);
5444 size
= sizeof(struct ssd_log
);
5445 off
= dev
->internal_log
.nr_log
* size
;
5447 if (off
== dev
->rom_info
.log_sz
) {
5448 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5449 hio_warn("%s: internal log is full\n", dev
->name
);
5454 internal_log
= dev
->internal_log
.log
+ off
;
5455 memcpy(internal_log
, log
, size
);
5457 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5458 off
+= dev
->rom_info
.log_base
;
5460 ret
= ssd_spi_write(dev
, log
, off
, size
);
5466 dev
->internal_log
.nr_log
++;
5469 mutex_unlock(&dev
->internal_log_mutex
);
5473 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5480 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5483 memset(&log
, 0, sizeof(struct ssd_log
));
5485 do_gettimeofday(&tv
);
5486 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5487 log
.time
= tv
.tv_sec
;
5488 log
.le
.event
= event
;
5489 log
.le
.data
.val
= data
;
5491 level
= ssd_parse_log(dev
, &log
, 0);
5492 if (level
>= SSD_LOG_LEVEL
) {
5493 ret
= ssd_save_log(dev
, &log
);
5497 if (SSD_LOG_LEVEL_ERR
== level
) {
5502 dev
->smart
.log_info
.nr_log
++;
5503 dev
->smart
.log_info
.stat
[level
]++;
5506 ssd_handle_event(dev
, event
, level
);
5511 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5513 struct ssd_log_entry le
;
5516 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5524 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5528 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5533 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5534 queue_work(dev
->workq
, &dev
->log_work
);
5540 static int ssd_do_swlog(struct ssd_device
*dev
)
5542 struct ssd_log_entry le
;
5545 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5546 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5547 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5556 static int __ssd_clear_log(struct ssd_device
*dev
)
5558 uint32_t off
, length
;
5561 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5565 if (dev
->internal_log
.nr_log
== 0) {
5569 mutex_lock(&dev
->internal_log_mutex
);
5571 off
= dev
->rom_info
.log_base
;
5572 length
= dev
->rom_info
.log_sz
;
5574 ret
= ssd_spi_erase(dev
, off
, length
);
5576 hio_warn("%s: log erase: failed\n", dev
->name
);
5580 dev
->internal_log
.nr_log
= 0;
5583 mutex_unlock(&dev
->internal_log_mutex
);
5587 static int ssd_clear_log(struct ssd_device
*dev
)
5591 ret
= __ssd_clear_log(dev
);
5593 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5599 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5601 struct ssd_log_entry
*le
;
5608 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5613 do_gettimeofday(&tv
);
5615 log
.time
= tv
.tv_sec
;
5616 log
.ctrl_idx
= ctrl_idx
;
5618 le
= (ssd_log_entry_t
*)buf
;
5619 while (nr_log
> 0) {
5620 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5622 level
= ssd_parse_log(dev
, &log
, 1);
5623 if (level
>= SSD_LOG_LEVEL
) {
5624 ssd_save_log(dev
, &log
);
5628 if (SSD_LOG_LEVEL_ERR
== level
) {
5632 dev
->smart
.log_info
.nr_log
++;
5633 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5634 dev
->smart
.log_info
.stat
[level
]++;
5638 /* log to the volatile log info */
5639 dev
->log_info
.nr_log
++;
5640 dev
->log_info
.stat
[level
]++;
5644 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5646 /*dev->readonly = 1;
5647 set_disk_ro(dev->gd, 1);
5648 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5652 ssd_handle_event(dev
, le
->event
, level
);
5661 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5662 static void ssd_log_worker(void *data
)
5664 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5666 static void ssd_log_worker(struct work_struct
*work
)
5668 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5673 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5675 if (!dev
->log_buf
) {
5676 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5677 if (!dev
->log_buf
) {
5678 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5684 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5685 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5686 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5688 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5689 hio_warn("%s: do log fail\n", dev
->name
);
5695 ret
= ssd_do_swlog(dev
);
5697 hio_warn("%s: do swlog fail\n", dev
->name
);
5701 static void ssd_cleanup_log(struct ssd_device
*dev
)
5704 kfree(dev
->log_buf
);
5705 dev
->log_buf
= NULL
;
5708 sfifo_free(&dev
->log_fifo
);
5710 if (dev
->internal_log
.log
) {
5711 vfree(dev
->internal_log
.log
);
5712 dev
->internal_log
.nr_log
= 0;
5713 dev
->internal_log
.log
= NULL
;
5717 static int ssd_init_log(struct ssd_device
*dev
)
5719 struct ssd_log
*log
;
5724 mutex_init(&dev
->internal_log_mutex
);
5726 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5727 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5729 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5732 off
= dev
->rom_info
.log_base
;
5733 size
= dev
->rom_info
.log_sz
;
5735 dev
->internal_log
.nr_log
= 0;
5736 dev
->internal_log
.log
= vmalloc(size
);
5737 if (!dev
->internal_log
.log
) {
5742 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5744 goto out_alloc_log_fifo
;
5747 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5751 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5752 while (len
< size
) {
5753 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5758 if (log
->ctrl_idx
== 0xff) {
5762 dev
->internal_log
.nr_log
++;
5764 len
+= sizeof(struct ssd_log
);
5765 off
+= sizeof(struct ssd_log
);
5771 sfifo_free(&dev
->log_fifo
);
5773 vfree(dev
->internal_log
.log
);
5774 dev
->internal_log
.log
= NULL
;
5775 dev
->internal_log
.nr_log
= 0;
5777 /* skip error if not in standard mode */
5778 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5785 static void ssd_stop_workq(struct ssd_device
*dev
)
5787 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5788 flush_workqueue(dev
->workq
);
5791 static void ssd_start_workq(struct ssd_device
*dev
)
5793 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5796 queue_work(dev
->workq
, &dev
->log_work
);
5799 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5801 flush_workqueue(dev
->workq
);
5802 destroy_workqueue(dev
->workq
);
5806 static int ssd_init_workq(struct ssd_device
*dev
)
5810 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5821 static int ssd_init_rom_info(struct ssd_device
*dev
)
5825 mutex_init(&dev
->spi_mutex
);
5826 mutex_init(&dev
->i2c_mutex
);
5828 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5829 /* fix bug: read data to clear status */
5830 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5832 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5833 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5834 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5836 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5837 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5838 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5840 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5841 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5842 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5844 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5846 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5847 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5848 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5849 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5850 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5851 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5852 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5854 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5855 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5856 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5857 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5859 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5860 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5861 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5862 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5864 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
5865 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
5866 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
5868 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
5869 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5871 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
5872 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
5873 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5875 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5876 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5877 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
5878 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
5879 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
5882 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5883 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5884 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5885 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5887 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5888 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5889 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5890 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5892 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5893 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5894 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5895 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5897 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5898 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5899 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
5901 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5902 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
5903 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
5904 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
5905 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
5908 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
5909 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
5912 return ssd_init_spi(dev
);
5916 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
5920 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5921 struct hd_struct
*part
;
5927 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
5931 do_gettimeofday(&tv
);
5932 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
5935 run_time
= tv
.tv_sec
- dev
->uptime
;
5938 /* avoid frequently update */
5939 if (run_time
>= 60) {
5944 smart
->io_stat
.run_time
+= run_time
;
5946 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5947 cpu
= part_stat_lock();
5948 part
= &dev
->gd
->part0
;
5949 part_round_stats(cpu
, part
);
5952 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
5953 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
5954 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
5955 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
5956 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
5958 disk_round_stats(dev
->gd
);
5961 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
5962 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
5963 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
5964 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
5967 disk_round_stats(dev
->gd
);
5970 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
5971 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
5972 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
5973 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
5976 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
5978 for (i
=0; i
<dev
->nr_queue
; i
++) {
5979 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
5980 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
5983 for (i
=0; i
<dev
->nr_queue
; i
++) {
5984 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
5985 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
5989 //dev->uptime = tv.tv_sec;
5994 static int ssd_clear_smart(struct ssd_device
*dev
)
5998 uint32_t off
, length
;
6002 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6007 off
= dev
->rom_info
.smart_base
;
6008 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6010 ret
= ssd_spi_erase(dev
, off
, length
);
6012 hio_warn("%s: info erase: failed\n", dev
->name
);
6016 sversion
= dev
->smart
.version
;
6018 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6019 dev
->smart
.version
= sversion
+ 1;
6020 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6022 /* clear all tmp acc */
6023 for (i
=0; i
<dev
->nr_queue
; i
++) {
6024 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6025 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6028 atomic_set(&dev
->tocnt
, 0);
6030 /* clear tmp log info */
6031 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6033 do_gettimeofday(&tv
);
6034 dev
->uptime
= tv
.tv_sec
;
6037 //ssd_clear_alarm(dev);
6042 static int ssd_clear_warning(struct ssd_device
*dev
)
6047 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6051 /* clear log_info warning */
6052 memset(&dev
->smart
.log_info
, 0, sizeof(dev
->smart
.log_info
));
6054 /* clear io_stat warning */
6055 dev
->smart
.io_stat
.nr_to
= 0;
6056 dev
->smart
.io_stat
.nr_rwerr
= 0;
6057 dev
->smart
.io_stat
.nr_ioerr
= 0;
6059 /* clear ecc_info warning */
6060 memset(&dev
->smart
.ecc_info
, 0, sizeof(dev
->smart
.ecc_info
));
6062 /* clear queued warnings */
6063 for (i
=0; i
<dev
->nr_queue
; i
++) {
6064 /* queued io_stat warning */
6065 dev
->queue
[i
].io_stat
.nr_to
= 0;
6066 dev
->queue
[i
].io_stat
.nr_rwerr
= 0;
6067 dev
->queue
[i
].io_stat
.nr_ioerr
= 0;
6069 /* queued ecc_info warning */
6070 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(dev
->queue
[i
].ecc_info
));
6073 /* write smart back to nor */
6074 for (i
= 0; i
< dev
->rom_info
.nr_smart
; i
++) {
6075 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6076 size
= dev
->rom_info
.smart_sz
;
6078 ret
= ssd_spi_erase(dev
, off
, size
);
6080 hio_warn("%s: warning erase: failed with code 1\n", dev
->name
);
6084 size
= sizeof(struct ssd_smart
);
6086 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6088 hio_warn("%s: warning erase: failed with code 2\n", dev
->name
);
6093 dev
->smart
.version
++;
6095 /* clear cmd timeout warning */
6096 atomic_set(&dev
->tocnt
, 0);
6098 /* clear tmp log info */
6099 memset(&dev
->log_info
, 0, sizeof(dev
->log_info
));
6105 static int ssd_save_smart(struct ssd_device
*dev
)
6111 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6114 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6118 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6122 dev
->smart
.version
++;
6124 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6125 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6126 size
= dev
->rom_info
.smart_sz
;
6128 ret
= ssd_spi_erase(dev
, off
, size
);
6130 hio_warn("%s: info erase failed\n", dev
->name
);
6134 size
= sizeof(struct ssd_smart
);
6136 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6138 hio_warn("%s: info write failed\n", dev
->name
);
6149 static int ssd_init_smart(struct ssd_device
*dev
)
6151 struct ssd_smart
*smart
;
6157 do_gettimeofday(&tv
);
6158 dev
->uptime
= tv
.tv_sec
;
6160 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6164 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6170 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6173 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6174 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6176 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6177 size
= sizeof(struct ssd_smart
);
6179 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6181 hio_warn("%s: info read failed\n", dev
->name
);
6185 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6187 smart
[i
].version
= 0;
6191 if (smart
[i
].version
> dev
->smart
.version
) {
6192 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6196 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6197 /* first time power up */
6198 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6199 dev
->smart
.version
= 1;
6202 /* check log info */
6204 struct ssd_log_info log_info
;
6205 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6207 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6209 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6210 /* skip the volatile log info */
6211 if (SSD_LOG_SEU_FAULT
!= log
->le
.event
&& SSD_LOG_SEU_FAULT1
!= log
->le
.event
) {
6212 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6220 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6221 if (log_info
.stat
[i
] > dev
->smart
.log_info
.stat
[i
]) {
6223 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6224 dev
->smart
.version
++;
6230 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6231 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6235 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6236 size
= dev
->rom_info
.smart_sz
;
6238 ret
= ssd_spi_erase(dev
, off
, size
);
6240 hio_warn("%s: info erase failed\n", dev
->name
);
6244 size
= sizeof(struct ssd_smart
);
6245 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6247 hio_warn("%s: info write failed\n", dev
->name
);
6254 /* sync smart with alarm led */
6255 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6256 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6263 /* skip error if not in standard mode */
6264 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6271 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6273 struct ssd_bm_manufacturer_data bm_md
= {0};
6274 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6282 mutex_lock(&dev
->bm_mutex
);
6284 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6285 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6290 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6291 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6296 if (bm_md
.firmware_ver
& 0xF000) {
6301 *ver
= bm_md
.firmware_ver
;
6304 mutex_unlock(&dev
->bm_mutex
);
6308 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6311 int i
= SSD_BM_RETRY_MAX
;
6315 ret
= __ssd_bm_get_version(dev
, &tmp
);
6329 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6331 struct ssd_bm_configuration_registers bm_cr
;
6332 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6336 mutex_lock(&dev
->bm_mutex
);
6338 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6339 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6344 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6345 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6350 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6355 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6358 mutex_unlock(&dev
->bm_mutex
);
6362 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6365 int i
= SSD_BM_RETRY_MAX
;
6369 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6383 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6385 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6386 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6389 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6398 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6401 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6404 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6414 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6417 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6420 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6430 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6432 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6433 struct ssd_bm bm_status
;
6434 uint8_t buf
[2] = {0, };
6439 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6441 while (bm_sbs
->desc
!= NULL
) {
6442 switch (bm_sbs
->size
) {
6444 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6446 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6452 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6454 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6457 //val = *(uint16_t *)buf;
6465 switch (bm_sbs
->unit
) {
6466 case SBS_UNIT_VALUE
:
6467 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6469 case SBS_UNIT_TEMPERATURE
:
6470 cval
= (uint16_t)(val
- 2731) / 10;
6471 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6473 case SBS_UNIT_VOLTAGE
:
6474 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6476 case SBS_UNIT_CURRENT
:
6477 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6480 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6482 case SBS_UNIT_PERCENT
:
6483 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6485 case SBS_UNIT_CAPACITANCE
:
6486 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6497 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6503 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6505 struct ssd_bm bm_status
= {0};
6510 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6515 /* capacitor voltage */
6516 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6521 for (i
=0; i
<nr_cap
; i
++) {
6522 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6523 *status
= SSD_BMSTATUS_WARNING
;
6529 if (bm_status
.sf_status
) {
6530 *status
= SSD_BMSTATUS_WARNING
;
6535 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6536 *status
= SSD_BMSTATUS_CHARGING
;
6538 *status
= SSD_BMSTATUS_OK
;
6545 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6547 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6548 static void ssd_bm_worker(void *data
)
6550 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6552 static void ssd_bm_worker(struct work_struct
*work
)
6554 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6560 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6564 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6568 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6572 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6574 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6578 /* need cap learning ? */
6579 if (!(opstatus
& 0xF0)) {
6580 ret
= ssd_bm_enter_cap_learning(dev
);
6582 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6588 static void ssd_bm_routine_start(void *data
)
6590 struct ssd_device
*dev
;
6597 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6598 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6599 queue_work(dev
->workq
, &dev
->bm_work
);
6601 queue_work(dev
->workq
, &dev
->capmon_work
);
6607 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6614 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6619 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6624 /* make sure the lm80 voltage value is updated */
6625 msleep(SSD_LM80_CONV_INTERVAL
);
6627 /* check if full charged */
6630 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6632 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6633 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6637 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6638 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6643 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6647 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6650 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6652 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6653 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6657 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6664 /* enter cap learn */
6665 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6669 msleep(SSD_PL_CAP_LEARN_WAIT
);
6671 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6672 if (!((t
>> 1) & 0x1)) {
6677 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6683 if ((t
>> 4) & 0x1) {
6694 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6700 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6708 mutex_lock(&dev
->bm_mutex
);
6710 ssd_stop_workq(dev
);
6712 ret
= ssd_do_cap_learn(dev
, cap
);
6714 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6718 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6721 ssd_start_workq(dev
);
6722 mutex_unlock(&dev
->bm_mutex
);
6727 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6735 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6739 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6746 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6748 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6749 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6753 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6754 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6759 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6761 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6764 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6767 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6768 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6773 /* enable cap INx */
6774 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6776 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6777 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6783 /* skip error if not in standard mode */
6784 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6790 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6796 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6800 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6805 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6809 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6810 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
6818 static int ssd_init_pl_cap(struct ssd_device
*dev
)
6822 /* set here: user write mode */
6823 dev
->user_wmode
= wmode
;
6825 mutex_init(&dev
->bm_mutex
);
6827 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6829 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
6830 if ((val
>> 1) & 0x1) {
6831 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
6834 ret
= ssd_check_pl_cap(dev
);
6836 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/* Force NUL-termination of a fixed-size label field: if no '\0'
 * appears within the first @len bytes, overwrite the last byte with
 * one; otherwise leave the buffer untouched.
 * NOTE(review): the terminating store was lost in extraction and is
 * reconstructed as *(str+len-1) = '\0' — confirm. */
static void __end_str(char *str, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (*(str + i) == '\0')
			return;
	}

	*(str + len - 1) = '\0';
}
6855 static int ssd_init_label(struct ssd_device
*dev
)
6861 /* label location */
6862 off
= dev
->rom_info
.label_base
;
6864 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6865 size
= sizeof(struct ssd_label
);
6868 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
6870 memset(&dev
->label
, 0, size
);
6874 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
6875 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
6876 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
6877 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
6878 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
6879 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
6881 size
= sizeof(struct ssd_labelv3
);
6884 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
6886 memset(&dev
->labelv3
, 0, size
);
6890 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6891 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6892 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
6893 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
6894 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6895 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6896 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
6897 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
6898 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
6902 /* skip error if not in standard mode */
6903 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6909 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
6911 struct ssd_device
*dev
;
6913 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
6917 dev
= bdev
->bd_disk
->private_data
;
6919 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
6920 memset(label
, 0, sizeof(struct ssd_label
));
6921 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6922 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6923 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6924 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6926 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
6932 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
6934 uint16_t bm_ver
= 0;
6937 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6938 ret
= ssd_bm_get_version(dev
, &bm_ver
);
6944 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
6945 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
6946 ver
->bm_ver
= bm_ver
;
6947 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
6948 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
6955 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
6957 struct ssd_device
*dev
;
6960 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
6964 dev
= bdev
->bd_disk
->private_data
;
6966 mutex_lock(&dev
->fw_mutex
);
6967 ret
= __ssd_get_version(dev
, ver
);
6968 mutex_unlock(&dev
->fw_mutex
);
6973 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
6981 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6987 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
6988 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
6989 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
6990 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
6991 *temp
= (int)dev
->db_info
.data
.log
.extra
;
6996 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
6997 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
6999 val
= ssd_reg_read(dev
->ctrlp
+ off
);
7000 if (val
== 0xffffffffffffffffull
) {
7004 cur
= (int)CUR_TEMP(val
);
7015 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
7017 struct ssd_device
*dev
;
7020 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
7024 dev
= bdev
->bd_disk
->private_data
;
7027 mutex_lock(&dev
->fw_mutex
);
7028 ret
= __ssd_get_temperature(dev
, temp
);
7029 mutex_unlock(&dev
->fw_mutex
);
7034 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
7036 struct ssd_device
*dev
;
7038 if (!bdev
|| !(bdev
->bd_disk
)) {
7042 dev
= bdev
->bd_disk
->private_data
;
7043 ssd_set_ot_protect(dev
, !!otprotect
);
7048 int ssd_bm_status(struct block_device
*bdev
, int *status
)
7050 struct ssd_device
*dev
;
7053 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7057 dev
= bdev
->bd_disk
->private_data
;
7059 mutex_lock(&dev
->fw_mutex
);
7060 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7061 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7062 *status
= SSD_BMSTATUS_WARNING
;
7064 *status
= SSD_BMSTATUS_OK
;
7066 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7067 ret
= __ssd_bm_status(dev
, status
);
7069 *status
= SSD_BMSTATUS_OK
;
7071 mutex_unlock(&dev
->fw_mutex
);
7076 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7078 struct ssd_device
*dev
;
7080 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7084 dev
= bdev
->bd_disk
->private_data
;
7086 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7087 paddr
->bus
= dev
->pdev
->bus
->number
;
7088 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7089 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7095 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7100 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7104 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7105 if (0xffffffffull
== acc
->threshold_l1
) {
7108 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7109 if (0xffffffffull
== acc
->threshold_l2
) {
7114 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7115 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7116 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7117 if (0xffffffffull
== acc
->val
) {
7120 if (val
> acc
->val
) {
7129 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7134 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7138 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7139 if (0xffffffffull
== acc
->threshold_l1
) {
7142 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7143 if (0xffffffffull
== acc
->threshold_l2
) {
7148 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7149 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7150 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7151 if (0xffffffffull
== acc
->val
) {
7155 if (val
> acc
->val
) {
7166 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7168 struct ssd_ram_op_msg
*msg
;
7170 size_t len
= length
;
7174 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7175 || !length
|| length
> dev
->hw_info
.ram_max_len
7176 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7180 len
/= dev
->hw_info
.ram_align
;
7181 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7183 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7184 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7185 ret
= dma_mapping_error(buf_dma
);
7187 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7190 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7191 goto out_dma_mapping
;
7194 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7196 msg
->fun
= SSD_FUNC_RAM_READ
;
7197 msg
->ctrl_idx
= ctrl_idx
;
7198 msg
->start
= (uint32_t)ofs_w
;
7202 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7205 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7211 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7213 struct ssd_ram_op_msg
*msg
;
7215 size_t len
= length
;
7219 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7220 || !length
|| length
> dev
->hw_info
.ram_max_len
7221 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7225 len
/= dev
->hw_info
.ram_align
;
7226 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7228 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7229 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7230 ret
= dma_mapping_error(buf_dma
);
7232 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7235 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7236 goto out_dma_mapping
;
7239 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7241 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7242 msg
->ctrl_idx
= ctrl_idx
;
7243 msg
->start
= (uint32_t)ofs_w
;
7247 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7250 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7257 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7264 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7265 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7270 len
= dev
->hw_info
.ram_max_len
;
7271 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7275 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7288 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7295 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7296 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7301 len
= dev
->hw_info
.ram_max_len
;
7302 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7306 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7321 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7323 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7324 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7326 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7330 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7334 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7340 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7342 struct ssd_nand_op_msg
*msg
;
7349 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7350 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7351 ret
= dma_mapping_error(buf_dma
);
7353 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7356 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7357 goto out_dma_mapping
;
7360 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7361 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7365 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7367 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7368 msg
->chip_no
= flash
;
7369 msg
->chip_ce
= chip
;
7370 msg
->ctrl_idx
= ctrl_idx
;
7373 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7376 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7383 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7384 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7386 struct ssd_nand_op_msg
*msg
;
7395 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7399 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7404 length
= page_count
* dev
->hw_info
.page_size
;
7406 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7407 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7408 ret
= dma_mapping_error(buf_dma
);
7410 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7413 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7414 goto out_dma_mapping
;
7417 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7418 flash
= (flash
<< 1) | chip
;
7422 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7424 msg
->fun
= SSD_FUNC_NAND_READ
;
7425 msg
->ctrl_idx
= ctrl_idx
;
7426 msg
->chip_no
= flash
;
7427 msg
->chip_ce
= chip
;
7428 msg
->page_no
= page
;
7429 msg
->page_count
= page_count
;
7432 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7435 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7442 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7443 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7445 struct ssd_nand_op_msg
*msg
;
7454 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7458 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7463 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7465 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7466 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7467 ret
= dma_mapping_error(buf_dma
);
7469 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7472 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7473 goto out_dma_mapping
;
7476 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7477 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7481 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7483 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7484 msg
->ctrl_idx
= ctrl_idx
;
7485 msg
->chip_no
= flash
;
7486 msg
->chip_ce
= chip
;
7487 msg
->page_no
= page
;
7488 msg
->page_count
= count
;
7491 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7494 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7501 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7502 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7504 struct ssd_nand_op_msg
*msg
;
7509 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7521 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7526 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7528 /* write data to ram */
7529 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7534 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7535 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7536 ret
= dma_mapping_error(buf_dma
);
7538 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7541 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7542 goto out_dma_mapping
;
7545 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7546 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7550 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7552 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7553 msg
->ctrl_idx
= ctrl_idx
;
7554 msg
->chip_no
= flash
;
7555 msg
->chip_ce
= chip
;
7557 msg
->page_no
= page
;
7558 msg
->page_count
= count
;
7561 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7564 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7570 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7572 struct ssd_nand_op_msg
*msg
;
7575 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7580 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7581 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7585 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7587 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7588 msg
->ctrl_idx
= ctrl_idx
;
7589 msg
->chip_no
= flash
;
7590 msg
->chip_ce
= chip
;
7591 msg
->page_no
= page
;
7593 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7599 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7601 struct ssd_nand_op_msg
*msg
;
7602 struct ssd_flush_msg
*fmsg
;
7605 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7610 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7612 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7613 fmsg
= (struct ssd_flush_msg
*)msg
;
7615 fmsg
->fun
= SSD_FUNC_FLUSH
;
7617 fmsg
->flash
= flash
;
7618 fmsg
->ctrl_idx
= ctrl_idx
;
7620 msg
->fun
= SSD_FUNC_FLUSH
;
7622 msg
->chip_no
= flash
;
7623 msg
->ctrl_idx
= ctrl_idx
;
7626 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * __ssd_check_init_state() - poll the flash controllers until every
 * chip/channel reports init-done, or the wait budget is exhausted.
 * NOTE(review): this block is a mangled extraction with interior lines
 * elided; code left byte-identical, comments only added.
 */
7632 /* flash controller init state */
7633 static int __ssd_check_init_state(struct ssd_device
*dev
)
7635 uint32_t *init_state
= NULL
;
7636 int reg_base
, reg_sz
;
7637 int max_wait
= SSD_INIT_MAX_WAIT
;
/* Probe which controllers respond: write a test pattern and expect the
 * hardware to echo its complement back. */
7643 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7644 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7645 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7646 if (read_data == ~test_data) {
7647 //dev->hw_info.nr_ctrl++;
7648 dev->hw_info.nr_ctrl_map |= 1<<i;
/* Cross-check the ready register against the expected controller count. */
7654 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7656 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7657 if (((read_data>>i) & 0x1) == 0) {
7662 if (dev->hw_info.nr_ctrl != j) {
7663 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
/* All controllers must agree on the flash-info registers. */
7669 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7670 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7671 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7672 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7678 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7679 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7680 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7681 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7686 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7687 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7688 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7689 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
/* Newer protocol revisions are given a longer init-wait budget. */
7695 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7696 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7699 reg_base
= dev
->protocol_info
.init_state_reg
;
7700 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
/* Snapshot the per-controller init-state bitmap into a kernel buffer. */
7702 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7707 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7709 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7710 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7713 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7714 /* just check the last bit, no need to check all channel */
7715 ch_start
= dev
->hw_info
.max_ch
- 1;
/* For each chip, every (checked) channel's init bit must be set;
 * otherwise sleep and retry until max_wait expires. */
7720 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7721 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7722 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7727 if (init_wait
<= max_wait
) {
7728 msleep(SSD_INIT_WAIT
);
7731 if (k
< dev
->hw_info
.nr_ch
) {
7732 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7733 dev
->name
, i
, j
, k
);
7735 hio_warn("%s: controller %d chip %d init failed\n",
7746 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
7752 static int ssd_check_init_state(struct ssd_device
*dev
)
7754 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7758 return __ssd_check_init_state(dev
);
7761 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7763 /* reset flash controller etc */
7764 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7767 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7771 mutex_lock(&dev
->fw_mutex
);
7773 if (type
== SSD_RST_NOINIT
) { //no init
7774 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7775 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7776 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7777 } else { // full reset
7778 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7779 mutex_unlock(&dev
->fw_mutex
);
7783 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7786 ssd_reset_resp_ptr(dev
);
7789 #ifdef SSD_OT_PROTECT
7796 ssd_set_flush_timeout(dev
, dev
->wmode
);
7798 mutex_unlock(&dev
->fw_mutex
);
7799 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7800 do_gettimeofday(&tv
);
7801 dev
->reset_time
= tv
.tv_sec
;
7803 return __ssd_check_init_state(dev
);
7806 static int ssd_save_md(struct ssd_device
*dev
)
7808 struct ssd_nand_op_msg
*msg
;
7811 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7814 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7818 if (!dev
->save_md
) {
7822 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7824 msg
->fun
= SSD_FUNC_FLUSH
;
7829 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7835 static int ssd_barrier_save_md(struct ssd_device
*dev
)
7837 struct ssd_nand_op_msg
*msg
;
7840 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7843 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7847 if (!dev
->save_md
) {
7851 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7853 msg
->fun
= SSD_FUNC_FLUSH
;
7858 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
7864 static int ssd_flush(struct ssd_device
*dev
)
7866 struct ssd_nand_op_msg
*msg
;
7867 struct ssd_flush_msg
*fmsg
;
7870 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7873 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7875 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7876 fmsg
= (struct ssd_flush_msg
*)msg
;
7878 fmsg
->fun
= SSD_FUNC_FLUSH
;
7883 msg
->fun
= SSD_FUNC_FLUSH
;
7889 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7895 static int ssd_barrier_flush(struct ssd_device
*dev
)
7897 struct ssd_nand_op_msg
*msg
;
7898 struct ssd_flush_msg
*fmsg
;
7901 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7904 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7906 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7907 fmsg
= (struct ssd_flush_msg
*)msg
;
7909 fmsg
->fun
= SSD_FUNC_FLUSH
;
7914 msg
->fun
= SSD_FUNC_FLUSH
;
7920 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
7926 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
7927 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
7928 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
7929 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
7934 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7939 case SSD_WMODE_BUFFER
:
7940 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7942 case SSD_WMODE_BUFFER_EX
:
7943 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
7944 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
7946 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7950 to
= SSD_WMODE_FUA_TIMEOUT
;
7956 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
7958 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
7961 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
7965 ret
= ssd_barrier_start(dev
);
7970 ret
= ssd_barrier_flush(dev
);
7972 goto out_barrier_end
;
7975 /* set contoller flush timeout */
7976 ssd_set_flush_timeout(dev
, m
);
7982 ssd_barrier_end(dev
);
7987 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
7993 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
7997 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7998 default_wmode
= SSD_WMODE_BUFFER
;
8000 default_wmode
= SSD_WMODE_BUFFER_EX
;
8003 if (SSD_WMODE_AUTO
== m
) {
8004 /* battery fault ? */
8005 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8006 next_wmode
= SSD_WMODE_FUA
;
8008 next_wmode
= default_wmode
;
8010 } else if (SSD_WMODE_DEFAULT
== m
) {
8011 next_wmode
= default_wmode
;
8016 if (next_wmode
!= dev
->wmode
) {
8017 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8018 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
8020 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
8027 static int ssd_init_wmode(struct ssd_device
*dev
)
8032 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8033 default_wmode
= SSD_WMODE_BUFFER
;
8035 default_wmode
= SSD_WMODE_BUFFER_EX
;
8039 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
8040 /* battery fault ? */
8041 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
8042 dev
->wmode
= SSD_WMODE_FUA
;
8044 dev
->wmode
= default_wmode
;
8046 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
8047 dev
->wmode
= default_wmode
;
8049 dev
->wmode
= dev
->user_wmode
;
8051 ssd_set_flush_timeout(dev
, dev
->wmode
);
8056 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8060 /* not support old fw*/
8061 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8066 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8071 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8073 dev
->user_wmode
= m
;
8075 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
8084 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8086 struct ssd_device
*dev
;
8088 if (!bdev
|| !(bdev
->bd_disk
)) {
8092 dev
= bdev
->bd_disk
->private_data
;
8094 return __ssd_set_wmode(dev
, m
);
8097 static int ssd_do_reset(struct ssd_device
*dev
)
8101 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8105 ssd_stop_workq(dev
);
8107 ret
= ssd_barrier_start(dev
);
8112 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8114 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8117 //ret = __ssd_reset(dev, SSD_RST_FULL);
8118 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8121 goto out_barrier_end
;
8125 ssd_barrier_end(dev
);
8127 ssd_start_workq(dev
);
8128 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8132 static int ssd_full_reset(struct ssd_device
*dev
)
8136 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8140 ssd_stop_workq(dev
);
8142 ret
= ssd_barrier_start(dev
);
8147 ret
= ssd_barrier_flush(dev
);
8149 goto out_barrier_end
;
8152 ret
= ssd_barrier_save_md(dev
);
8154 goto out_barrier_end
;
8157 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8159 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8162 //ret = __ssd_reset(dev, SSD_RST_FULL);
8163 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8166 goto out_barrier_end
;
8170 ssd_barrier_end(dev
);
8172 ssd_start_workq(dev
);
8173 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
8177 int ssd_reset(struct block_device
*bdev
)
8179 struct ssd_device
*dev
;
8181 if (!bdev
|| !(bdev
->bd_disk
)) {
8185 dev
= bdev
->bd_disk
->private_data
;
8187 return ssd_full_reset(dev
);
8190 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
8191 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8192 sector_t
*error_sector
)
8194 struct ssd_device
*dev
= q
->queuedata
;
8196 return ssd_flush(dev
);
8200 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8202 struct ssd_device
*dev
= q
->queuedata
;
8203 #ifdef SSD_QUEUE_PBIO
8207 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8208 ssd_bio_endio(bio
, -ENODEV
);
8212 #ifdef SSD_DEBUG_ERR
8213 if (atomic_read(&dev
->tocnt
)) {
8214 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8215 ssd_bio_endio(bio
, -EIO
);
8220 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8221 if (unlikely(bio_barrier(bio
))) {
8222 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8225 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8226 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8227 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8230 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8231 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8232 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8237 if (unlikely(ssd_bio_has_fua(bio
))) {
8238 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8243 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8244 ssd_bio_endio(bio
, -EROFS
);
8248 #ifdef SSD_QUEUE_PBIO
8249 if (0 == atomic_read(&dev
->in_sendq
)) {
8250 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8254 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8255 ssd_queue_bio(dev
, bio
);
8258 __ssd_submit_pbio(dev
, bio
, 1);
8265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8266 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8267 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8268 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8270 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8273 struct ssd_device
*dev
= q
->queuedata
;
8276 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8277 ssd_bio_endio(bio
, -ENODEV
);
8281 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
8282 blk_queue_split(q
, &bio
, q
->bio_split
);
8285 #ifdef SSD_DEBUG_ERR
8286 if (atomic_read(&dev
->tocnt
)) {
8287 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8288 ssd_bio_endio(bio
, -EIO
);
8293 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8294 if (unlikely(bio_barrier(bio
))) {
8295 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8298 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8299 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8300 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8303 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8304 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8305 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8310 if (unlikely(ssd_bio_has_fua(bio
))) {
8311 ssd_bio_endio(bio
, -EOPNOTSUPP
);
8315 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8316 if (unlikely(ssd_bio_has_flush(bio
) && !bio_sectors(bio
))) {
8317 ssd_bio_endio(bio
, 0);
8323 if (0 == atomic_read(&dev
->in_sendq
)) {
8324 ret
= ssd_submit_bio(dev
, bio
, 0);
8328 ssd_queue_bio(dev
, bio
);
8332 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8333 return BLK_QC_T_NONE
;
8334 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8341 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
8342 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8344 struct ssd_device
*dev
;
8350 dev
= bdev
->bd_disk
->private_data
;
8357 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
/* forward declarations: queue/blkdev setup helpers defined later in the file */
static int ssd_init_queue(struct ssd_device *dev);
static void ssd_cleanup_queue(struct ssd_device *dev);
static void ssd_cleanup_blkdev(struct ssd_device *dev);
static int ssd_init_blkdev(struct ssd_device *dev);
8366 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8368 void __user
*argp
= (void __user
*)arg
;
8369 void __user
*buf
= NULL
;
8374 case SSD_CMD_GET_PROTOCOL_INFO
:
8375 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8376 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8382 case SSD_CMD_GET_HW_INFO
:
8383 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8384 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8390 case SSD_CMD_GET_ROM_INFO
:
8391 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8392 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8398 case SSD_CMD_GET_SMART
: {
8399 struct ssd_smart smart
;
8402 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8404 mutex_lock(&dev
->gd_mutex
);
8405 ssd_update_smart(dev
, &smart
);
8406 mutex_unlock(&dev
->gd_mutex
);
8408 /* combine the volatile log info */
8409 if (dev
->log_info
.nr_log
) {
8410 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8411 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8415 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8416 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8424 case SSD_CMD_GET_IDX
:
8425 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8426 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8432 case SSD_CMD_GET_AMOUNT
: {
8433 int nr_ssd
= atomic_read(&ssd_nr
);
8434 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8435 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8442 case SSD_CMD_GET_TO_INFO
: {
8443 int tocnt
= atomic_read(&dev
->tocnt
);
8445 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8446 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8453 case SSD_CMD_GET_DRV_VER
: {
8454 char ver
[] = DRIVER_VERSION
;
8455 int len
= sizeof(ver
);
8457 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8458 len
= (DRIVER_VERSION_LEN
- 1);
8460 if (copy_to_user(argp
, ver
, len
)) {
8461 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8468 case SSD_CMD_GET_BBACC_INFO
: {
8469 struct ssd_acc_info acc
;
8471 mutex_lock(&dev
->fw_mutex
);
8472 ret
= ssd_bb_acc(dev
, &acc
);
8473 mutex_unlock(&dev
->fw_mutex
);
8478 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8479 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8486 case SSD_CMD_GET_ECACC_INFO
: {
8487 struct ssd_acc_info acc
;
8489 mutex_lock(&dev
->fw_mutex
);
8490 ret
= ssd_ec_acc(dev
, &acc
);
8491 mutex_unlock(&dev
->fw_mutex
);
8496 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8497 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8504 case SSD_CMD_GET_HW_INFO_EXT
:
8505 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8506 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8512 case SSD_CMD_REG_READ
: {
8513 struct ssd_reg_op_info reg_info
;
8515 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8516 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8521 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8526 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8527 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8528 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8536 case SSD_CMD_REG_WRITE
: {
8537 struct ssd_reg_op_info reg_info
;
8539 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8540 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8545 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8550 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8555 case SSD_CMD_SPI_READ
: {
8556 struct ssd_spi_op_info spi_info
;
8559 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8560 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8566 size
= spi_info
.len
;
8569 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8574 kbuf
= kmalloc(size
, GFP_KERNEL
);
8580 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8586 if (copy_to_user(buf
, kbuf
, size
)) {
8587 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8598 case SSD_CMD_SPI_WRITE
: {
8599 struct ssd_spi_op_info spi_info
;
8602 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8603 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8609 size
= spi_info
.len
;
8612 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8617 kbuf
= kmalloc(size
, GFP_KERNEL
);
8623 if (copy_from_user(kbuf
, buf
, size
)) {
8624 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8630 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8641 case SSD_CMD_SPI_ERASE
: {
8642 struct ssd_spi_op_info spi_info
;
8645 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8646 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8653 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8658 ret
= ssd_spi_block_erase(dev
, off
);
8666 case SSD_CMD_I2C_READ
: {
8667 struct ssd_i2c_op_info i2c_info
;
8671 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8672 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8677 saddr
= i2c_info
.saddr
;
8678 rsize
= i2c_info
.rsize
;
8679 buf
= i2c_info
.rbuf
;
8681 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8686 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8692 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8698 if (copy_to_user(buf
, kbuf
, rsize
)) {
8699 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8710 case SSD_CMD_I2C_WRITE
: {
8711 struct ssd_i2c_op_info i2c_info
;
8715 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8716 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8721 saddr
= i2c_info
.saddr
;
8722 wsize
= i2c_info
.wsize
;
8723 buf
= i2c_info
.wbuf
;
8725 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8730 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8736 if (copy_from_user(kbuf
, buf
, wsize
)) {
8737 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8743 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8754 case SSD_CMD_I2C_WRITE_READ
: {
8755 struct ssd_i2c_op_info i2c_info
;
8761 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8762 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8767 saddr
= i2c_info
.saddr
;
8768 wsize
= i2c_info
.wsize
;
8769 rsize
= i2c_info
.rsize
;
8770 buf
= i2c_info
.wbuf
;
8772 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8777 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8782 size
= wsize
+ rsize
;
8784 kbuf
= kmalloc(size
, GFP_KERNEL
);
8790 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8791 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8797 buf
= i2c_info
.rbuf
;
8799 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8805 if (copy_to_user(buf
, kbuf
, rsize
)) {
8806 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8817 case SSD_CMD_SMBUS_SEND_BYTE
: {
8818 struct ssd_smbus_op_info smbus_info
;
8819 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8823 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8824 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8829 saddr
= smbus_info
.saddr
;
8830 buf
= smbus_info
.buf
;
8833 if (copy_from_user(smb_data
, buf
, size
)) {
8834 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8839 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
8847 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
8848 struct ssd_smbus_op_info smbus_info
;
8849 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8853 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8854 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8859 saddr
= smbus_info
.saddr
;
8860 buf
= smbus_info
.buf
;
8863 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
8868 if (copy_to_user(buf
, smb_data
, size
)) {
8869 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8877 case SSD_CMD_SMBUS_WRITE_BYTE
: {
8878 struct ssd_smbus_op_info smbus_info
;
8879 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8884 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8885 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8890 saddr
= smbus_info
.saddr
;
8891 command
= smbus_info
.cmd
;
8892 buf
= smbus_info
.buf
;
8895 if (copy_from_user(smb_data
, buf
, size
)) {
8896 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8901 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
8909 case SSD_CMD_SMBUS_READ_BYTE
: {
8910 struct ssd_smbus_op_info smbus_info
;
8911 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8916 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8917 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8922 saddr
= smbus_info
.saddr
;
8923 command
= smbus_info
.cmd
;
8924 buf
= smbus_info
.buf
;
8927 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
8932 if (copy_to_user(buf
, smb_data
, size
)) {
8933 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8941 case SSD_CMD_SMBUS_WRITE_WORD
: {
8942 struct ssd_smbus_op_info smbus_info
;
8943 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8948 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8949 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8954 saddr
= smbus_info
.saddr
;
8955 command
= smbus_info
.cmd
;
8956 buf
= smbus_info
.buf
;
8959 if (copy_from_user(smb_data
, buf
, size
)) {
8960 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8965 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
8973 case SSD_CMD_SMBUS_READ_WORD
: {
8974 struct ssd_smbus_op_info smbus_info
;
8975 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8980 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8981 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8986 saddr
= smbus_info
.saddr
;
8987 command
= smbus_info
.cmd
;
8988 buf
= smbus_info
.buf
;
8991 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
8996 if (copy_to_user(buf
, smb_data
, size
)) {
8997 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9005 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
9006 struct ssd_smbus_op_info smbus_info
;
9007 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9012 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9013 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9018 saddr
= smbus_info
.saddr
;
9019 command
= smbus_info
.cmd
;
9020 buf
= smbus_info
.buf
;
9021 size
= smbus_info
.size
;
9023 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9028 if (copy_from_user(smb_data
, buf
, size
)) {
9029 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9034 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9042 case SSD_CMD_SMBUS_READ_BLOCK
: {
9043 struct ssd_smbus_op_info smbus_info
;
9044 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9049 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9050 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9055 saddr
= smbus_info
.saddr
;
9056 command
= smbus_info
.cmd
;
9057 buf
= smbus_info
.buf
;
9058 size
= smbus_info
.size
;
9060 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9065 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9070 if (copy_to_user(buf
, smb_data
, size
)) {
9071 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9079 case SSD_CMD_BM_GET_VER
: {
9082 ret
= ssd_bm_get_version(dev
, &ver
);
9087 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9088 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9096 case SSD_CMD_BM_GET_NR_CAP
: {
9099 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9104 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9105 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9113 case SSD_CMD_BM_CAP_LEARNING
: {
9114 ret
= ssd_bm_enter_cap_learning(dev
);
9123 case SSD_CMD_CAP_LEARN
: {
9126 ret
= ssd_cap_learn(dev
, &cap
);
9131 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9132 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9140 case SSD_CMD_GET_CAP_STATUS
: {
9143 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9147 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9148 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9156 case SSD_CMD_RAM_READ
: {
9157 struct ssd_ram_op_info ram_info
;
9160 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9163 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9164 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9169 ofs
= ram_info
.start
;
9170 length
= ram_info
.length
;
9172 ctrl_idx
= ram_info
.ctrl_idx
;
9174 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9179 kbuf
= kmalloc(len
, GFP_KERNEL
);
9185 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9186 if ((length
- rlen
) < len
) {
9187 len
= length
- rlen
;
9190 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9195 if (copy_to_user(buf
, kbuf
, len
)) {
9206 case SSD_CMD_RAM_WRITE
: {
9207 struct ssd_ram_op_info ram_info
;
9210 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9213 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9214 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9218 ofs
= ram_info
.start
;
9219 length
= ram_info
.length
;
9221 ctrl_idx
= ram_info
.ctrl_idx
;
9223 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9228 kbuf
= kmalloc(len
, GFP_KERNEL
);
9234 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9235 if ((length
- wlen
) < len
) {
9236 len
= length
- wlen
;
9239 if (copy_from_user(kbuf
, buf
, len
)) {
9244 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9255 case SSD_CMD_NAND_READ_ID
: {
9256 struct ssd_flash_op_info flash_info
;
9257 int chip_no
, chip_ce
, length
, ctrl_idx
;
9259 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9260 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9265 chip_no
= flash_info
.flash
;
9266 chip_ce
= flash_info
.chip
;
9267 ctrl_idx
= flash_info
.ctrl_idx
;
9268 buf
= flash_info
.buf
;
9269 length
= dev
->hw_info
.id_size
;
9271 //kbuf = kmalloc(length, GFP_KERNEL);
9272 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9277 memset(kbuf
, 0, length
);
9279 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9285 if (copy_to_user(buf
, kbuf
, length
)) {
9296 case SSD_CMD_NAND_READ
: { //with oob
9297 struct ssd_flash_op_info flash_info
;
9299 int flash
, chip
, page
, ctrl_idx
;
9302 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9303 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9308 flash
= flash_info
.flash
;
9309 chip
= flash_info
.chip
;
9310 page
= flash_info
.page
;
9311 buf
= flash_info
.buf
;
9312 ctrl_idx
= flash_info
.ctrl_idx
;
9314 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9316 kbuf
= kmalloc(length
, GFP_KERNEL
);
9322 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9323 if (ret
&& (-EIO
!= ret
)) {
9328 if (copy_to_user(buf
, kbuf
, length
)) {
9340 case SSD_CMD_NAND_WRITE
: {
9341 struct ssd_flash_op_info flash_info
;
9342 int flash
, chip
, page
, ctrl_idx
;
9345 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9346 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9351 flash
= flash_info
.flash
;
9352 chip
= flash_info
.chip
;
9353 page
= flash_info
.page
;
9354 buf
= flash_info
.buf
;
9355 ctrl_idx
= flash_info
.ctrl_idx
;
9357 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9359 kbuf
= kmalloc(length
, GFP_KERNEL
);
9365 if (copy_from_user(kbuf
, buf
, length
)) {
9371 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9381 case SSD_CMD_NAND_ERASE
: {
9382 struct ssd_flash_op_info flash_info
;
9383 int flash
, chip
, page
, ctrl_idx
;
9385 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9386 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9391 flash
= flash_info
.flash
;
9392 chip
= flash_info
.chip
;
9393 page
= flash_info
.page
;
9394 ctrl_idx
= flash_info
.ctrl_idx
;
9396 if ((page
% dev
->hw_info
.page_count
) != 0) {
9401 //hio_warn("erase fs = %llx\n", ofs);
9402 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9410 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9411 struct ssd_flash_op_info flash_info
;
9413 int flash
, chip
, page
, ctrl_idx
;
9415 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9416 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9421 flash
= flash_info
.flash
;
9422 chip
= flash_info
.chip
;
9423 page
= flash_info
.page
;
9424 buf
= flash_info
.buf
;
9425 ctrl_idx
= flash_info
.ctrl_idx
;
9427 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9429 kbuf
= kmalloc(length
, GFP_KERNEL
);
9435 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9436 if (-EIO
== ret
) { //ingore EIO
9444 if (copy_to_user(buf
, kbuf
, length
)) {
9454 case SSD_CMD_UPDATE_BBT
: {
9455 struct ssd_flash_op_info flash_info
;
9456 int ctrl_idx
, flash
;
9458 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9459 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9464 ctrl_idx
= flash_info
.ctrl_idx
;
9465 flash
= flash_info
.flash
;
9466 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9474 case SSD_CMD_CLEAR_ALARM
:
9475 ssd_clear_alarm(dev
);
9478 case SSD_CMD_SET_ALARM
:
9483 ret
= ssd_do_reset(dev
);
9486 case SSD_CMD_RELOAD_FW
:
9488 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9489 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9490 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9491 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9496 case SSD_CMD_UNLOAD_DEV
: {
9497 if (atomic_read(&dev
->refcnt
)) {
9503 ssd_save_smart(dev
);
9505 ret
= ssd_flush(dev
);
9510 /* cleanup the block device */
9511 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9512 mutex_lock(&dev
->gd_mutex
);
9513 ssd_cleanup_blkdev(dev
);
9514 ssd_cleanup_queue(dev
);
9515 mutex_unlock(&dev
->gd_mutex
);
9521 case SSD_CMD_LOAD_DEV
: {
9523 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9528 ret
= ssd_init_smart(dev
);
9530 hio_warn("%s: init info: failed\n", dev
->name
);
9534 ret
= ssd_init_queue(dev
);
9536 hio_warn("%s: init queue failed\n", dev
->name
);
9539 ret
= ssd_init_blkdev(dev
);
9541 hio_warn("%s: register block device: failed\n", dev
->name
);
9544 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9549 case SSD_CMD_UPDATE_VP
: {
9551 uint32_t new_vp
, new_vp1
= 0;
9553 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9558 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9559 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9564 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9569 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9570 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9572 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9573 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9574 new_vp1
= val
& 0x3FF;
9576 new_vp1
= val
& 0x7FFF;
9579 if (new_vp1
== new_vp
) {
9584 /*if (new_vp == dev->hw_info.valid_pages) {
9589 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9591 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9596 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9597 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9598 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9604 dev
->hw_info
.valid_pages
= new_vp
;
9605 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9606 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9607 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9612 case SSD_CMD_FULL_RESET
: {
9613 ret
= ssd_full_reset(dev
);
9617 case SSD_CMD_GET_NR_LOG
: {
9618 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9625 case SSD_CMD_GET_LOG
: {
9626 uint32_t length
= dev
->rom_info
.log_sz
;
9630 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9638 case SSD_CMD_LOG_LEVEL
: {
9640 if (copy_from_user(&level
, argp
, sizeof(int))) {
9641 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9646 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9647 level
= SSD_LOG_LEVEL_ERR
;
9650 //just for showing log, no need to protect
9655 case SSD_CMD_OT_PROTECT
: {
9658 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9659 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9664 ssd_set_ot_protect(dev
, !!protect
);
9668 case SSD_CMD_GET_OT_STATUS
: {
9669 int status
= ssd_get_ot_status(dev
, &status
);
9671 if (copy_to_user(argp
, &status
, sizeof(int))) {
9672 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9679 case SSD_CMD_CLEAR_LOG
: {
9680 ret
= ssd_clear_log(dev
);
9684 case SSD_CMD_CLEAR_SMART
: {
9685 ret
= ssd_clear_smart(dev
);
9689 case SSD_CMD_CLEAR_WARNING
: {
9690 ret
= ssd_clear_warning(dev
);
9694 case SSD_CMD_SW_LOG
: {
9695 struct ssd_sw_log_info sw_log
;
9697 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9698 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9703 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9707 case SSD_CMD_GET_LABEL
: {
9709 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9714 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9715 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9722 case SSD_CMD_GET_VERSION
: {
9723 struct ssd_version_info ver
;
9725 mutex_lock(&dev
->fw_mutex
);
9726 ret
= __ssd_get_version(dev
, &ver
);
9727 mutex_unlock(&dev
->fw_mutex
);
9732 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9733 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9740 case SSD_CMD_GET_TEMPERATURE
: {
9743 mutex_lock(&dev
->fw_mutex
);
9744 ret
= __ssd_get_temperature(dev
, &temp
);
9745 mutex_unlock(&dev
->fw_mutex
);
9750 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9751 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9758 case SSD_CMD_GET_BMSTATUS
: {
9761 mutex_lock(&dev
->fw_mutex
);
9762 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9763 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9764 status
= SSD_BMSTATUS_WARNING
;
9766 status
= SSD_BMSTATUS_OK
;
9768 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9769 ret
= __ssd_bm_status(dev
, &status
);
9771 status
= SSD_BMSTATUS_OK
;
9773 mutex_unlock(&dev
->fw_mutex
);
9778 if (copy_to_user(argp
, &status
, sizeof(int))) {
9779 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9786 case SSD_CMD_GET_LABEL2
: {
9790 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9791 label
= &dev
->label
;
9792 length
= sizeof(struct ssd_label
);
9794 label
= &dev
->labelv3
;
9795 length
= sizeof(struct ssd_labelv3
);
9798 if (copy_to_user(argp
, label
, length
)) {
9806 ret
= ssd_flush(dev
);
9808 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9814 case SSD_CMD_SAVE_MD
: {
9817 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9818 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9823 dev
->save_md
= !!save_md
;
9827 case SSD_CMD_SET_WMODE
: {
9830 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9831 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9836 ret
= __ssd_set_wmode(dev
, new_wmode
);
9844 case SSD_CMD_GET_WMODE
: {
9845 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
9846 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9854 case SSD_CMD_GET_USER_WMODE
: {
9855 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
9856 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9864 case SSD_CMD_DEBUG
: {
9865 struct ssd_debug_info db_info
;
9872 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
9873 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9878 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
9884 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
9885 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
9890 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
9892 #ifdef SSD_OT_PROTECT
9894 if (db_info
.type
== SSD_DEBUG_NONE
) {
9895 ssd_check_temperature(dev
, SSD_OT_TEMP
);
9896 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
9897 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
9898 dev
->ot_delay
= SSD_OT_DELAY
;
9899 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
9906 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
9907 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
9908 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
9909 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
9913 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
9914 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
9920 case SSD_CMD_DRV_PARAM_INFO
: {
9921 struct ssd_drv_param_info drv_param
;
9923 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
9925 drv_param
.mode
= mode
;
9926 drv_param
.status_mask
= status_mask
;
9927 drv_param
.int_mode
= int_mode
;
9928 drv_param
.threaded_irq
= threaded_irq
;
9929 drv_param
.log_level
= log_level
;
9930 drv_param
.wmode
= wmode
;
9931 drv_param
.ot_protect
= ot_protect
;
9932 drv_param
.finject
= finject
;
9934 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
9935 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9951 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9952 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
9953 unsigned int cmd
, unsigned long arg
)
9955 struct ssd_device
*dev
;
9956 void __user
*argp
= (void __user
*)arg
;
9962 dev
= inode
->i_bdev
->bd_disk
->private_data
;
9967 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
9968 unsigned int cmd
, unsigned long arg
)
9970 struct ssd_device
*dev
;
9971 void __user
*argp
= (void __user
*)arg
;
9978 dev
= bdev
->bd_disk
->private_data
;
9986 struct hd_geometry geo
;
9987 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
9990 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9991 geo
.start
= get_start_sect(inode
->i_bdev
);
9993 geo
.start
= get_start_sect(bdev
);
9995 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
10004 ret
= ssd_flush(dev
);
10006 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
10014 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
10025 static void ssd_free_dev(struct kref
*kref
)
10027 struct ssd_device
*dev
;
10033 dev
= container_of(kref
, struct ssd_device
, kref
);
10037 ssd_put_index(dev
->slave
, dev
->idx
);
10042 static void ssd_put(struct ssd_device
*dev
)
10044 kref_put(&dev
->kref
, ssd_free_dev
);
10047 static int ssd_get(struct ssd_device
*dev
)
10049 kref_get(&dev
->kref
);
10054 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10055 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10057 struct ssd_device
*dev
;
10063 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10068 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10070 struct ssd_device
*dev
;
10076 dev
= bdev
->bd_disk
->private_data
;
10082 /*if (!try_module_get(dev->owner))
10088 atomic_inc(&dev
->refcnt
);
10093 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10094 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10096 struct ssd_device
*dev
;
10102 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10106 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10107 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10109 struct ssd_device
*dev
;
10115 dev
= disk
->private_data
;
10120 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10122 struct ssd_device
*dev
;
10128 dev
= disk
->private_data
;
10134 atomic_dec(&dev
->refcnt
);
10138 //module_put(dev->owner);
10139 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10144 static struct block_device_operations ssd_fops
= {
10145 .owner
= THIS_MODULE
,
10146 .open
= ssd_block_open
,
10147 .release
= ssd_block_release
,
10148 .ioctl
= ssd_block_ioctl
,
10149 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10150 .getgeo
= ssd_block_getgeo
,
10154 static void ssd_init_trim(ssd_device_t
*dev
)
10156 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10157 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10160 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10162 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10163 dev
->rq
->limits
.discard_zeroes_data
= 1;
10164 dev
->rq
->limits
.discard_alignment
= 4096;
10165 dev
->rq
->limits
.discard_granularity
= 4096;
10167 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10168 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10170 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
10175 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10179 blk_cleanup_queue(dev
->rq
);
10183 static int ssd_init_queue(struct ssd_device
*dev
)
10185 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10186 if (dev
->rq
== NULL
) {
10187 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10188 goto out_init_queue
;
10191 /* must be first */
10192 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10194 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10195 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10196 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10197 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10199 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10200 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10203 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10204 blk_queue_hardsect_size(dev
->rq
, 512);
10206 blk_queue_logical_block_size(dev
->rq
, 512);
10208 /* not work for make_request based drivers(bio) */
10209 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10211 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10213 dev
->rq
->queuedata
= dev
;
10215 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10216 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10219 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10220 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10223 ssd_init_trim(dev
);
10231 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10233 del_gendisk(dev
->gd
);
10236 static int ssd_init_blkdev(struct ssd_device
*dev
)
10242 dev
->gd
= alloc_disk(ssd_minors
);
10244 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10247 dev
->gd
->major
= dev
->major
;
10248 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10249 dev
->gd
->fops
= &ssd_fops
;
10250 dev
->gd
->queue
= dev
->rq
;
10251 dev
->gd
->private_data
= dev
;
10253 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10255 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
10258 device_add_disk(&dev
->pdev
->dev
, dev
->gd
);
10260 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
10270 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10271 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10272 unsigned int cmd
, unsigned long arg
)
10274 static long ssd_ioctl(struct file
*file
,
10275 unsigned int cmd
, unsigned long arg
)
10278 struct ssd_device
*dev
;
10284 dev
= file
->private_data
;
10289 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
10292 static int ssd_open(struct inode
*inode
, struct file
*file
)
10294 struct ssd_device
*dev
= NULL
;
10295 struct ssd_device
*n
= NULL
;
10299 if (!inode
|| !file
) {
10303 idx
= iminor(inode
);
10305 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10306 if (dev
->idx
== idx
) {
10316 file
->private_data
= dev
;
10323 static int ssd_release(struct inode
*inode
, struct file
*file
)
10325 struct ssd_device
*dev
;
10331 dev
= file
->private_data
;
10338 file
->private_data
= NULL
;
10343 static int ssd_reload_ssd_ptr(struct ssd_device
*dev
)
10345 ssd_reset_resp_ptr(dev
);
10347 //update base reg address
10348 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10350 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
10353 //update response base reg address
10354 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
10355 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
10360 static struct file_operations ssd_cfops
= {
10361 .owner
= THIS_MODULE
,
10363 .release
= ssd_release
,
10364 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10365 .ioctl
= ssd_ioctl
,
10367 .unlocked_ioctl
= ssd_ioctl
,
10371 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10377 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10378 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10379 devfs_remove("c%s", dev
->name
);
10380 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10381 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10382 devfs_remove("c%s", dev
->name
);
10383 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10384 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10385 devfs_remove("c%s", dev
->name
);
10386 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10387 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10389 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10393 static int ssd_init_chardev(struct ssd_device
*dev
)
10401 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10402 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10406 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10408 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10409 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10413 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10415 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10416 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10420 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10422 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10423 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10424 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10425 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10426 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10427 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10429 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10435 static int ssd_check_hw(struct ssd_device
*dev
)
10437 uint32_t test_data
= 0x55AA5AA5;
10438 uint32_t read_data
;
10440 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10441 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
10442 if (read_data
!= ~(test_data
)) {
10443 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
10450 static int ssd_check_fw(struct ssd_device
*dev
)
10455 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10459 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10460 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10461 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10465 msleep(SSD_INIT_WAIT
);
10468 if (!(val
& 0x1)) {
10469 /* controller fw status */
10470 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10472 } else if (!((val
>> 8) & 0x1)) {
10473 /* controller state */
10474 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10478 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10480 dev
->reload_fw
= 1;
10486 static int ssd_init_fw_info(struct ssd_device
*dev
)
10491 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10492 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10493 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10494 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10497 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10499 ret
= ssd_check_fw(dev
);
10505 /* skip error if not in standard mode */
10506 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10512 static int ssd_check_clock(struct ssd_device
*dev
)
10517 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10521 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10524 if (!((val
>> 4 ) & 0x1)) {
10525 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10526 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10527 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10532 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10533 if (!((val
>> 5 ) & 0x1)) {
10534 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10535 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10536 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10540 if (!((val
>> 6 ) & 0x1)) {
10541 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10542 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10543 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10547 if (!((val
>> 7 ) & 0x1)) {
10548 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10549 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10550 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10559 static int ssd_check_volt(struct ssd_device
*dev
)
10566 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10570 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10572 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10573 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10574 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10575 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10576 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10577 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10578 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10582 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10583 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10584 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10585 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10586 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10592 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10593 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10594 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10595 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10596 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10597 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10598 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10602 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10603 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10604 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10605 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10606 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10615 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10619 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10623 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10624 if (!((val
>> 8) & 0x1)) {
10625 /* controller state */
10626 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10630 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10634 if (((val
>> 9 ) & 0x1)) {
10635 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10636 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
10643 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10647 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10652 ret
= ssd_check_clock(dev
);
10658 /* skip error if not in standard mode */
10659 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10665 static int ssd_check_controller(struct ssd_device
*dev
)
10669 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10674 ret
= ssd_check_reset_sync(dev
);
10680 /* skip error if not in standard mode */
10681 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10687 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10689 uint32_t test_data
= 0x55AA5AA5;
10691 int reg_base
, reg_sz
;
10696 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10701 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10703 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10707 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10708 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10709 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10710 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10711 if (val
!= ~(test_data
)) {
10712 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10718 ret
= ssd_check_volt(dev
);
10724 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10725 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10726 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10728 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10730 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10732 if (!((val
>> 1) & 0x1)) {
10734 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10735 msleep(SSD_INIT_WAIT
);
10736 goto check_ram_status
;
10738 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10739 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10744 reg_base
+= reg_sz
;
10749 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10750 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10751 if (!((val
>> 31) & 0x1)) {
10755 msleep(SSD_INIT_WAIT
);
10757 if ((val
>> 31) & 0x1) {
10758 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10765 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10769 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10770 if (val
== (uint32_t)-1) {
10771 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10774 dev
->protocol_info
.ver
= val
;
10776 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10777 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10778 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10780 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10781 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10783 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10784 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10786 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10787 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10793 static int ssd_init_hw_info(struct ssd_device
*dev
)
10801 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10802 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10803 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10805 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10806 hio_warn("%s: response info error\n", dev
->name
);
10811 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10812 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10813 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10814 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10815 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10817 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10818 hio_warn("%s: cmd info error\n", dev
->name
);
10824 if (ssd_check_hw_bh(dev
)) {
10825 hio_warn("%s: check hardware status failed\n", dev
->name
);
10830 if (ssd_check_controller(dev
)) {
10831 hio_warn("%s: check controller state failed\n", dev
->name
);
10836 /* nr controller : read again*/
10837 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10838 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
10840 /* nr ctrl configured */
10841 nr_ctrl
= (val
>> 20) & 0xF;
10842 if (0 == dev
->hw_info
.nr_ctrl
) {
10843 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
10846 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
10847 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
10848 if (mode
<= SSD_DRV_MODE_STANDARD
) {
10854 if (ssd_check_controller_bh(dev
)) {
10855 hio_warn("%s: check controller failed\n", dev
->name
);
10860 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10861 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
10862 if ((val
& 0xF) != 0xF) {
10863 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
10866 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
10867 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
10873 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10874 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10875 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
10876 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
10877 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
10879 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10880 dev
->hw_info
.max_ch
= 1;
10881 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
10883 /* set max channel 32 */
10884 dev
->hw_info
.max_ch
= 32;
10887 if (0 == dev
->hw_info
.nr_chip
) {
10889 dev
->hw_info
.nr_chip
= 1;
10893 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
10894 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
10896 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
10897 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
10904 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10905 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
10906 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
10907 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
10908 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
10909 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10910 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
10912 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
10917 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
10919 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
10920 hio_warn("%s: ram info error\n", dev
->name
);
10925 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10926 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
10928 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
10929 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
10931 if (0 == dev
->hw_info
.log_sz
) {
10932 hio_warn("%s: log size error\n", dev
->name
);
10937 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
10938 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
10939 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10940 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10941 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
10942 hio_warn("%s: bbt info error\n", dev
->name
);
10948 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
10949 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
10950 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10951 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10953 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
10955 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
10956 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10957 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
10958 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
10959 hio_warn("%s: md info error\n", dev
->name
);
10965 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10966 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
10968 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
10969 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
10974 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10975 if (dev
->hw_info
.nr_ctrl
> 1) {
10976 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
10977 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
10978 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
10981 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
10982 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
10983 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
10985 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
10986 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
10987 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
10989 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
10990 dev
->hw_info
.bbf_pages
= val
& 0xFF;
10991 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
10993 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
10994 hio_warn("%s: flash info error\n", dev
->name
);
11000 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
11002 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
11003 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11004 dev
->hw_info
.valid_pages
= val
& 0x3FF;
11005 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
11007 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
11008 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
11010 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
11011 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
11012 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
11017 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
11018 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
11019 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
11020 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
11021 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
11023 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
11024 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
11031 if (mode
< SSD_DRV_MODE_DEBUG
) {
11032 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
11033 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
11034 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
11037 /* extend hardware info */
11038 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
11039 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
11041 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11042 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11043 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11046 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11047 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11048 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11051 /* power loss protect */
11052 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11053 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11054 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11056 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11060 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11061 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11064 /* skip error if not in standard mode */
11065 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11071 static void ssd_cleanup_response(struct ssd_device
*dev
)
11073 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11074 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11076 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11077 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11080 static int ssd_init_response(struct ssd_device
*dev
)
11082 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11083 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11085 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11086 if (!dev
->resp_msg_base
) {
11087 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11088 goto out_alloc_resp_msg
;
11090 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11092 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11093 if (!dev
->resp_ptr_base
){
11094 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11095 goto out_alloc_resp_ptr
;
11097 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11098 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11100 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11101 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11105 out_alloc_resp_ptr
:
11106 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11107 out_alloc_resp_msg
:
11111 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11113 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11116 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11117 kfree(dev
->cmd
[i
].sgl
);
11120 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11124 static int ssd_init_cmd(struct ssd_device
*dev
)
11126 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11127 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11128 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11131 spin_lock_init(&dev
->cmd_lock
);
11133 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11134 if (!dev
->msg_base
) {
11135 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11136 goto out_alloc_msg
;
11139 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11141 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11142 goto out_alloc_cmd
;
11144 memset(dev
->cmd
, 0, cmd_sz
);
11146 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11147 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11148 if (!dev
->cmd
[i
].sgl
) {
11149 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11150 goto out_alloc_sgl
;
11153 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11154 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11156 dev
->cmd
[i
].dev
= dev
;
11157 dev
->cmd
[i
].tag
= i
;
11158 dev
->cmd
[i
].flag
= 0;
11160 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11163 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11164 dev
->scmd
= ssd_dispatch_cmd
;
11166 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11168 dev
->scmd
= ssd_send_cmd_db
;
11170 dev
->scmd
= ssd_send_cmd
;
11177 for (i
--; i
>=0; i
--) {
11178 kfree(dev
->cmd
[i
].sgl
);
11182 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11187 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11188 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11190 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11192 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11196 return IRQ_WAKE_THREAD
;
11199 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11201 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11202 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11203 struct ssd_cmd
*cmd
;
11204 union ssd_response_msq __msg
;
11205 union ssd_response_msq
*msg
= &__msg
;
11207 uint32_t resp_idx
= queue
->resp_idx
;
11208 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11209 uint32_t end_resp_idx
;
11211 if (unlikely(resp_idx
== new_resp_idx
)) {
11215 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11218 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11221 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11222 msg
->u64_msg
= *u64_msg
;
11224 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11225 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11228 /* clear the resp msg */
11229 *u64_msg
= (uint64_t)(-1);
11231 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11232 /*if (unlikely(!cmd->bio)) {
11233 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11234 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11238 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11239 cmd
->errors
= -EIO
;
11243 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11247 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11248 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11249 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11250 queue_work(dev
->workq
, &dev
->log_work
);
11254 if (unlikely(msg
->resp_msg
.status
)) {
11255 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11256 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11257 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11260 ssd_set_alarm(dev
);
11261 queue
->io_stat
.nr_rwerr
++;
11262 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11264 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11265 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11267 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11269 queue
->io_stat
.nr_ioerr
++;
11272 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11273 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11274 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11276 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11278 }while (resp_idx
!= end_resp_idx
);
11280 queue
->resp_idx
= new_resp_idx
;
11282 return IRQ_HANDLED
;
11286 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11287 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11289 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11292 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11293 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11294 struct ssd_cmd
*cmd
;
11295 union ssd_response_msq __msg
;
11296 union ssd_response_msq
*msg
= &__msg
;
11298 uint32_t resp_idx
= queue
->resp_idx
;
11299 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11300 uint32_t end_resp_idx
;
11302 if (unlikely(resp_idx
== new_resp_idx
)) {
11306 #if (defined SSD_ESCAPE_IRQ)
11307 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11308 dev
->irq_cpu
= smp_processor_id();
11312 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11315 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11318 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11319 msg
->u64_msg
= *u64_msg
;
11321 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11322 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11325 /* clear the resp msg */
11326 *u64_msg
= (uint64_t)(-1);
11328 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11329 /*if (unlikely(!cmd->bio)) {
11330 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11331 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11335 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11336 cmd
->errors
= -EIO
;
11340 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11344 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11345 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11346 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11347 queue_work(dev
->workq
, &dev
->log_work
);
11351 if (unlikely(msg
->resp_msg
.status
)) {
11352 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11353 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11354 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11357 ssd_set_alarm(dev
);
11358 queue
->io_stat
.nr_rwerr
++;
11359 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11361 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11362 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11364 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11366 queue
->io_stat
.nr_ioerr
++;
11369 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11370 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11371 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11373 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11375 }while (resp_idx
!= end_resp_idx
);
11377 queue
->resp_idx
= new_resp_idx
;
11379 return IRQ_HANDLED
;
11382 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11383 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11385 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11389 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11390 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11393 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11395 ret
= ssd_interrupt(irq
, dev_id
);
11399 if (IRQ_HANDLED
== ret
) {
11400 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11406 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11410 for (i
=0; i
<dev
->nr_queue
; i
++) {
11411 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11415 static void ssd_free_irq(struct ssd_device
*dev
)
11419 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11420 if (SSD_INT_MSIX
== dev
->int_mode
) {
11421 for (i
=0; i
<dev
->nr_queue
; i
++) {
11422 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11427 for (i
=0; i
<dev
->nr_queue
; i
++) {
11428 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11431 if (SSD_INT_MSIX
== dev
->int_mode
) {
11432 pci_disable_msix(dev
->pdev
);
11433 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11434 pci_disable_msi(dev
->pdev
);
11439 static int ssd_init_irq(struct ssd_device
*dev
)
11441 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11442 const struct cpumask
*cpu_mask
= NULL
;
11443 static int cpu_affinity
= 0;
11445 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11446 const struct cpumask
*mask
= NULL
;
11447 static int cpu
= 0;
11451 unsigned long flags
= 0;
11454 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11456 #ifdef SSD_ESCAPE_IRQ
11460 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11461 dev
->nr_queue
= SSD_MSIX_VEC
;
11462 for (i
=0; i
<dev
->nr_queue
; i
++) {
11463 dev
->entry
[i
].entry
= i
;
11466 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11469 } else if (ret
> 0) {
11470 dev
->nr_queue
= ret
;
11472 hio_warn("%s: can not enable msix\n", dev
->name
);
11474 ssd_set_alarm(dev
);
11479 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11480 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11481 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11482 cpu
= cpumask_first(mask
);
11484 for (i
=0; i
<dev
->nr_queue
; i
++) {
11485 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11486 cpu
= cpumask_next(cpu
, mask
);
11487 if (cpu
>= nr_cpu_ids
) {
11488 cpu
= cpumask_first(mask
);
11493 dev
->int_mode
= SSD_INT_MSIX
;
11494 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11495 ret
= pci_enable_msi(dev
->pdev
);
11497 hio_warn("%s: can not enable msi\n", dev
->name
);
11499 ssd_set_alarm(dev
);
11504 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11506 dev
->int_mode
= SSD_INT_MSI
;
11509 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11511 dev
->int_mode
= SSD_INT_LEGACY
;
11514 for (i
=0; i
<dev
->nr_queue
; i
++) {
11515 if (dev
->nr_queue
> 1) {
11516 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11518 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11521 dev
->queue
[i
].dev
= dev
;
11522 dev
->queue
[i
].idx
= i
;
11524 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11525 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11527 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11528 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11529 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11530 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11532 dev
->queue
[i
].cmd
= dev
->cmd
;
11535 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11536 flags
= IRQF_SHARED
;
11541 for (i
=0; i
<dev
->nr_queue
; i
++) {
11542 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11543 if (threaded_irq
) {
11544 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11545 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11546 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11548 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11551 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11552 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11554 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11558 hio_warn("%s: request irq failed\n", dev
->name
);
11560 ssd_set_alarm(dev
);
11561 goto out_request_irq
;
11564 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11565 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11566 if (SSD_INT_MSIX
== dev
->int_mode
) {
11567 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11568 cpu_affinity
= cpumask_first(cpu_mask
);
11571 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11572 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11573 if (cpu_affinity
>= nr_cpu_ids
) {
11574 cpu_affinity
= cpumask_first(cpu_mask
);
11583 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11584 if (SSD_INT_MSIX
== dev
->int_mode
) {
11585 for (j
=0; j
<dev
->nr_queue
; j
++) {
11586 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11591 for (i
--; i
>=0; i
--) {
11592 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11595 if (SSD_INT_MSIX
== dev
->int_mode
) {
11596 pci_disable_msix(dev
->pdev
);
11597 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11598 pci_disable_msi(dev
->pdev
);
11605 static void ssd_initial_log(struct ssd_device
*dev
)
11608 uint32_t speed
, width
;
11610 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11614 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11616 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
11619 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11621 width
= (val
>> 4)& 0x3F;
11622 if (0x1 == speed
) {
11623 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11624 } else if (0x2 == speed
) {
11625 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11627 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11629 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11634 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11635 static void ssd_hwmon_worker(void *data
)
11637 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11639 static void ssd_hwmon_worker(struct work_struct
*work
)
11641 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11644 if (ssd_check_hw(dev
)) {
11645 //hio_err("%s: check hardware failed\n", dev->name);
11649 ssd_check_clock(dev
);
11650 ssd_check_volt(dev
);
11652 ssd_mon_boardvolt(dev
);
11655 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11656 static void ssd_tempmon_worker(void *data
)
11658 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11660 static void ssd_tempmon_worker(struct work_struct
*work
)
11662 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11665 if (ssd_check_hw(dev
)) {
11666 //hio_err("%s: check hardware failed\n", dev->name);
11674 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11675 static void ssd_capmon_worker(void *data
)
11677 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11679 static void ssd_capmon_worker(struct work_struct
*work
)
11681 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11684 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11687 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11691 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11695 /* fault before? */
11696 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11697 ret
= ssd_check_pl_cap_fast(dev
);
11704 ret
= ssd_do_cap_learn(dev
, &cap
);
11706 hio_err("%s: cap learn failed\n", dev
->name
);
11707 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11711 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11713 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11714 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11717 //use the fw event id?
11718 if (cap
< cap_threshold
) {
11719 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11720 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11722 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11723 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11724 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11729 static void ssd_routine_start(void *data
)
11731 struct ssd_device
*dev
;
11738 dev
->routine_tick
++;
11740 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11741 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11742 queue_work(dev
->workq
, &dev
->log_work
);
11745 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11746 queue_work(dev
->workq
, &dev
->hwmon_work
);
11749 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11750 queue_work(dev
->workq
, &dev
->capmon_work
);
11753 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11754 /* CAP fault? check again */
11755 queue_work(dev
->workq
, &dev
->capmon_work
);
11758 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11759 queue_work(dev
->workq
, &dev
->tempmon_work
);
11762 /* schedule routine */
11763 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11766 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11768 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11771 (void)ssd_del_timer(&dev
->routine_timer
);
11773 (void)ssd_del_timer(&dev
->bm_timer
);
11776 static int ssd_init_routine(struct ssd_device
*dev
)
11778 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11781 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11782 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
11783 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
11784 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
11785 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
11787 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
11788 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
11789 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
11790 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
11794 ssd_initial_log(dev
);
11796 /* schedule bm routine */
11797 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
11799 /* schedule routine */
11800 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
11806 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11809 ssd_remove_one (struct pci_dev
*pdev
)
11811 struct ssd_device
*dev
;
11817 dev
= pci_get_drvdata(pdev
);
11822 list_del_init(&dev
->list
);
11824 ssd_unregister_sysfs(dev
);
11826 /* offline firstly */
11827 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
11829 /* clean work queue first */
11831 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
11832 ssd_cleanup_workq(dev
);
11836 (void)ssd_flush(dev
);
11837 (void)ssd_save_md(dev
);
11841 ssd_save_smart(dev
);
11844 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
11845 ssd_cleanup_blkdev(dev
);
11849 ssd_cleanup_chardev(dev
);
11852 /* clean routine */
11854 ssd_cleanup_routine(dev
);
11857 ssd_cleanup_queue(dev
);
11859 ssd_cleanup_tag(dev
);
11860 ssd_cleanup_thread(dev
);
11864 ssd_cleanup_dcmd(dev
);
11865 ssd_cleanup_cmd(dev
);
11866 ssd_cleanup_response(dev
);
11869 ssd_cleanup_log(dev
);
11872 if (dev
->reload_fw
) { //reload fw
11873 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
11876 /* unmap physical adress */
11877 #ifdef LINUX_SUSE_OS
11878 iounmap(dev
->ctrlp
);
11880 pci_iounmap(pdev
, dev
->ctrlp
);
11883 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
11885 pci_disable_device(pdev
);
11887 pci_set_drvdata(pdev
, NULL
);
11893 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11896 ssd_init_one(struct pci_dev
*pdev
,
11897 const struct pci_device_id
*ent
)
11899 struct ssd_device
*dev
;
11903 if (!pdev
|| !ent
) {
11908 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
11911 goto out_alloc_dev
;
11913 memset(dev
, 0, sizeof(struct ssd_device
));
11915 dev
->owner
= THIS_MODULE
;
11917 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
11921 dev
->idx
= ssd_get_index(dev
->slave
);
11922 if (dev
->idx
< 0) {
11924 goto out_get_index
;
11928 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
11929 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
11931 dev
->major
= ssd_major
;
11932 dev
->cmajor
= ssd_cmajor
;
11934 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
11935 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
11936 dev
->major
= ssd_major_sl
;
11940 do_gettimeofday(&tv
);
11941 dev
->reset_time
= tv
.tv_sec
;
11943 atomic_set(&(dev
->refcnt
), 0);
11944 atomic_set(&(dev
->tocnt
), 0);
11946 mutex_init(&dev
->fw_mutex
);
11949 mutex_init(&dev
->gd_mutex
);
11952 pci_set_drvdata(pdev
, dev
);
11954 kref_init(&dev
->kref
);
11956 ret
= pci_enable_device(pdev
);
11958 hio_warn("%s: can not enable device\n", dev
->name
);
11959 goto out_enable_device
;
11962 pci_set_master(pdev
);
11964 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11965 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
11967 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
11970 hio_warn("%s: set dma mask: failed\n", dev
->name
);
11971 goto out_set_dma_mask
;
11974 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11975 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
11977 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
11980 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
11981 goto out_set_dma_mask
;
11984 dev
->mmio_base
= pci_resource_start(pdev
, 0);
11985 dev
->mmio_len
= pci_resource_len(pdev
, 0);
11987 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
11988 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
11990 goto out_request_mem_region
;
11993 /* 2.6.9 kernel bug */
11994 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
11996 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
11998 goto out_pci_iomap
;
12001 ret
= ssd_check_hw(dev
);
12003 hio_err("%s: check hardware failed\n", dev
->name
);
12007 ret
= ssd_init_protocol_info(dev
);
12009 hio_err("%s: init protocol info failed\n", dev
->name
);
12010 goto out_init_protocol_info
;
12014 ssd_clear_alarm(dev
);
12016 ret
= ssd_init_fw_info(dev
);
12018 hio_err("%s: init firmware info failed\n", dev
->name
);
12020 ssd_set_alarm(dev
);
12021 goto out_init_fw_info
;
12029 ret
= ssd_init_rom_info(dev
);
12031 hio_err("%s: init rom info failed\n", dev
->name
);
12033 ssd_set_alarm(dev
);
12034 goto out_init_rom_info
;
12037 ret
= ssd_init_label(dev
);
12039 hio_err("%s: init label failed\n", dev
->name
);
12041 ssd_set_alarm(dev
);
12042 goto out_init_label
;
12045 ret
= ssd_init_workq(dev
);
12047 hio_warn("%s: init workq failed\n", dev
->name
);
12048 goto out_init_workq
;
12050 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12052 ret
= ssd_init_log(dev
);
12054 hio_err("%s: init log failed\n", dev
->name
);
12056 ssd_set_alarm(dev
);
12060 ret
= ssd_init_smart(dev
);
12062 hio_err("%s: init info failed\n", dev
->name
);
12064 ssd_set_alarm(dev
);
12065 goto out_init_smart
;
12069 ret
= ssd_init_hw_info(dev
);
12071 hio_err("%s: init hardware info failed\n", dev
->name
);
12073 ssd_set_alarm(dev
);
12074 goto out_init_hw_info
;
12082 ret
= ssd_init_sensor(dev
);
12084 hio_err("%s: init sensor failed\n", dev
->name
);
12086 ssd_set_alarm(dev
);
12087 goto out_init_sensor
;
12090 ret
= ssd_init_pl_cap(dev
);
12092 hio_err("%s: int pl_cap failed\n", dev
->name
);
12094 ssd_set_alarm(dev
);
12095 goto out_init_pl_cap
;
12099 ret
= ssd_check_init_state(dev
);
12101 hio_err("%s: check init state failed\n", dev
->name
);
12103 ssd_set_alarm(dev
);
12104 goto out_check_init_state
;
12107 ret
= ssd_init_response(dev
);
12109 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12110 goto out_init_response
;
12113 ret
= ssd_init_cmd(dev
);
12115 hio_warn("%s: init msg failed\n", dev
->name
);
12119 ret
= ssd_init_dcmd(dev
);
12121 hio_warn("%s: init cmd failed\n", dev
->name
);
12122 goto out_init_dcmd
;
12125 ret
= ssd_init_irq(dev
);
12127 hio_warn("%s: init irq failed\n", dev
->name
);
12131 ret
= ssd_init_thread(dev
);
12133 hio_warn("%s: init thread failed\n", dev
->name
);
12134 goto out_init_thread
;
12137 ret
= ssd_init_tag(dev
);
12139 hio_warn("%s: init tags failed\n", dev
->name
);
12140 goto out_init_tags
;
12144 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12146 ret
= ssd_init_queue(dev
);
12148 hio_warn("%s: init queue failed\n", dev
->name
);
12149 goto out_init_queue
;
12157 ret
= ssd_init_ot_protect(dev
);
12159 hio_err("%s: int ot_protect failed\n", dev
->name
);
12161 ssd_set_alarm(dev
);
12162 goto out_int_ot_protect
;
12165 ret
= ssd_init_wmode(dev
);
12167 hio_warn("%s: init write mode\n", dev
->name
);
12168 goto out_init_wmode
;
12171 /* init routine after hw is ready */
12172 ret
= ssd_init_routine(dev
);
12174 hio_warn("%s: init routine\n", dev
->name
);
12175 goto out_init_routine
;
12178 ret
= ssd_init_chardev(dev
);
12180 hio_warn("%s: register char device failed\n", dev
->name
);
12181 goto out_init_chardev
;
12185 ret
= ssd_init_blkdev(dev
);
12187 hio_warn("%s: register block device failed\n", dev
->name
);
12188 goto out_init_blkdev
;
12190 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12192 ret
= ssd_register_sysfs(dev
);
12194 hio_warn("%s: register sysfs failed\n", dev
->name
);
12195 goto out_register_sysfs
;
12200 list_add_tail(&dev
->list
, &ssd_list
);
12204 out_register_sysfs
:
12205 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12206 ssd_cleanup_blkdev(dev
);
12210 ssd_cleanup_chardev(dev
);
12215 ssd_cleanup_routine(dev
);
12219 out_int_ot_protect
:
12220 ssd_cleanup_queue(dev
);
12222 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12223 ssd_cleanup_tag(dev
);
12225 ssd_cleanup_thread(dev
);
12229 ssd_cleanup_dcmd(dev
);
12231 ssd_cleanup_cmd(dev
);
12233 ssd_cleanup_response(dev
);
12235 out_check_init_state
:
12242 ssd_cleanup_log(dev
);
12247 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12248 ssd_cleanup_workq(dev
);
12254 out_init_protocol_info
:
12256 #ifdef LINUX_SUSE_OS
12257 iounmap(dev
->ctrlp
);
12259 pci_iounmap(pdev
, dev
->ctrlp
);
12262 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12263 out_request_mem_region
:
12265 pci_disable_device(pdev
);
12267 pci_set_drvdata(pdev
, NULL
);
12275 static void ssd_cleanup_tasklet(void)
12278 for_each_online_cpu(i
) {
12279 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
/*
 * Per-CPU completion setup: initializes each CPU's done-queue list head and
 * a tasklet. Two alternative tasklet handlers (__ssd_done_db vs __ssd_done)
 * appear back to back below.
 * NOTE(review): extraction-garbled block — the condition that selects
 * between the two tasklet_init() calls (likely an #if/#else or a driver-mode
 * test), the enclosing braces and the return statement were lost. Restore
 * them from the upstream hio driver before compiling.
 */
12283 static int ssd_init_tasklet(void)
12287 for_each_online_cpu(i
) {
12288 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12291 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12293 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
12300 static struct pci_device_id ssd_pci_tbl
[] = {
12301 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12302 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12303 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12304 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12305 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
/*
 * PM suspend handler. Takes the device offline (sysfs, workq, ONLINE bit),
 * flushes data and metadata, saves SMART, tears down routine/thread/log,
 * optionally asks the card to reload firmware, unmaps the BAR and disables
 * the PCI device. Two signatures are kept: legacy pci_driver suspend
 * (<2.6.32, pci_dev + pm_message_t) and dev_pm_ops (struct device *).
 * NOTE(review): extraction-garbled block — statements are split across
 * lines, the leading original line numbers are fused into the text, and the
 * "if (...) { }" guard lines, #else/#endif pairs and the final return were
 * lost. Text kept byte-identical below; restore structure from the upstream
 * hio driver before compiling.
 */
12309 /*driver power management handler for pm_ops*/
12310 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12311 static int ssd_hio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
12314 static int ssd_hio_suspend(struct device
*ddev
)
12316 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12318 struct ssd_device
*dev
;
12325 dev
= pci_get_drvdata(pdev
);
12330 hio_warn("%s: suspend disk start.\n", dev
->name
);
12331 ssd_unregister_sysfs(dev
);
12333 /* offline firstly */
12334 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12336 /* clean work queue first */
12338 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12339 ssd_cleanup_workq(dev
);
/* flush user data and on-disk metadata before power loss */
12343 (void)ssd_flush(dev
);
12344 (void)ssd_save_md(dev
);
12348 ssd_save_smart(dev
);
12351 /* clean routine */
12353 ssd_cleanup_routine(dev
);
12356 ssd_cleanup_thread(dev
);
12361 ssd_cleanup_log(dev
);
12364 if (dev
->reload_fw
) { //reload fw
12365 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12368 /* unmap physical adress */
12370 #ifdef LINUX_SUSE_OS
12371 iounmap(dev
->ctrlp
);
12373 pci_iounmap(pdev
, dev
->ctrlp
);
12378 if (dev
->mmio_base
) {
12379 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12380 dev
->mmio_base
= 0;
12383 pci_disable_device(pdev
);
12385 hio_warn("%s: suspend disk finish.\n", dev
->name
);
/*
 * PM resume handler: re-enables the PCI device, restores DMA masks and the
 * BAR mapping, then replays the probe-time bring-up sequence (hw check,
 * fw/rom/label info, workq, log, smart, hw info, sensors, power-loss cap,
 * init-state check, IRQ, thread, OT protect, write mode, routine) and sets
 * the INIT_BD/ONLINE state bits. The tail is the goto-based error-unwind
 * chain mirroring the setup order, ending in "resume disk fail".
 * NOTE(review): extraction-garbled block — every "if (ret) { }" guard,
 * several #else/#endif lines, the returns and some unwind labels were lost;
 * the fused leading numbers are original file line numbers, not code. Text
 * kept byte-identical below; restore structure from the upstream hio driver
 * before compiling.
 */
12391 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12392 static int ssd_hio_resume(struct pci_dev
*pdev
)
12395 static int ssd_hio_resume(struct device
*ddev
)
12397 struct pci_dev
*pdev
= to_pci_dev(ddev
);
12399 struct ssd_device
*dev
= NULL
;
12407 dev
= pci_get_drvdata(pdev
);
12410 goto out_alloc_dev
;
12413 hio_warn("%s: resume disk start.\n", dev
->name
);
12414 ret
= pci_enable_device(pdev
);
12416 hio_warn("%s: can not enable device\n", dev
->name
);
12417 goto out_enable_device
;
12420 pci_set_master(pdev
);
/* 64-bit streaming + coherent DMA masks; API name changed in 2.6.31 */
12422 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12423 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
12425 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
12428 hio_warn("%s: set dma mask: failed\n", dev
->name
);
12429 goto out_set_dma_mask
;
12432 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
12433 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
12435 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
12438 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
12439 goto out_set_dma_mask
;
12442 dev
->mmio_base
= pci_resource_start(pdev
, 0);
12443 dev
->mmio_len
= pci_resource_len(pdev
, 0);
12445 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
12446 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
12448 goto out_request_mem_region
;
12451 /* 2.6.9 kernel bug */
12452 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
12454 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
12456 goto out_pci_iomap
;
12459 ret
= ssd_check_hw(dev
);
12461 hio_err("%s: check hardware failed\n", dev
->name
);
12466 ssd_clear_alarm(dev
);
12468 ret
= ssd_init_fw_info(dev
);
12470 hio_err("%s: init firmware info failed\n", dev
->name
);
12472 ssd_set_alarm(dev
);
12473 goto out_init_fw_info
;
12481 ret
= ssd_init_rom_info(dev
);
12483 hio_err("%s: init rom info failed\n", dev
->name
);
12485 ssd_set_alarm(dev
);
12486 goto out_init_rom_info
;
12489 ret
= ssd_init_label(dev
);
12491 hio_err("%s: init label failed\n", dev
->name
);
12493 ssd_set_alarm(dev
);
12494 goto out_init_label
;
12497 ret
= ssd_init_workq(dev
);
12499 hio_warn("%s: init workq failed\n", dev
->name
);
12500 goto out_init_workq
;
12502 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12504 ret
= ssd_init_log(dev
);
12506 hio_err("%s: init log failed\n", dev
->name
);
12508 ssd_set_alarm(dev
);
12512 ret
= ssd_init_smart(dev
);
12514 hio_err("%s: init info failed\n", dev
->name
);
12516 ssd_set_alarm(dev
);
12517 goto out_init_smart
;
12521 ret
= ssd_init_hw_info(dev
);
12523 hio_err("%s: init hardware info failed\n", dev
->name
);
12525 ssd_set_alarm(dev
);
12526 goto out_init_hw_info
;
12534 ret
= ssd_init_sensor(dev
);
12536 hio_err("%s: init sensor failed\n", dev
->name
);
12538 ssd_set_alarm(dev
);
12539 goto out_init_sensor
;
12542 ret
= ssd_init_pl_cap(dev
);
12544 hio_err("%s: int pl_cap failed\n", dev
->name
);
12546 ssd_set_alarm(dev
);
12547 goto out_init_pl_cap
;
12551 ret
= ssd_check_init_state(dev
);
12553 hio_err("%s: check init state failed\n", dev
->name
);
12555 ssd_set_alarm(dev
);
12556 goto out_check_init_state
;
12559 //flush all base pointer to ssd
12560 (void)ssd_reload_ssd_ptr(dev
);
12562 ret
= ssd_init_irq(dev
);
12564 hio_warn("%s: init irq failed\n", dev
->name
);
12568 ret
= ssd_init_thread(dev
);
12570 hio_warn("%s: init thread failed\n", dev
->name
);
12571 goto out_init_thread
;
12575 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12582 ret
= ssd_init_ot_protect(dev
);
12584 hio_err("%s: int ot_protect failed\n", dev
->name
);
12586 ssd_set_alarm(dev
);
12587 goto out_int_ot_protect
;
12590 ret
= ssd_init_wmode(dev
);
12592 hio_warn("%s: init write mode\n", dev
->name
);
12593 goto out_init_wmode
;
12596 /* init routine after hw is ready */
12597 ret
= ssd_init_routine(dev
);
12599 hio_warn("%s: init routine\n", dev
->name
);
12600 goto out_init_routine
;
12604 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12608 hio_warn("%s: resume disk finish.\n", dev
->name
);
/* error-unwind chain: reverse order of the setup above */
12614 out_int_ot_protect
:
12615 ssd_cleanup_thread(dev
);
12619 out_check_init_state
:
12626 ssd_cleanup_log(dev
);
12631 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12632 ssd_cleanup_workq(dev
);
12639 #ifdef LINUX_SUSE_OS
12640 iounmap(dev
->ctrlp
);
12642 pci_iounmap(pdev
, dev
->ctrlp
);
12645 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12646 out_request_mem_region
:
12648 pci_disable_device(pdev
);
12653 hio_warn("%s: resume disk fail.\n", dev
->name
);
/*
 * Hotplug/autoload table registration plus PM-ops declaration, split by
 * kernel version (dev_pm_ops / SIMPLE_DEV_PM_OPS exist only on >= 2.6.32).
 * NOTE(review): the #else/#endif lines of this version split were lost in
 * extraction, which is why MODULE_DEVICE_TABLE appears twice and
 * SIMPLE_DEV_PM_OPS seems to sit under a "< 2.6.32" guard — confirm the
 * branch layout against the upstream hio driver.
 */
12658 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12660 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12662 SIMPLE_DEV_PM_OPS(hio_pm_ops
, ssd_hio_suspend
, ssd_hio_resume
);
12665 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
/*
 * The pci_driver descriptor: probe/remove callbacks plus version-dependent
 * remove wrapping (__devexit_p before 2.6.38) and legacy suspend/resume
 * hooks (before 2.6.32; newer kernels presumably attach hio_pm_ops via
 * .driver.pm instead — that branch was lost in extraction).
 * NOTE(review): extraction-garbled block — the #else/#endif lines and the
 * closing "};" are missing. Text kept byte-identical below.
 */
12666 struct pci_driver ssd_driver
= {
12667 .name
= MODULE_NAME
,
12668 .id_table
= ssd_pci_tbl
,
12669 .probe
= ssd_init_one
,
12670 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12671 .remove
= __devexit_p(ssd_remove_one
),
12673 .remove
= ssd_remove_one
,
12676 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
12677 .suspend
= ssd_hio_suspend
,
12678 .resume
= ssd_hio_resume
,
/*
 * Reboot/shutdown notifier callback: for every registered device it logs a
 * power-off event, flushes user data and metadata, saves SMART, stops the
 * work queue and, when requested, triggers a firmware reload via the
 * SSD_RELOAD_FW_REG doorbell so the card comes back on new firmware.
 * NOTE(review): extraction-garbled block — braces and the return value
 * (presumably NOTIFY_OK/NOTIFY_DONE — confirm upstream) were lost. Text
 * kept byte-identical below.
 */
12686 /* notifier block to get a notify on system shutdown/halt/reboot */
12687 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12689 struct ssd_device
*dev
= NULL
;
12690 struct ssd_device
*n
= NULL
;
12692 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12693 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12695 (void)ssd_flush(dev
);
12696 (void)ssd_save_md(dev
);
12700 ssd_save_smart(dev
);
12702 ssd_stop_workq(dev
);
12704 if (dev
->reload_fw
) {
12705 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12713 static struct notifier_block ssd_notifier
= {
12714 ssd_notify_reboot
, NULL
, 0
/*
 * Module entry point: sets up global facilities in order — device index,
 * /proc, sysfs, per-CPU tasklets, device class, char major, two block
 * majors (primary + slave) — then clamps the module parameters (mode,
 * int_mode, log_level, wmode) into valid ranges, registers the PCI driver
 * and finally the reboot notifier. The tail is the goto-based unwind chain
 * in reverse registration order.
 * NOTE(review): extraction-garbled block — the "if (ret) { }" guards,
 * several #else/#endif lines, the success return and some unwind labels
 * were lost; the fused leading numbers are original file line numbers.
 * Text kept byte-identical below; restore structure from the upstream hio
 * driver before compiling.
 */
12717 static int __init
ssd_init_module(void)
12721 hio_info("driver version: %s\n", DRIVER_VERSION
);
12723 ret
= ssd_init_index();
12725 hio_warn("init index failed\n");
12726 goto out_init_index
;
12729 ret
= ssd_init_proc();
12731 hio_warn("init proc failed\n");
12732 goto out_init_proc
;
12735 ret
= ssd_init_sysfs();
12737 hio_warn("init sysfs failed\n");
12738 goto out_init_sysfs
;
12741 ret
= ssd_init_tasklet();
12743 hio_warn("init tasklet failed\n");
12744 goto out_init_tasklet
;
/* class API renamed after 2.6.12 (class_simple_* -> class_*) */
12747 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12748 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12750 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12752 if (IS_ERR(ssd_class
)) {
12753 ret
= PTR_ERR(ssd_class
);
12754 goto out_class_create
;
/* major number 0 asks the kernel to allocate one dynamically */
12757 if (ssd_cmajor
> 0) {
12758 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12760 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12763 hio_warn("unable to register chardev major number\n");
12764 goto out_register_chardev
;
12767 if (ssd_major
> 0) {
12768 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12770 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12773 hio_warn("unable to register major number\n");
12774 goto out_register_blkdev
;
12777 if (ssd_major_sl
> 0) {
12778 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12780 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12783 hio_warn("unable to register slave major number\n");
12784 goto out_register_blkdev_sl
;
/* sanitize module parameters into their valid ranges */
12787 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
12788 mode
= SSD_DRV_MODE_STANDARD
;
12792 if (mode
!= SSD_DRV_MODE_STANDARD
) {
12796 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
12797 int_mode
= SSD_INT_MODE_DEFAULT
;
12800 if (threaded_irq
) {
12801 int_mode
= SSD_INT_MSI
;
12804 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
12805 log_level
= SSD_LOG_LEVEL_ERR
;
12808 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
12809 wmode
= SSD_WMODE_DEFAULT
;
12812 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12813 ret
= pci_module_init(&ssd_driver
);
12815 ret
= pci_register_driver(&ssd_driver
);
12818 hio_warn("pci init failed\n");
12822 ret
= register_reboot_notifier(&ssd_notifier
);
12824 hio_warn("register reboot notifier failed\n");
12825 goto out_register_reboot_notifier
;
/* unwind chain: reverse registration order */
12830 out_register_reboot_notifier
:
12832 pci_unregister_driver(&ssd_driver
);
12833 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12834 out_register_blkdev_sl
:
12835 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12836 out_register_blkdev
:
12837 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12838 out_register_chardev
:
12839 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12840 class_simple_destroy(ssd_class
);
12842 class_destroy(ssd_class
);
12845 ssd_cleanup_tasklet();
12847 ssd_cleanup_sysfs();
12849 ssd_cleanup_proc();
12851 ssd_cleanup_index();
12857 static void __exit
ssd_cleanup_module(void)
12860 hio_info("unload driver: %s\n", DRIVER_VERSION
);
12864 unregister_reboot_notifier(&ssd_notifier
);
12866 pci_unregister_driver(&ssd_driver
);
12868 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12869 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12870 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12871 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12872 class_simple_destroy(ssd_class
);
12874 class_destroy(ssd_class
);
12877 ssd_cleanup_tasklet();
12878 ssd_cleanup_sysfs();
12879 ssd_cleanup_proc();
12880 ssd_cleanup_index();
/*
 * Exported API: attach an event callback to the disk behind bdev, then
 * replay relevant entries from the device's internal log — skipping events
 * from before the last reset for SEU_FAULT1, and collapsing the temperature
 * events (OVER/NORMAL/WARN) to only the most recent one, which is re-issued
 * at the end if the current temperature is still above SSD_OT_TEMP_HYST.
 * NOTE(review): extraction-garbled block — the declarations of tv/cur/temp/
 * log_nr, the loop over log_nr, several braces and the return statements
 * were lost; le->time window is checked against [dev->uptime, cur] where
 * cur presumably comes from do_gettimeofday(&tv) — confirm upstream. Text
 * kept byte-identical below.
 */
12883 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
12885 struct ssd_device
*dev
;
12887 struct ssd_log
*le
, *temp_le
= NULL
;
12892 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
12896 dev
= bdev
->bd_disk
->private_data
;
12897 dev
->event_call
= event_call
;
12899 do_gettimeofday(&tv
);
12902 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
12903 log_nr
= dev
->internal_log
.nr_log
;
12906 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
12907 if ((le
->le
.event
== SSD_LOG_SEU_FAULT1
) && (le
->time
< dev
->reset_time
)) {
12911 if (le
->le
.event
== SSD_LOG_OVER_TEMP
|| le
->le
.event
== SSD_LOG_NORMAL_TEMP
|| le
->le
.event
== SSD_LOG_WARN_TEMP
) {
12912 if (!temp_le
|| le
->time
>= temp_le
->time
) {
12918 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
12923 ssd_get_temperature(bdev
, &temp
);
12924 if (temp_le
&& (temp
>= SSD_OT_TEMP_HYST
)) {
12925 (void)dev
->event_call(dev
->gd
, temp_le
->le
.event
, ssd_parse_log(dev
, temp_le
, 0));
/*
 * Exported API: detach the event callback from the disk behind bdev by
 * clearing dev->event_call after validating bdev and its gendisk.
 * NOTE(review): extraction-garbled block — braces and the two return
 * statements (presumably an error code for invalid args and 0 on success —
 * confirm upstream) were lost. Text kept byte-identical below.
 */
12931 int ssd_unregister_event_notifier(struct block_device
*bdev
)
12933 struct ssd_device
*dev
;
12935 if (!bdev
|| !(bdev
->bd_disk
)) {
12939 dev
= bdev
->bd_disk
->private_data
;
12940 dev
->event_call
= NULL
;
12945 EXPORT_SYMBOL(ssd_get_label
);
12946 EXPORT_SYMBOL(ssd_get_version
);
12947 EXPORT_SYMBOL(ssd_set_otprotect
);
12948 EXPORT_SYMBOL(ssd_bm_status
);
12949 EXPORT_SYMBOL(ssd_submit_pbio
);
12950 EXPORT_SYMBOL(ssd_get_pciaddr
);
12951 EXPORT_SYMBOL(ssd_get_temperature
);
12952 EXPORT_SYMBOL(ssd_register_event_notifier
);
12953 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
12954 EXPORT_SYMBOL(ssd_reset
);
12955 EXPORT_SYMBOL(ssd_set_wmode
);
12959 module_init(ssd_init_module
);
12960 module_exit(ssd_cleanup_module
);
12961 MODULE_VERSION(DRIVER_VERSION
);
12962 MODULE_LICENSE("GPL");
12963 MODULE_AUTHOR("Huawei SSD DEV Team");
12964 MODULE_DESCRIPTION("Huawei SSD driver");