2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
/* Module / device naming and driver identity. */
#define MODULE_NAME "hio"
#define DRIVER_VERSION "2.1.0.23"
#define DRIVER_VERSION_LEN 16

#define SSD_FW_MIN 0x1

#define SSD_DEV_NAME MODULE_NAME
#define SSD_DEV_NAME_LEN 16
#define SSD_CDEV_NAME "c"SSD_DEV_NAME /* char control device: "chio" */
#define SSD_SDEV_NAME "s"SSD_DEV_NAME /* slave device: "shio" */

#define SSD_MAJOR_SL 0 /* 0 => dynamic major for the slave block device */

#define SSD_MAX_DEV 702
#define SSD_ALPHABET_NUM 26 /* disk-name letters, a..z */

/* Kernel log helpers.
 * NOTE(review): MODULE_NAME and the level tag concatenate with no separator,
 * so messages render as "hioinfo: ...", "hiowarn: ...", etc.  Kept
 * byte-identical on purpose; confirm against upstream before changing. */
#define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
#define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
#define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
#define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)

/* PCI device id of the slave port */
#define SSD_SLAVE_PORT_DEVID 0x000a
/* 2.6.9 msi affinity bug, should turn msi & msi-x off */
#define SSD_ESCAPE_IRQ

#define SSD_MSIX_VEC 8 /* number of MSI-X vectors / per-device IO queues */
//#undef SSD_ESCAPE_IRQ
#define SSD_MSIX_AFFINITY_FORCE

/* Over temperature protect */
#define SSD_OT_PROTECT

#ifdef SSD_QUEUE_PBIO
#define BIO_SSD_PBIO 20
#endif /* SSD_QUEUE_PBIO -- NOTE(review): #endif restored; the extraction dropped all #endif lines */

//#define SSD_DEBUG_ERR
/* Command / bus timeouts (jiffies). */
#define SSD_CMD_TIMEOUT (60*HZ)
#define SSD_SPI_TIMEOUT (5*HZ)
#define SSD_I2C_TIMEOUT (5*HZ)

#define SSD_I2C_MAX_DATA (127)
#define SSD_SMBUS_BLOCK_MAX (32)
#define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2) /* block + command + count bytes */

/* Init polling: interval in ms, limits in number of polls. */
#define SSD_INIT_WAIT (1000) //1s
#define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
#define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
#define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
#define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
#define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s

/* blkdev busy wait */
#define SSD_DEV_BUSY_WAIT 1000 //ms
#define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s

#define SSD_SMBUS_RETRY_INTERVAL (5) //ms
#define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)

#define SSD_BM_RETRY_MAX 7

/* bm routine interval */
#define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)

/* routine interval */
#define SSD_ROUTINE_INTERVAL (10*1000) //10s
#define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
#define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
#define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover

#define SSD_DMA_ALIGN (16)
/* some hw defaults */
#define SSD_LOG_MAX_SZ 4096

#define SSD_NAND_OOB_SZ 1024
#define SSD_NAND_ID_SZ 8
#define SSD_NAND_ID_BUFF_SZ 1024
#define SSD_NAND_MAX_CE 2

#define SSD_BBT_RESERVED 8

#define SSD_ECC_MAX_FLIP (64+1)

#define SSD_RAM_ALIGN 16

/* reset / reload magic values written to the reset registers */
#define SSD_RELOAD_FLAG 0x3333CCCC
#define SSD_RELOAD_FW 0xAA5555AA
#define SSD_RESET_NOINIT 0xAA5555AA
#define SSD_RESET 0x55AAAA55
#define SSD_RESET_FULL 0x5A
//#define SSD_RESET_WAIT 1000 //1s
//#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s

#define SSD_PROTOCOL_V1 0x0

/* on-board SPI flash ROM layout */
#define SSD_ROM_SIZE (16*1024*1024)
#define SSD_ROM_BLK_SIZE (256*1024)
#define SSD_ROM_PAGE_SIZE (256)
#define SSD_ROM_NR_BRIDGE_FW 2
#define SSD_ROM_NR_CTRL_FW 2
#define SSD_ROM_BRIDGE_FW_BASE 0
#define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
#define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
#define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
#define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
#define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)

/* protocol versions */
#define SSD_PROTOCOL_V3 0x3000000
#define SSD_PROTOCOL_V3_1_1 0x3010001
#define SSD_PROTOCOL_V3_1_3 0x3010003
#define SSD_PROTOCOL_V3_2 0x3020000
#define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
#define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
#define SSD_PROTOCOL_V3_2_4 0x3020004

/* protocol v3 ROM areas */
#define SSD_PV3_ROM_NR_BM_FW 1
#define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)

#define SSD_ROM_LOG_SZ (64*1024*4)

#define SSD_ROM_NR_SMART_MAX 2
#define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
#define SSD_PV3_ROM_SMART_SZ (64*1024)

#define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
#define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
/* Bridge register map (byte offsets into the BAR). */
#define SSD_REQ_FIFO_REG 0x0000
#define SSD_RESP_FIFO_REG 0x0008 //0x0010
#define SSD_RESP_PTR_REG 0x0010 //0x0018
#define SSD_INTR_INTERVAL_REG 0x0018
#define SSD_READY_REG 0x001C
#define SSD_BRIDGE_TEST_REG 0x0020
#define SSD_STRIPE_SIZE_REG 0x0028
#define SSD_CTRL_VER_REG 0x0030 //controller
#define SSD_BRIDGE_VER_REG 0x0034 //bridge
#define SSD_PCB_VER_REG 0x0038
#define SSD_BURN_FLAG_REG 0x0040
#define SSD_BRIDGE_INFO_REG 0x0044

#define SSD_WL_VAL_REG 0x0048 //32-bit

#define SSD_BB_INFO_REG 0x004C

#define SSD_ECC_TEST_REG 0x0050 //test only
#define SSD_ERASE_TEST_REG 0x0058 //test only
#define SSD_WRITE_TEST_REG 0x0060 //test only

#define SSD_RESET_REG 0x0068
#define SSD_RELOAD_FW_REG 0x0070

#define SSD_RESERVED_BLKS_REG 0x0074
#define SSD_VALID_PAGES_REG 0x0078
#define SSD_CH_INFO_REG 0x007C

/* per-controller test registers, SSD_CTRL_TEST_REG_SZ bytes apart */
#define SSD_CTRL_TEST_REG_SZ 0x8
#define SSD_CTRL_TEST_REG0 0x0080
#define SSD_CTRL_TEST_REG1 0x0088
#define SSD_CTRL_TEST_REG2 0x0090
#define SSD_CTRL_TEST_REG3 0x0098
#define SSD_CTRL_TEST_REG4 0x00A0
#define SSD_CTRL_TEST_REG5 0x00A8
#define SSD_CTRL_TEST_REG6 0x00B0
#define SSD_CTRL_TEST_REG7 0x00B8

#define SSD_FLASH_INFO_REG0 0x00C0
#define SSD_FLASH_INFO_REG1 0x00C8
#define SSD_FLASH_INFO_REG2 0x00D0
#define SSD_FLASH_INFO_REG3 0x00D8
#define SSD_FLASH_INFO_REG4 0x00E0
#define SSD_FLASH_INFO_REG5 0x00E8
#define SSD_FLASH_INFO_REG6 0x00F0
#define SSD_FLASH_INFO_REG7 0x00F8

#define SSD_RESP_INFO_REG 0x01B8
#define SSD_NAND_BUFF_BASE 0x01BC //for nand write

#define SSD_CHIP_INFO_REG_SZ 0x10
#define SSD_CHIP_INFO_REG0 0x0100 //128 bit
#define SSD_CHIP_INFO_REG1 0x0110
#define SSD_CHIP_INFO_REG2 0x0120
#define SSD_CHIP_INFO_REG3 0x0130
#define SSD_CHIP_INFO_REG4 0x0140
#define SSD_CHIP_INFO_REG5 0x0150
#define SSD_CHIP_INFO_REG6 0x0160
#define SSD_CHIP_INFO_REG7 0x0170

#define SSD_RAM_INFO_REG 0x01C4

#define SSD_BBT_BASE_REG 0x01C8
#define SSD_ECT_BASE_REG 0x01CC

#define SSD_CLEAR_INTR_REG 0x01F0

#define SSD_INIT_STATE_REG_SZ 0x8
#define SSD_INIT_STATE_REG0 0x0200
#define SSD_INIT_STATE_REG1 0x0208
#define SSD_INIT_STATE_REG2 0x0210
#define SSD_INIT_STATE_REG3 0x0218
#define SSD_INIT_STATE_REG4 0x0220
#define SSD_INIT_STATE_REG5 0x0228
#define SSD_INIT_STATE_REG6 0x0230
#define SSD_INIT_STATE_REG7 0x0238

#define SSD_ROM_INFO_REG 0x0600
#define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
#define SSD_ROM_CTRL_FW_INFO_REG 0x0608
#define SSD_ROM_VP_INFO_REG 0x060C

#define SSD_LOG_INFO_REG 0x0610
#define SSD_LED_REG 0x0614
#define SSD_MSG_BASE_REG 0x06F8

/* SPI flash access window */
#define SSD_SPI_REG_CMD 0x0180
#define SSD_SPI_REG_CMD_HI 0x0184
#define SSD_SPI_REG_WDATA 0x0188
#define SSD_SPI_REG_ID 0x0190
#define SSD_SPI_REG_STATUS 0x0198
#define SSD_SPI_REG_RDATA 0x01A0
#define SSD_SPI_REG_READY 0x01A8

/* I2C master window */
#define SSD_I2C_CTRL_REG 0x06F0
#define SSD_I2C_RDATA_REG 0x06F4

/* temperature reg */
#define SSD_BRIGE_TEMP_REG 0x0618

#define SSD_CTRL_TEMP_REG0 0x0700
#define SSD_CTRL_TEMP_REG1 0x0708
#define SSD_CTRL_TEMP_REG2 0x0710
#define SSD_CTRL_TEMP_REG3 0x0718
#define SSD_CTRL_TEMP_REG4 0x0720
#define SSD_CTRL_TEMP_REG5 0x0728
#define SSD_CTRL_TEMP_REG6 0x0730
#define SSD_CTRL_TEMP_REG7 0x0738

/* reversion 3 reg */
#define SSD_PROTOCOL_VER_REG 0x01B4

#define SSD_FLUSH_TIMEOUT_REG 0x02A4
#define SSD_BM_FAULT_REG 0x0660

#define SSD_PV3_RAM_STATUS_REG_SZ 0x4
#define SSD_PV3_RAM_STATUS_REG0 0x0260
#define SSD_PV3_RAM_STATUS_REG1 0x0264
#define SSD_PV3_RAM_STATUS_REG2 0x0268
#define SSD_PV3_RAM_STATUS_REG3 0x026C
#define SSD_PV3_RAM_STATUS_REG4 0x0270
#define SSD_PV3_RAM_STATUS_REG5 0x0274
#define SSD_PV3_RAM_STATUS_REG6 0x0278
#define SSD_PV3_RAM_STATUS_REG7 0x027C

#define SSD_PV3_CHIP_INFO_REG_SZ 0x40
#define SSD_PV3_CHIP_INFO_REG0 0x0300
#define SSD_PV3_CHIP_INFO_REG1 0x0340
#define SSD_PV3_CHIP_INFO_REG2 0x0380
#define SSD_PV3_CHIP_INFO_REG3 0x03B0
#define SSD_PV3_CHIP_INFO_REG4 0x0400
#define SSD_PV3_CHIP_INFO_REG5 0x0440
#define SSD_PV3_CHIP_INFO_REG6 0x0480
#define SSD_PV3_CHIP_INFO_REG7 0x04B0

#define SSD_PV3_INIT_STATE_REG_SZ 0x20
#define SSD_PV3_INIT_STATE_REG0 0x0500
#define SSD_PV3_INIT_STATE_REG1 0x0520
#define SSD_PV3_INIT_STATE_REG2 0x0540
#define SSD_PV3_INIT_STATE_REG3 0x0560
#define SSD_PV3_INIT_STATE_REG4 0x0580
#define SSD_PV3_INIT_STATE_REG5 0x05A0
#define SSD_PV3_INIT_STATE_REG6 0x05C0
#define SSD_PV3_INIT_STATE_REG7 0x05E0

/* reversion 3.1.1 reg */
#define SSD_FULL_RESET_REG 0x01B0

#define SSD_CTRL_REG_ZONE_SZ 0x800

#define SSD_BB_THRESHOLD_L1_REG 0x2C0
#define SSD_BB_THRESHOLD_L2_REG 0x2C4

#define SSD_BB_ACC_REG_SZ 0x4
#define SSD_BB_ACC_REG0 0x21C0
#define SSD_BB_ACC_REG1 0x29C0
#define SSD_BB_ACC_REG2 0x31C0

#define SSD_EC_THRESHOLD_L1_REG 0x2C8
#define SSD_EC_THRESHOLD_L2_REG 0x2CC

#define SSD_EC_ACC_REG_SZ 0x4
#define SSD_EC_ACC_REG0 0x21E0
#define SSD_EC_ACC_REG1 0x29E0
#define SSD_EC_ACC_REG2 0x31E0

/* reversion 3.1.2 & 3.1.3 reg */
#define SSD_HW_STATUS_REG 0x02AC

#define SSD_PLP_INFO_REG 0x0664

/*reversion 3.2 reg*/
#define SSD_POWER_ON_REG 0x01EC
#define SSD_PCIE_LINKSTATUS_REG 0x01F8
#define SSD_PL_CAP_LEARN_REG 0x01FC

#define SSD_FPGA_1V0_REG0 0x2070
#define SSD_FPGA_1V8_REG0 0x2078
#define SSD_FPGA_1V0_REG1 0x2870
#define SSD_FPGA_1V8_REG1 0x2878

/*reversion 3.2 reg*/
#define SSD_READ_OT_REG0 0x2260
#define SSD_WRITE_OT_REG0 0x2264
#define SSD_READ_OT_REG1 0x2A60
#define SSD_WRITE_OT_REG1 0x2A64
/* bridge command opcodes */
#define SSD_FUNC_READ 0x01
#define SSD_FUNC_WRITE 0x02
#define SSD_FUNC_NAND_READ_WOOB 0x03
#define SSD_FUNC_NAND_READ 0x04
#define SSD_FUNC_NAND_WRITE 0x05
#define SSD_FUNC_NAND_ERASE 0x06
#define SSD_FUNC_NAND_READ_ID 0x07
#define SSD_FUNC_READ_LOG 0x08
#define SSD_FUNC_TRIM 0x09
#define SSD_FUNC_RAM_READ 0x10
#define SSD_FUNC_RAM_WRITE 0x11
#define SSD_FUNC_FLUSH 0x12 //cache / bbt

/* SPI flash command set */
#define SSD_SPI_CMD_PROGRAM 0x02
#define SSD_SPI_CMD_READ 0x03
#define SSD_SPI_CMD_W_DISABLE 0x04
#define SSD_SPI_CMD_READ_STATUS 0x05
#define SSD_SPI_CMD_W_ENABLE 0x06
#define SSD_SPI_CMD_ERASE 0xd8
#define SSD_SPI_CMD_CLSR 0x30
#define SSD_SPI_CMD_READ_ID 0x9f

/* I2C transfer direction */
#define SSD_I2C_CTRL_READ 0x00
#define SSD_I2C_CTRL_WRITE 0x01

/* i2c internal register */
#define SSD_I2C_CFG_REG 0x00
#define SSD_I2C_DATA_REG 0x01
#define SSD_I2C_CMD_REG 0x02
#define SSD_I2C_STATUS_REG 0x03
#define SSD_I2C_SADDR_REG 0x04
#define SSD_I2C_LEN_REG 0x05
#define SSD_I2C_RLEN_REG 0x06
#define SSD_I2C_WLEN_REG 0x07
#define SSD_I2C_RESET_REG 0x08 //write for reset
#define SSD_I2C_PRER_REG 0x09
/* FPGA volt = ADC_value / 4096 * 3v */
#define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
#define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
#define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
#define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98

/* Unpack max/min/current ADC readings (16 bits each, 12 significant) from a
 * 64-bit register value.
 * Fixed: macro arguments fully parenthesized (precedence hazard, e.g.
 * SSD_FPGA_VOLT(a + b) previously expanded to a + ((b * 3000) >> 12)). */
#define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_MIN(val) ((((val) >> 16) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_CUR(val) ((((val) >> 32) & 0xffff) >> 4)
#define SSD_FPGA_VOLT(val) (((val) * 3000) >> 12) /* ADC counts -> mV */

#define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)(idx) << 24) | ((uint32_t)(ctrl) << 16) | ((uint32_t)(volt)))
482 SSD_CLOCK_166M_LOST
= 0,
/* 7-bit sensor addresses, pre-shifted for the on-board I2C controller */
#define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
#define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)

/* temperature register keeps degrees C in the high byte.
 * Fixed: argument parenthesized (precedence hazard with a sum argument). */
#define SSD_SENSOR_CONVERT_TEMP(val) ((int)((val) >> 8))

#define SSD_INLET_OT_TEMP (55) //55 DegC
#define SSD_INLET_OT_HYST (50) //50 DegC
#define SSD_FLASH_OT_TEMP (70) //70 DegC
#define SSD_FLASH_OT_HYST (65) //65 DegC
511 SSD_LM75_REG_TEMP
= 0,
/* LM80 hardware-monitor register map */
#define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
#define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
#define SSD_LM80_REG_IN(nr) (0x20 + (nr))

#define SSD_LM80_REG_FAN1 0x28
#define SSD_LM80_REG_FAN2 0x29
#define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))

#define SSD_LM80_REG_TEMP 0x27
#define SSD_LM80_REG_TEMP_HOT_MAX 0x38
#define SSD_LM80_REG_TEMP_HOT_HYST 0x39
#define SSD_LM80_REG_TEMP_OS_MAX 0x3a
#define SSD_LM80_REG_TEMP_OS_HYST 0x3b

#define SSD_LM80_REG_CONFIG 0x00
#define SSD_LM80_REG_ALARM1 0x01
#define SSD_LM80_REG_ALARM2 0x02
#define SSD_LM80_REG_MASK1 0x03
#define SSD_LM80_REG_MASK2 0x04
#define SSD_LM80_REG_FANDIV 0x05
#define SSD_LM80_REG_RES 0x06

/* raw reading -> 10mV units.  Fixed: argument parenthesized. */
#define SSD_LM80_CONVERT_VOLT(val) (((val) * 10) >> 8)

/* 3.3V rail measured through a 19/33 divider */
#define SSD_LM80_3V3_VOLT(val) ((val)*33/19)

#define SSD_LM80_CONV_INTERVAL (1000)
553 SSD_LM80_IN_FPGA_3V3
,
558 struct ssd_lm80_limit
564 /* +/- 5% except cap in*/
565 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
566 {171, 217}, /* CAP in: 1710 ~ 2170 */
575 /* temperature sensors */
#ifdef SSD_OT_PROTECT
#define SSD_OT_DELAY (60) //ms

#define SSD_OT_TEMP (90) //90 DegC

#define SSD_OT_TEMP_HYST (85) //85 DegC
#endif /* SSD_OT_PROTECT -- NOTE(review): #endif restored; the extraction dropped all #endif lines */

/* fpga temperature */
//#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
#define CONVERT_TEMP(val) ((val)*504/4096-273)

/* max/min/current readings packed 16 bits each into a 64-bit register value.
 * Fixed: macro arguments parenthesized. */
#define MAX_TEMP(val) CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val) CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val) CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))

/* power-loss capacitor monitoring */
#define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
#define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
/* Fixed: macro arguments parenthesized (t, u1, u2 are arbitrary expressions). */
#define SSD_PL_CAP_LEARN(u1, u2, t) (((t)*((u1)+(u2)))/(2*162*((u1)-(u2))))
#define SSD_PL_CAP_LEARN_WAIT (20) //20ms
#define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s

#define SSD_PL_CAP_CHARGE_WAIT (1000)
#define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s

/* raw reading -> mV.  Fixed: argument parenthesized. */
#define SSD_PL_CAP_VOLT(val) ((val)*7)

#define SSD_PL_CAP_VOLT_FULL (13700)
#define SSD_PL_CAP_VOLT_READY (12880)

#define SSD_PL_CAP_THRESHOLD (8900)
#define SSD_PL_CAP_CP_THRESHOLD (5800)
#define SSD_PL_CAP_THRESHOLD_HYST (100)
621 enum ssd_pl_cap_status
629 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
630 SSD_PL_CAP_CP
/* 3 cap */
/* hwmon bitmap layout: [temp | sensor | pl_cap | lm80 | clock | fpga volt] */
#define SSD_HWMON_OFFS_TEMP (0)
#define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
#define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
#define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
#define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
#define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)

/* bit index helpers.
 * Fixed: arguments parenthesized -- (ctrl * SSD_FPGA_VOLT_NR) previously
 * mis-evaluated for a non-atomic ctrl expression. */
#define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + (idx))
#define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + (idx))
#define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + (idx))
#define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + (idx))
#define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + (idx))
#define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + ((ctrl) * SSD_FPGA_VOLT_NR) + (idx))
663 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
667 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
671 while (__size
< size
) __size
<<= 1;
677 fifo
->data
= vmalloc(esize
* __size
);
684 fifo
->mask
= __size
- 1;
687 spin_lock_init(&fifo
->lock
);
692 static void sfifo_free(struct sfifo
*fifo
)
707 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
709 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
713 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
714 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
719 static int sfifo_put(struct sfifo
*fifo
, void *val
)
727 if (!in_interrupt()) {
728 spin_lock_irq(&fifo
->lock
);
729 ret
= __sfifo_put(fifo
, val
);
730 spin_unlock_irq(&fifo
->lock
);
732 spin_lock(&fifo
->lock
);
733 ret
= __sfifo_put(fifo
, val
);
734 spin_unlock(&fifo
->lock
);
740 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
742 if (fifo
->out
== fifo
->in
) {
746 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
747 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
752 static int sfifo_get(struct sfifo
*fifo
, void *val
)
760 if (!in_interrupt()) {
761 spin_lock_irq(&fifo
->lock
);
762 ret
= __sfifo_get(fifo
, val
);
763 spin_unlock_irq(&fifo
->lock
);
765 spin_lock(&fifo
->lock
);
766 ret
= __sfifo_get(fifo
, val
);
767 spin_unlock(&fifo
->lock
);
774 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
780 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
786 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
788 struct bio
*bio
= ssd_bl
->prev
;
796 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
801 ssd_bl
->next
->bi_next
= bio
;
810 #define ssd_blist bio_list
811 #define ssd_blist_init bio_list_init
812 #define ssd_blist_get bio_list_get
813 #define ssd_blist_add bio_list_add
816 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
817 #define bio_start(bio) (bio->bi_sector)
819 #define bio_start(bio) (bio->bi_iter.bi_sector)
823 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
824 #define mutex_lock down
825 #define mutex_unlock up
826 #define mutex semaphore
827 #define mutex_init init_MUTEX
831 typedef union ssd_i2c_ctrl
{
839 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
841 typedef union ssd_i2c_data
{
848 }__attribute__((packed
)) ssd_i2c_data_t
;
853 SSD_WMODE_BUFFER
= 0,
870 typedef struct ssd_sg_entry
875 }__attribute__((packed
))ssd_sg_entry_t
;
877 typedef struct ssd_rw_msg
883 uint32_t reserved
; //for 64-bit align
884 struct ssd_sg_entry sge
[1]; //base
885 }__attribute__((packed
))ssd_rw_msg_t
;
887 typedef struct ssd_resp_msg
895 }__attribute__((packed
))ssd_resp_msg_t
;
897 typedef struct ssd_flush_msg
900 uint8_t flag
:2; //flash cache 0 or bbt 1
904 uint32_t reserved
; //align
905 }__attribute__((packed
))ssd_flush_msg_t
;
907 typedef struct ssd_nand_op_msg
913 uint32_t reserved
; //align
919 }__attribute__((packed
))ssd_nand_op_msg_t
;
921 typedef struct ssd_ram_op_msg
927 uint32_t reserved
; //align
931 }__attribute__((packed
))ssd_ram_op_msg_t
;
935 typedef struct ssd_log_msg
941 uint32_t reserved
; //align
943 }__attribute__((packed
))ssd_log_msg_t
;
945 typedef struct ssd_log_op_msg
951 uint32_t reserved
; //align
952 uint64_t reserved1
; //align
954 }__attribute__((packed
))ssd_log_op_msg_t
;
956 typedef struct ssd_log_resp_msg
960 uint16_t reserved1
:2; //align with the normal resp msg
964 }__attribute__((packed
))ssd_log_resp_msg_t
;
968 typedef union ssd_response_msq
970 ssd_resp_msg_t resp_msg
;
971 ssd_log_resp_msg_t log_resp_msg
;
974 } ssd_response_msq_t
;
978 typedef struct ssd_protocol_info
981 uint32_t init_state_reg
;
982 uint32_t init_state_reg_sz
;
983 uint32_t chip_info_reg
;
984 uint32_t chip_info_reg_sz
;
985 } ssd_protocol_info_t
;
987 typedef struct ssd_hw_info
992 uint32_t cmd_fifo_sz
;
993 uint32_t cmd_fifo_sz_mask
;
996 uint32_t resp_ptr_sz
;
997 uint32_t resp_msg_sz
;
1001 uint16_t nr_data_ch
;
1007 uint8_t upper_pcb_ver
;
1009 uint8_t nand_vendor_id
;
1010 uint8_t nand_dev_id
;
1017 uint16_t bbf_seek
; //
1019 uint16_t page_count
; //per block
1021 uint32_t block_count
; //per flash
1025 uint32_t ram_max_len
;
1029 uint64_t md_base
; //metadata
1031 uint32_t md_entry_sz
;
1035 uint64_t nand_wbuff_base
;
1037 uint32_t md_reserved_blks
;
1038 uint32_t reserved_blks
;
1039 uint32_t valid_pages
;
1040 uint32_t max_valid_pages
;
1044 typedef struct ssd_hw_info_extend
1050 uint8_t form_factor
;
1053 }ssd_hw_info_extend_t
;
1055 typedef struct ssd_rom_info
1058 uint32_t block_size
;
1060 uint8_t nr_bridge_fw
;
1064 uint32_t bridge_fw_base
;
1065 uint32_t bridge_fw_sz
;
1066 uint32_t ctrl_fw_base
;
1067 uint32_t ctrl_fw_sz
;
1068 uint32_t bm_fw_base
;
1072 uint32_t smart_base
;
1075 uint32_t label_base
;
1083 SSD_DEBUG_WRITE_ERR
,
1093 typedef struct ssd_debug_info
1109 #define SSD_LABEL_FIELD_SZ 32
1110 #define SSD_SN_SZ 16
1112 typedef struct ssd_label
1114 char date
[SSD_LABEL_FIELD_SZ
];
1115 char sn
[SSD_LABEL_FIELD_SZ
];
1116 char part
[SSD_LABEL_FIELD_SZ
];
1117 char desc
[SSD_LABEL_FIELD_SZ
];
1118 char other
[SSD_LABEL_FIELD_SZ
];
1119 char maf
[SSD_LABEL_FIELD_SZ
];
1122 #define SSD_LABEL_DESC_SZ 256
1124 typedef struct ssd_labelv3
1126 char boardtype
[SSD_LABEL_FIELD_SZ
];
1127 char barcode
[SSD_LABEL_FIELD_SZ
];
1128 char item
[SSD_LABEL_FIELD_SZ
];
1129 char description
[SSD_LABEL_DESC_SZ
];
1130 char manufactured
[SSD_LABEL_FIELD_SZ
];
1131 char vendorname
[SSD_LABEL_FIELD_SZ
];
1132 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1133 char cleicode
[SSD_LABEL_FIELD_SZ
];
1134 char bom
[SSD_LABEL_FIELD_SZ
];
1138 typedef struct ssd_battery_info
1141 } ssd_battery_info_t
;
1143 /* ssd power stat */
1144 typedef struct ssd_power_stat
1146 uint64_t nr_poweron
;
1147 uint64_t nr_powerloss
;
1148 uint64_t init_failed
;
1152 typedef struct ssd_io_stat
1165 typedef struct ssd_ecc_info
1167 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1173 SSD_LOG_LEVEL_INFO
= 0,
1174 SSD_LOG_LEVEL_NOTICE
,
1175 SSD_LOG_LEVEL_WARNING
,
1180 typedef struct ssd_log_info
1183 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1187 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1189 typedef struct ssd_smart
1191 struct ssd_power_stat pstat
;
1192 struct ssd_io_stat io_stat
;
1193 struct ssd_ecc_info ecc_info
;
1194 struct ssd_log_info log_info
;
1200 typedef struct ssd_internal_log
1204 } ssd_internal_log_t
;
1207 typedef struct ssd_cmd
1210 struct scatterlist
*sgl
;
1211 struct list_head list
;
1214 int flag
; /*pbio(1) or bio(0)*/
1220 unsigned long start_time
;
1223 unsigned int nr_log
;
1225 struct timer_list cmd_timer
;
1226 struct completion
*waiting
;
1229 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1230 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1233 #define SSD_DCMD_MAX_SZ 32
1235 typedef struct ssd_dcmd
1237 struct list_head list
;
1239 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1255 #define SSD_QUEUE_NAME_LEN 16
1256 typedef struct ssd_queue
{
1257 char name
[SSD_QUEUE_NAME_LEN
];
1263 uint32_t resp_idx_mask
;
1264 uint32_t resp_msg_sz
;
1269 struct ssd_cmd
*cmd
;
1271 struct ssd_io_stat io_stat
;
1272 struct ssd_ecc_info ecc_info
;
1275 typedef struct ssd_device
{
1276 char name
[SSD_DEV_NAME_LEN
];
1283 #ifdef SSD_ESCAPE_IRQ
1289 int ot_delay
; //in ms
1293 atomic_t in_flight
[2]; //r&w
1297 struct list_head list
;
1298 struct pci_dev
*pdev
;
1300 unsigned long mmio_base
;
1301 unsigned long mmio_len
;
1302 void __iomem
*ctrlp
;
1304 struct mutex spi_mutex
;
1305 struct mutex i2c_mutex
;
1307 struct ssd_protocol_info protocol_info
;
1308 struct ssd_hw_info hw_info
;
1309 struct ssd_rom_info rom_info
;
1310 struct ssd_label label
;
1312 struct ssd_smart smart
;
1315 spinlock_t sendq_lock
;
1316 struct ssd_blist sendq
;
1317 struct task_struct
*send_thread
;
1318 wait_queue_head_t send_waitq
;
1321 spinlock_t doneq_lock
;
1322 struct ssd_blist doneq
;
1323 struct task_struct
*done_thread
;
1324 wait_queue_head_t done_waitq
;
1326 struct ssd_dcmd
*dcmd
;
1327 spinlock_t dcmd_lock
;
1328 struct list_head dcmd_list
; /* direct cmd list */
1329 wait_queue_head_t dcmd_wq
;
1331 unsigned long *tag_map
;
1332 wait_queue_head_t tag_wq
;
1334 spinlock_t cmd_lock
;
1335 struct ssd_cmd
*cmd
;
1338 ssd_event_call event_call
;
1340 dma_addr_t msg_base_dma
;
1343 void *resp_msg_base
;
1344 void *resp_ptr_base
;
1345 dma_addr_t resp_msg_base_dma
;
1346 dma_addr_t resp_ptr_base_dma
;
1349 struct msix_entry entry
[SSD_MSIX_VEC
];
1350 struct ssd_queue queue
[SSD_MSIX_VEC
];
1352 struct request_queue
*rq
; /* The device request queue */
1353 struct gendisk
*gd
; /* The gendisk structure */
1355 struct mutex internal_log_mutex
;
1356 struct ssd_internal_log internal_log
;
1357 struct workqueue_struct
*workq
;
1358 struct work_struct log_work
; /* get log */
1361 unsigned long state
; /* device state, for example, block device inited */
1363 struct module
*owner
;
1374 struct mutex gd_mutex
;
1375 struct ssd_log_info log_info
; /* volatile */
1377 atomic_t queue_depth
;
1378 struct mutex barrier_mutex
;
1379 struct mutex fw_mutex
;
1381 struct ssd_hw_info_extend hw_info_ext
;
1382 struct ssd_labelv3 labelv3
;
1386 struct mutex bm_mutex
;
1387 struct work_struct bm_work
; /* check bm */
1388 struct timer_list bm_timer
;
1389 struct sfifo log_fifo
;
1391 struct timer_list routine_timer
;
1392 unsigned long routine_tick
;
1393 unsigned long hwmon
;
1395 struct work_struct hwmon_work
; /* check hw */
1396 struct work_struct capmon_work
; /* check battery */
1397 struct work_struct tempmon_work
; /* check temp */
1400 struct ssd_debug_info db_info
;
1405 typedef struct ssd_acc_info
{
1406 uint32_t threshold_l1
;
1407 uint32_t threshold_l2
;
1411 typedef struct ssd_reg_op_info
1415 } ssd_reg_op_info_t
;
1417 typedef struct ssd_spi_op_info
1422 } ssd_spi_op_info_t
;
1424 typedef struct ssd_i2c_op_info
1431 } ssd_i2c_op_info_t
;
1433 typedef struct ssd_smbus_op_info
1439 } ssd_smbus_op_info_t
;
1441 typedef struct ssd_ram_op_info
{
1445 uint8_t __user
*buf
;
1446 } ssd_ram_op_info_t
;
1448 typedef struct ssd_flash_op_info
{
1453 uint8_t __user
*buf
;
1454 } ssd_flash_op_info_t
;
1456 typedef struct ssd_sw_log_info
{
1460 } ssd_sw_log_info_t
;
1462 typedef struct ssd_version_info
1464 uint32_t bridge_ver
; /* bridge fw version */
1465 uint32_t ctrl_ver
; /* controller fw version */
1466 uint32_t bm_ver
; /* battery manager fw version */
1467 uint8_t pcb_ver
; /* main pcb version */
1468 uint8_t upper_pcb_ver
;
1471 } ssd_version_info_t
;
1473 typedef struct pci_addr
1481 typedef struct ssd_drv_param_info
{
1491 } ssd_drv_param_info_t
;
1495 enum ssd_form_factor
1497 SSD_FORM_FACTOR_HHHL
= 0,
1498 SSD_FORM_FACTOR_FHHL
1502 /* ssd power loss protect */
/* battery/capacitor manager (bm) SMBus interface */
#define SSD_BM_SLAVE_ADDRESS 0x16
#define SSD_BM_CAP 5 /* number of capacitors monitored */

#define SSD_BM_SAFETYSTATUS 0x51
#define SSD_BM_OPERATIONSTATUS 0x54

/* ManufacturerAccess */
#define SSD_BM_MANUFACTURERACCESS 0x00
#define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */

/* Data flash access */
#define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
#define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
#define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
#define SSD_BM_CONFIGURATION_REGISTERS_ID 64

/* min cap voltage */
#define SSD_BM_CAP_VOLT_MIN 500
1534 SSD_BM_CAP_VINA = 1,
1540 SSD_BMSTATUS_OK
= 0,
1541 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1542 SSD_BMSTATUS_WARNING
1547 SBS_UNIT_TEMPERATURE
,
1552 SBS_UNIT_CAPACITANCE
1580 uint16_t cap_volt
[SSD_BM_CAP
];
1587 struct ssd_bm_manufacturer_data
1589 uint16_t pack_lot_code
;
1590 uint16_t pcb_lot_code
;
1591 uint16_t firmware_ver
;
1592 uint16_t hardware_ver
;
1595 struct ssd_bm_configuration_registers
1608 uint16_t fet_action
;
#define SBS_VALUE_MASK 0xffff

/* Byte offset of member `var` inside struct ssd_bm.
 * Fixed: use offsetof() (available via the kernel headers already included)
 * instead of the hand-rolled null-pointer-dereference cast, which is
 * undefined behavior in standard C. */
#define bm_var_offset(var) offsetof(struct ssd_bm, var)
/* Pointer to the member located `offset` bytes past `start`. */
#define bm_var(start, offset) ((void *) (start) + (offset))
1618 static struct sbs_cmd ssd_bm_sbs
[] = {
1619 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1620 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1621 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1622 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1623 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1624 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1625 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1626 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1627 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1628 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1629 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1630 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1631 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1632 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1633 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1634 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1635 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1636 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1637 {0, 0, 0, 0, 0, NULL
},
/* character-device ioctl interface (magic 'H') */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)

#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)

#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)

#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO

#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)

#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
1693 #define SSD_CMD_SET_ALARM _IOW('H', 191, int)
1695 #define SSD_CMD_RESET _IOW('H', 200, int)
1696 #define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
1697 #define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
1698 #define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
1699 #define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
1700 #define SSD_CMD_FULL_RESET _IOW('H', 206, int)
1702 #define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
1703 #define SSD_CMD_GET_LOG _IOR('H', 221, void *)
1704 #define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)
1706 #define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
1707 #define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)
1709 #define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
1710 #define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)
1712 #define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)
1714 #define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
1715 #define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
1716 #define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
1717 #define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
1718 #define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)
1721 #define SSD_CMD_FLUSH _IOW('H', 240, int)
1722 #define SSD_CMD_SAVE_MD _IOW('H', 241, int)
1724 #define SSD_CMD_SET_WMODE _IOW('H', 242, int)
1725 #define SSD_CMD_GET_WMODE _IOR('H', 243, int)
1726 #define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)
1728 #define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
1729 #define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)
1733 #define SSD_LOG_MAX_SZ 4096
1734 #define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
1738 SSD_LOG_DATA_NONE
= 0,
1743 typedef struct ssd_log_entry
1761 }__attribute__((packed
))ssd_log_entry_t
;
1763 typedef struct ssd_log
1766 uint64_t ctrl_idx
:8;
1768 } __attribute__((packed
)) ssd_log_t
;
1770 typedef struct ssd_log_desc
1778 } __attribute__((packed
)) ssd_log_desc_t
;
1780 #define SSD_LOG_SW_IDX 0xF
1781 #define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1782 static struct ssd_log_desc ssd_log_desc
[] = {
1783 /* event, level, show flash, show block, show page, desc */
1784 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1785 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1786 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1787 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1788 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1789 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1790 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1791 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1792 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1793 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1794 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1795 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1796 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1797 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1798 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1799 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1800 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1801 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1802 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1803 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1804 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1805 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1806 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1807 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1808 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1809 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1810 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1811 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1812 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1813 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1814 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1815 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Data inspection failure"},
1816 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1819 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1820 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1821 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1822 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1823 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1824 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1825 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1826 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1827 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1828 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1829 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1830 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1831 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1832 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1833 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1834 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1835 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1836 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1837 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1838 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1839 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1840 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1841 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1842 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1843 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1845 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1846 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1847 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1848 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1850 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1852 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1853 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1854 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1855 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1856 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1857 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1858 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1859 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1860 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1861 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1862 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1863 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1864 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1865 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1866 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1867 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1869 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1870 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1871 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1872 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1873 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1875 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1876 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1877 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1878 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1879 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1880 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1882 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Over temperature"}, //xx
1883 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is OK"}, //xx
1884 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1885 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1886 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1887 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1888 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1889 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1890 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1891 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1892 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature close to limit"}, //xx
1894 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1895 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1896 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1897 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1898 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1900 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1901 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1902 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1903 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1904 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1905 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1906 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1907 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1908 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1909 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1910 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1911 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1912 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1913 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is up to degree 95"},
1914 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is up to degree 100"},
1916 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1917 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1918 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1919 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1920 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1921 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1922 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1923 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1924 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1925 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1926 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1927 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1928 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1929 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1930 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1931 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1932 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1933 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1934 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1935 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1936 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1937 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1938 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Inlet over temperature"},
1939 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Inlet temperature is OK"},
1940 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Flash over temperature"},
1941 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Flash temperature is OK"},
1942 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1943 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1944 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1945 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1947 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
1950 #define SSD_LOG_OVER_TEMP 0x90
1951 #define SSD_LOG_NORMAL_TEMP 0x91
1952 #define SSD_LOG_WARN_TEMP 0x9a
1953 #define SSD_LOG_SEU_FAULT 0x93
1954 #define SSD_LOG_SEU_FAULT1 0x98
1955 #define SSD_LOG_BATTERY_FAULT 0x92
1956 #define SSD_LOG_BATTERY_OK 0x99
1957 #define SSD_LOG_BOARD_VOLT_FAULT 0x9f
1960 #define SSD_LOG_TIMEOUT 0x300
1961 #define SSD_LOG_POWER_ON 0x301
1962 #define SSD_LOG_POWER_OFF 0x302
1963 #define SSD_LOG_CLEAR_LOG 0x303
1964 #define SSD_LOG_SET_CAPACITY 0x304
1965 #define SSD_LOG_CLEAR_DATA 0x305
1966 #define SSD_LOG_BM_SFSTATUS 0x306
1967 #define SSD_LOG_EIO 0x307
1968 #define SSD_LOG_ECMD 0x308
1969 #define SSD_LOG_SET_WMODE 0x309
1970 #define SSD_LOG_DDR_INIT_ERR 0x30a
1971 #define SSD_LOG_PCIE_LINK_STATUS 0x30b
1972 #define SSD_LOG_CTRL_RST_SYNC 0x30c
1973 #define SSD_LOG_CLK_FAULT 0x30d
1974 #define SSD_LOG_VOLT_FAULT 0x30e
1975 #define SSD_LOG_SET_CAPACITY_END 0x30F
1976 #define SSD_LOG_CLEAR_DATA_END 0x310
1977 #define SSD_LOG_RESET 0x311
1978 #define SSD_LOG_CAP_VOLT_FAULT 0x312
1979 #define SSD_LOG_CAP_LEARN_FAULT 0x313
1980 #define SSD_LOG_CAP_STATUS 0x314
1981 #define SSD_LOG_VOLT_STATUS 0x315
1982 #define SSD_LOG_INLET_OVER_TEMP 0x316
1983 #define SSD_LOG_INLET_NORMAL_TEMP 0x317
1984 #define SSD_LOG_FLASH_OVER_TEMP 0x318
1985 #define SSD_LOG_FLASH_NORMAL_TEMP 0x319
1986 #define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
1987 #define SSD_LOG_SENSOR_FAULT 0x31b
1988 #define SSD_LOG_ERASE_ALL 0x31c
1989 #define SSD_LOG_ERASE_ALL_END 0x31d
1992 /* sw log fifo depth */
1993 #define SSD_LOG_FIFO_SZ 1024
1997 static DEFINE_PER_CPU(struct list_head
, ssd_doneq
);
1998 static DEFINE_PER_CPU(struct tasklet_struct
, ssd_tasklet
);
2001 /* unloading driver */
2002 static volatile int ssd_exiting
= 0;
2004 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
2005 static struct class_simple
*ssd_class
;
2007 static struct class *ssd_class
;
2010 static int ssd_cmajor
= SSD_CMAJOR
;
2012 /* ssd block device major, minors */
2013 static int ssd_major
= SSD_MAJOR
;
2014 static int ssd_major_sl
= SSD_MAJOR_SL
;
2015 static int ssd_minors
= SSD_MINORS
;
2017 /* ssd device list */
2018 static struct list_head ssd_list
;
2019 static unsigned long ssd_index_bits
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2020 static unsigned long ssd_index_bits_sl
[SSD_MAX_DEV
/ BITS_PER_LONG
+ 1];
2021 static atomic_t ssd_nr
;
2026 SSD_DRV_MODE_STANDARD
= 0, /* full */
2027 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2028 SSD_DRV_MODE_BASE
/* base only */
2038 #if (defined SSD_MSIX)
2039 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2040 #elif (defined SSD_MSI)
2041 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2043 /* auto select the defaut int mode according to the kernel version*/
2044 /* suse 11 sp1 irqbalance bug: use msi instead*/
2045 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
2046 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2048 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2052 static int mode
= SSD_DRV_MODE_STANDARD
;
2053 static int status_mask
= 0xFF;
2054 static int int_mode
= SSD_INT_MODE_DEFAULT
;
2055 static int threaded_irq
= 0;
2056 static int log_level
= SSD_LOG_LEVEL_WARNING
;
2057 static int ot_protect
= 1;
2058 static int wmode
= SSD_WMODE_DEFAULT
;
2059 static int finject
= 0;
2061 module_param(mode
, int, 0);
2062 module_param(status_mask
, int, 0);
2063 module_param(int_mode
, int, 0);
2064 module_param(threaded_irq
, int, 0);
2065 module_param(log_level
, int, 0);
2066 module_param(ot_protect
, int, 0);
2067 module_param(wmode
, int, 0);
2068 module_param(finject
, int, 0);
2071 MODULE_PARM_DESC(mode
, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
2072 MODULE_PARM_DESC(status_mask
, "command status mask, 0 - without command error, 0xff - with command error");
2073 MODULE_PARM_DESC(int_mode
, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
2074 MODULE_PARM_DESC(threaded_irq
, "threaded irq, 0 - normal irq, 1 - threaded irq");
2075 MODULE_PARM_DESC(log_level
, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
2076 MODULE_PARM_DESC(ot_protect
, "over temperature protect, 0 - disable, 1 - enable");
2077 MODULE_PARM_DESC(wmode
, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
2078 MODULE_PARM_DESC(finject
, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2082 static int __init
ssd_drv_mode(char *str
)
2084 mode
= (int)simple_strtoul(str
, NULL
, 0);
2089 static int __init
ssd_status_mask(char *str
)
2091 status_mask
= (int)simple_strtoul(str
, NULL
, 16);
2096 static int __init
ssd_int_mode(char *str
)
2098 int_mode
= (int)simple_strtoul(str
, NULL
, 0);
2103 static int __init
ssd_threaded_irq(char *str
)
2105 threaded_irq
= (int)simple_strtoul(str
, NULL
, 0);
2110 static int __init
ssd_log_level(char *str
)
2112 log_level
= (int)simple_strtoul(str
, NULL
, 0);
2117 static int __init
ssd_ot_protect(char *str
)
2119 ot_protect
= (int)simple_strtoul(str
, NULL
, 0);
2124 static int __init
ssd_wmode(char *str
)
2126 wmode
= (int)simple_strtoul(str
, NULL
, 0);
2131 static int __init
ssd_finject(char *str
)
2133 finject
= (int)simple_strtoul(str
, NULL
, 0);
2138 __setup(MODULE_NAME
"_mode=", ssd_drv_mode
);
2139 __setup(MODULE_NAME
"_status_mask=", ssd_status_mask
);
2140 __setup(MODULE_NAME
"_int_mode=", ssd_int_mode
);
2141 __setup(MODULE_NAME
"_threaded_irq=", ssd_threaded_irq
);
2142 __setup(MODULE_NAME
"_log_level=", ssd_log_level
);
2143 __setup(MODULE_NAME
"_ot_protect=", ssd_ot_protect
);
2144 __setup(MODULE_NAME
"_wmode=", ssd_wmode
);
2145 __setup(MODULE_NAME
"_finject=", ssd_finject
);
2149 #ifdef CONFIG_PROC_FS
2150 #include <linux/proc_fs.h>
2151 #include <asm/uaccess.h>
2153 #define SSD_PROC_DIR MODULE_NAME
2154 #define SSD_PROC_INFO "info"
2156 static struct proc_dir_entry
*ssd_proc_dir
= NULL
;
2157 static struct proc_dir_entry
*ssd_proc_info
= NULL
;
2159 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2160 static int ssd_proc_read(char *page
, char **start
,
2161 off_t off
, int count
, int *eof
, void *data
)
2163 struct ssd_device
*dev
= NULL
;
2164 struct ssd_device
*n
= NULL
;
2174 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
2176 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2178 size
= dev
->hw_info
.size
;
2179 do_div(size
, 1000000000);
2181 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2183 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2185 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2186 if (dev
->hw_info
.ctrl_ver
!= 0) {
2187 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2190 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2192 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2193 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2196 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
2204 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2206 struct ssd_device
*dev
= NULL
;
2207 struct ssd_device
*n
= NULL
;
2215 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
2217 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2219 size
= dev
->hw_info
.size
;
2220 do_div(size
, 1000000000);
2222 seq_printf(m
, "\n");
2224 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2226 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2227 if (dev
->hw_info
.ctrl_ver
!= 0) {
2228 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2231 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2233 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2234 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2237 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
2243 static int ssd_proc_open(struct inode
*inode
, struct file
*file
)
2245 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
2246 return single_open(file
, ssd_proc_show
, PDE(inode
)->data
);
2248 return single_open(file
, ssd_proc_show
, PDE_DATA(inode
));
2252 static const struct file_operations ssd_proc_fops
= {
2253 .open
= ssd_proc_open
,
2255 .llseek
= seq_lseek
,
2256 .release
= single_release
,
2261 static void ssd_cleanup_proc(void)
2263 if (ssd_proc_info
) {
2264 remove_proc_entry(SSD_PROC_INFO
, ssd_proc_dir
);
2265 ssd_proc_info
= NULL
;
2268 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2269 ssd_proc_dir
= NULL
;
2272 static int ssd_init_proc(void)
2274 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2276 goto out_proc_mkdir
;
2278 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2279 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2281 goto out_create_proc_entry
;
2283 ssd_proc_info
->read_proc
= ssd_proc_read
;
2286 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2287 ssd_proc_info
->owner
= THIS_MODULE
;
2290 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2292 goto out_create_proc_entry
;
2297 out_create_proc_entry
:
2298 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2304 static void ssd_cleanup_proc(void)
2308 static int ssd_init_proc(void)
2312 #endif /* CONFIG_PROC_FS */
/*
 * sysfs support is not implemented in this driver version: provide
 * no-op stubs so the register/unregister and init/cleanup paths can
 * call them unconditionally.
 */
static void ssd_unregister_sysfs(struct ssd_device *dev)
{
	return;
}

static int ssd_register_sysfs(struct ssd_device *dev)
{
	return 0;
}

static void ssd_cleanup_sysfs(void)
{
	return;
}

static int ssd_init_sysfs(void)
{
	return 0;
}
2335 static inline void ssd_put_index(int slave
, int index
)
2337 unsigned long *index_bits
= ssd_index_bits
;
2340 index_bits
= ssd_index_bits_sl
;
2343 if (test_and_clear_bit(index
, index_bits
)) {
2344 atomic_dec(&ssd_nr
);
2348 static inline int ssd_get_index(int slave
)
2350 unsigned long *index_bits
= ssd_index_bits
;
2354 index_bits
= ssd_index_bits_sl
;
2358 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
2362 if (test_and_set_bit(index
, index_bits
)) {
2366 atomic_inc(&ssd_nr
);
2371 static void ssd_cleanup_index(void)
2376 static int ssd_init_index(void)
2378 INIT_LIST_HEAD(&ssd_list
);
2379 atomic_set(&ssd_nr
, 0);
2380 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2381 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2386 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2388 if(idx
< SSD_ALPHABET_NUM
) {
2389 snprintf(name
, size
, "%c", 'a'+idx
);
2391 idx
-= SSD_ALPHABET_NUM
;
2392 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
2396 /* pci register r&w */
2397 static inline void ssd_reg_write(void *addr
, uint64_t val
)
2399 iowrite32((uint32_t)val
, addr
);
2400 iowrite32((uint32_t)(val
>> 32), addr
+ 4);
2404 static inline uint64_t ssd_reg_read(void *addr
)
2407 uint32_t val_lo
, val_hi
;
2409 val_lo
= ioread32(addr
);
2410 val_hi
= ioread32(addr
+ 4);
2413 val
= val_lo
| ((uint64_t)val_hi
<< 32);
2419 #define ssd_reg32_write(addr, val) writel(val, addr)
2420 #define ssd_reg32_read(addr) readl(addr)
2423 static void ssd_clear_alarm(struct ssd_device
*dev
)
2427 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2431 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2433 /* firmware control */
2436 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
2439 static void ssd_set_alarm(struct ssd_device
*dev
)
2443 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2447 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2451 /* software control */
2454 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
/*
 * Byte-swap helpers (endianness conversion for SPI flash data).
 * The argument is evaluated multiple times — do not pass expressions
 * with side effects.  (Reconstructed: the "((uintN_t)(" opener lines
 * were missing from the fragmented source.)
 */
#define u32_swap(x) \
	((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define u16_swap(x) \
	((uint16_t)( \
	(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
	(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2471 /* No lock, for init only*/
2472 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2482 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2484 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2485 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2486 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2487 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2491 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2492 if (val
== 0x1000000) {
2496 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2503 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2512 static int ssd_init_spi(struct ssd_device
*dev
)
2518 mutex_lock(&dev
->spi_mutex
);
2521 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2524 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2526 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2531 } while (val
!= 0x1000000);
2533 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2538 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2546 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2548 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2551 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2552 mutex_unlock(&dev
->spi_mutex
);
2559 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2570 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2571 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2575 mutex_lock(&dev
->spi_mutex
);
2576 while (rlen
< size
) {
2577 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2579 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2581 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2582 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2583 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2584 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2588 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2589 if (val
== 0x1000000) {
2593 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2600 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2601 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2603 rlen
+= sizeof(uint32_t);
2607 mutex_unlock(&dev
->spi_mutex
);
2611 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2623 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2624 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2625 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2629 mutex_lock(&dev
->spi_mutex
);
2631 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2633 wlen
= size
/ sizeof(uint32_t);
2634 for (i
=0; i
<(int)wlen
; i
++) {
2635 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2639 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2641 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2647 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2649 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2651 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2656 } while (val
!= 0x1000000);
2658 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2663 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2670 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2671 if ((val
>> 6) & 0x1) {
2678 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2680 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2683 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2685 mutex_unlock(&dev
->spi_mutex
);
2690 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2700 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2704 mutex_lock(&dev
->spi_mutex
);
2706 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2707 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2710 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2712 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2716 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2719 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2721 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2726 } while (val
!= 0x1000000);
2728 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2733 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2740 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2741 if ((val
>> 5) & 0x1) {
2748 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2750 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2753 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2755 mutex_unlock(&dev
->spi_mutex
);
2760 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2771 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2772 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2776 while (len
< size
) {
2777 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2778 rsize
= dev
->rom_info
.page_size
- roff
;
2779 if ((size
- len
) < rsize
) {
2780 rsize
= (size
- len
);
2784 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2798 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2809 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2810 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2814 while (len
< size
) {
2815 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2816 wsize
= dev
->rom_info
.page_size
- woff
;
2817 if ((size
- len
) < wsize
) {
2818 wsize
= (size
- len
);
2822 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2836 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2846 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2847 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2851 while (len
< size
) {
2854 ret
= ssd_spi_block_erase(dev
, eoff
);
2859 len
+= dev
->rom_info
.block_size
;
2869 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2871 return ssd_reg32_read(addr
);
2874 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2876 ssd_reg32_write(addr
, val
);
2877 ssd_reg32_read(addr
);
2880 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2882 ssd_i2c_ctrl_t ctrl
;
2883 ssd_i2c_data_t data
;
2890 ctrl
.bits
.wdata
= 0;
2891 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2892 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2893 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2897 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2898 if (data
.bits
.valid
== 0) {
2903 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2909 status
= data
.bits
.rdata
;
2911 if (!(status
& 0x4)) {
2912 /* clear read fifo data */
2913 ctrl
.bits
.wdata
= 0;
2914 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2915 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2916 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2920 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2921 if (data
.bits
.valid
== 0) {
2926 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2934 if (nr_data
<= SSD_I2C_MAX_DATA
) {
2943 ctrl
.bits
.wdata
= 0x04;
2944 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2945 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2946 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2949 if (!(status
& 0x8)) {
2951 /* reset i2c controller */
2952 ctrl
.bits
.wdata
= 0x0;
2953 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
2954 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2955 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2962 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
2964 ssd_i2c_ctrl_t ctrl
;
2965 ssd_i2c_data_t data
;
2971 mutex_lock(&dev
->i2c_mutex
);
2976 ctrl
.bits
.wdata
= saddr
;
2977 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
2978 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2979 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2982 while (off
< size
) {
2983 ctrl
.bits
.wdata
= buf
[off
];
2984 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2985 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2986 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2992 ctrl
.bits
.wdata
= 0x01;
2993 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2994 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2995 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3000 ctrl
.bits
.wdata
= 0;
3001 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3002 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3003 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3006 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3007 if (data
.bits
.valid
== 0) {
3012 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3019 status
= data
.bits
.rdata
;
3024 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3031 if (!(status
& 0x1)) {
3037 if (status
& 0x20) {
3043 if (status
& 0x10) {
3050 if (__ssd_i2c_clear(dev
, saddr
)) {
3054 mutex_unlock(&dev
->i2c_mutex
);
3059 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3061 ssd_i2c_ctrl_t ctrl
;
3062 ssd_i2c_data_t data
;
3068 mutex_lock(&dev
->i2c_mutex
);
3073 ctrl
.bits
.wdata
= saddr
;
3074 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3075 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3076 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3079 ctrl
.bits
.wdata
= size
;
3080 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3081 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3082 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3085 ctrl
.bits
.wdata
= 0x02;
3086 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3087 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3088 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3093 ctrl
.bits
.wdata
= 0;
3094 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3095 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3096 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3099 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3100 if (data
.bits
.valid
== 0) {
3105 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3112 status
= data
.bits
.rdata
;
3117 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3124 if (!(status
& 0x2)) {
3130 if (status
& 0x20) {
3136 if (status
& 0x10) {
3142 while (off
< size
) {
3143 ctrl
.bits
.wdata
= 0;
3144 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3145 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3146 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3150 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3151 if (data
.bits
.valid
== 0) {
3156 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3163 buf
[off
] = data
.bits
.rdata
;
3170 if (__ssd_i2c_clear(dev
, saddr
)) {
3174 mutex_unlock(&dev
->i2c_mutex
);
3179 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3181 ssd_i2c_ctrl_t ctrl
;
3182 ssd_i2c_data_t data
;
3188 mutex_lock(&dev
->i2c_mutex
);
3193 ctrl
.bits
.wdata
= saddr
;
3194 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3195 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3196 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3200 while (off
< wsize
) {
3201 ctrl
.bits
.wdata
= wbuf
[off
];
3202 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3203 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3204 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3210 ctrl
.bits
.wdata
= rsize
;
3211 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3212 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3213 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3216 ctrl
.bits
.wdata
= 0x03;
3217 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3218 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3219 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3224 ctrl
.bits
.wdata
= 0;
3225 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3226 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3227 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3230 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3231 if (data
.bits
.valid
== 0) {
3236 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3243 status
= data
.bits
.rdata
;
3248 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3255 if (!(status
& 0x2)) {
3261 if (status
& 0x20) {
3267 if (status
& 0x10) {
3274 while (off
< rsize
) {
3275 ctrl
.bits
.wdata
= 0;
3276 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3277 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3278 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3282 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3283 if (data
.bits
.valid
== 0) {
3288 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3295 rbuf
[off
] = data
.bits
.rdata
;
3302 if (__ssd_i2c_clear(dev
, saddr
)) {
3305 mutex_unlock(&dev
->i2c_mutex
);
3310 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3316 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3317 if (!ret
|| -ETIMEDOUT
== ret
) {
3322 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3325 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3331 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3337 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3338 if (!ret
|| -ETIMEDOUT
== ret
) {
3343 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3346 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3352 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3354 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3359 memcpy((smb_data
+ 1), buf
, 1);
3362 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3363 if (!ret
|| -ETIMEDOUT
== ret
) {
3368 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3371 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3377 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3379 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3386 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3387 if (!ret
|| -ETIMEDOUT
== ret
) {
3392 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3395 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3401 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3403 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3408 memcpy((smb_data
+ 1), buf
, 2);
3411 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3412 if (!ret
|| -ETIMEDOUT
== ret
) {
3417 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3420 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3426 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3428 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3435 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3436 if (!ret
|| -ETIMEDOUT
== ret
) {
3441 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3444 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3450 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3452 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3458 memcpy((smb_data
+ 2), buf
, size
);
3461 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3462 if (!ret
|| -ETIMEDOUT
== ret
) {
3467 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3470 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3476 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3478 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3486 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3487 if (!ret
|| -ETIMEDOUT
== ret
) {
3492 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3495 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3501 rsize
= smb_data
[1];
3503 if (rsize
> size
) {
3507 memcpy(buf
, (smb_data
+ 2), rsize
);
3513 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3516 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3521 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3526 conf
&= (uint8_t)(~1u);
3528 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3537 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3542 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3547 *data
= u16_swap(val
);
3552 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3561 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3568 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3573 /* set volt limit */
3574 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3575 high
= ssd_lm80_limit
[i
].high
;
3576 low
= ssd_lm80_limit
[i
].low
;
3578 if (SSD_LM80_IN_CAP
== i
) {
3582 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3588 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3594 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3600 /* set interrupt mask: allow volt in interrupt except cap in*/
3602 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3607 /* set interrupt mask: disable others */
3609 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3616 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3625 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3630 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3634 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3639 val
&= ~(1UL << (uint32_t)idx
);
3641 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3650 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3655 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3659 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3664 val
|= (1UL << (uint32_t)idx
);
3666 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3675 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3680 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3685 *data
= u16_swap(val
);
3690 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3693 uint16_t val
= 0, status
;
3694 uint8_t alarm1
= 0, alarm2
= 0;
3698 /* read interrupt status to clear interrupt */
3699 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3704 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3709 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3711 /* parse inetrrupt status */
3712 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3713 if (!((status
>> (uint32_t)i
) & 0x1)) {
3714 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3715 /* enable INx irq */
3716 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3725 /* disable INx irq */
3726 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3731 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3735 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3740 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3743 case SSD_LM80_IN_CAP
: {
3745 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3747 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3752 case SSD_LM80_IN_1V2
:
3753 case SSD_LM80_IN_1V2a
:
3754 case SSD_LM80_IN_1V5
:
3755 case SSD_LM80_IN_1V8
: {
3756 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3759 case SSD_LM80_IN_FPGA_3V3
:
3760 case SSD_LM80_IN_3V3
: {
3761 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3771 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3772 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
);
3775 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3780 static int ssd_init_sensor(struct ssd_device
*dev
)
3784 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3788 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3790 hio_warn("%s: init lm75 failed\n", dev
->name
);
3791 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3792 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3797 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3798 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3800 hio_warn("%s: init lm80 failed\n", dev
->name
);
3801 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3802 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3809 /* skip error if not in standard mode */
3810 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3817 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3819 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3823 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3827 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3831 static int ssd_mon_temp(struct ssd_device
*dev
)
3837 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3841 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3846 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3848 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3849 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3853 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3855 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3856 if (cur
>= SSD_INLET_OT_TEMP
) {
3857 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3858 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3860 } else if(cur
< SSD_INLET_OT_HYST
) {
3861 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3862 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3867 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3869 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3870 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3874 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3876 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3877 if (cur
>= SSD_FLASH_OT_TEMP
) {
3878 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3879 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3881 } else if(cur
< SSD_FLASH_OT_HYST
) {
3882 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3883 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3892 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3894 test_and_clear_bit(tag
, dev
->tag_map
);
3895 wake_up(&dev
->tag_wq
);
3898 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3903 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3904 DEFINE_WAIT(__wait
);
3910 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
3913 finish_wait(&dev
->tag_wq
, &__wait
);
3916 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3923 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
3925 test_and_clear_bit(tag
, dev
->tag_map
);
3928 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
3932 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3939 static void ssd_barrier_end(struct ssd_device
*dev
)
3941 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3942 wake_up_all(&dev
->tag_wq
);
3944 mutex_unlock(&dev
->barrier_mutex
);
3947 static int ssd_barrier_start(struct ssd_device
*dev
)
3951 mutex_lock(&dev
->barrier_mutex
);
3953 atomic_set(&dev
->queue_depth
, 0);
3955 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3956 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3960 __set_current_state(TASK_INTERRUPTIBLE
);
3961 schedule_timeout(1);
3964 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3965 wake_up_all(&dev
->tag_wq
);
3967 mutex_unlock(&dev
->barrier_mutex
);
3972 static int ssd_busy(struct ssd_device
*dev
)
3974 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3981 static int ssd_wait_io(struct ssd_device
*dev
)
3985 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3986 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3990 __set_current_state(TASK_INTERRUPTIBLE
);
3991 schedule_timeout(1);
3998 static int ssd_in_barrier(struct ssd_device
*dev
)
4000 return (0 == atomic_read(&dev
->queue_depth
));
4004 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4006 kfree(dev
->tag_map
);
4009 static int ssd_init_tag(struct ssd_device
*dev
)
4011 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4013 mutex_init(&dev
->barrier_mutex
);
4015 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4017 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4018 if (!dev
->tag_map
) {
4022 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4024 init_waitqueue_head(&dev
->tag_wq
);
4030 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4032 struct ssd_device
*dev
= cmd
->dev
;
4033 struct bio
*bio
= cmd
->bio
;
4034 unsigned long dur
= jiffies
- cmd
->start_time
;
4035 int rw
= bio_data_dir(bio
);
4037 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4038 int cpu
= part_stat_lock();
4039 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4040 part_round_stats(cpu
, part
);
4041 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4042 part_dec_in_flight(part
, rw
);
4044 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4045 int cpu
= part_stat_lock();
4046 struct hd_struct
*part
= &dev
->gd
->part0
;
4047 part_round_stats(cpu
, part
);
4048 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4050 part
->in_flight
[rw
] = atomic_dec_return(&dev
->in_flight
[rw
]);
4051 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4053 disk_round_stats(dev
->gd
);
4055 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4056 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4059 disk_round_stats(dev
->gd
);
4062 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4064 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4066 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4070 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4072 struct ssd_device
*dev
= cmd
->dev
;
4073 struct bio
*bio
= cmd
->bio
;
4074 int rw
= bio_data_dir(bio
);
4076 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4077 int cpu
= part_stat_lock();
4078 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4079 part_round_stats(cpu
, part
);
4080 part_stat_inc(cpu
, part
, ios
[rw
]);
4081 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4082 part_inc_in_flight(part
, rw
);
4084 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4085 int cpu
= part_stat_lock();
4086 struct hd_struct
*part
= &dev
->gd
->part0
;
4087 part_round_stats(cpu
, part
);
4088 part_stat_inc(cpu
, part
, ios
[rw
]);
4089 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4091 part
->in_flight
[rw
] = atomic_inc_return(&dev
->in_flight
[rw
]);
4092 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4094 disk_round_stats(dev
->gd
);
4096 disk_stat_inc(dev
->gd
, ios
[rw
]);
4097 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4098 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4101 disk_round_stats(dev
->gd
);
4104 disk_stat_inc(dev
->gd
, writes
);
4105 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4107 disk_stat_inc(dev
->gd
, reads
);
4108 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4110 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4113 cmd
->start_time
= jiffies
;
4117 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4119 spin_lock(&dev
->sendq_lock
);
4120 ssd_blist_add(&dev
->sendq
, bio
);
4121 spin_unlock(&dev
->sendq_lock
);
4123 atomic_inc(&dev
->in_sendq
);
4124 wake_up(&dev
->send_waitq
);
4127 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4129 struct ssd_device
*dev
= cmd
->dev
;
4130 struct bio
*bio
= cmd
->bio
;
4131 int errors
= cmd
->errors
;
4135 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4136 if (!(bio
->bi_rw
& REQ_DISCARD
)) {
4137 ssd_end_io_acct(cmd
);
4139 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4140 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4143 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4144 if (!bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4145 ssd_end_io_acct(cmd
);
4147 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4148 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4152 ssd_end_io_acct(cmd
);
4155 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4156 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4161 ssd_put_tag(dev
, tag
);
4163 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4164 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4165 bio_endio(bio
, errors
);
4167 bio_endio(bio
, bio
->bi_size
, errors
);
4169 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4170 spin_lock(&dev
->doneq_lock
);
4171 ssd_blist_add(&dev
->doneq
, bio
);
4172 spin_unlock(&dev
->doneq_lock
);
4174 atomic_inc(&dev
->in_doneq
);
4175 wake_up(&dev
->done_waitq
);
4179 complete(cmd
->waiting
);
4184 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4186 struct ssd_device
*dev
= cmd
->dev
;
4187 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4190 for (i
=0; i
<dev
->nr_queue
; i
++) {
4191 disable_irq(dev
->entry
[i
].vector
);
4194 atomic_inc(&dev
->tocnt
);
4196 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4197 cmd
->errors
= -ETIMEDOUT
;
4198 ssd_end_request(cmd
);
4201 for (i
=0; i
<dev
->nr_queue
; i
++) {
4202 enable_irq(dev
->entry
[i
].vector
);
4210 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4212 init_timer(&cmd
->cmd_timer
);
4214 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4215 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4216 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4218 add_timer(&cmd
->cmd_timer
);
4221 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4223 return del_timer(&cmd
->cmd_timer
);
4226 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4230 timer
->data
= (unsigned long)data
;
4231 timer
->expires
= jiffies
+ timeout
;
4232 timer
->function
= (void (*)(unsigned long)) complt
;
/*
 * Disarm a generic timer armed with ssd_add_timer().  Returns the
 * del_timer() result (nonzero if the timer was still pending).
 */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4242 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4244 struct ssd_device
*dev
= cmd
->dev
;
4245 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4247 ssd_end_timeout_request(cmd
);
4249 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4253 static void __ssd_done(unsigned long data
)
4255 struct ssd_cmd
*cmd
;
4258 local_irq_disable();
4259 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4260 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4262 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4266 while (!list_empty(&localq
)) {
4267 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4268 list_del_init(&cmd
->list
);
4270 ssd_end_request(cmd
);
4274 static void __ssd_done_db(unsigned long data
)
4276 struct ssd_cmd
*cmd
;
4277 struct ssd_device
*dev
;
4281 local_irq_disable();
4282 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4283 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4285 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4289 while (!list_empty(&localq
)) {
4290 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4291 list_del_init(&cmd
->list
);
4293 dev
= (struct ssd_device
*)cmd
->dev
;
4297 sector_t off
= dev
->db_info
.data
.loc
.off
;
4298 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4300 switch (dev
->db_info
.type
) {
4301 case SSD_DEBUG_READ_ERR
:
4302 if (bio_data_dir(bio
) == READ
&&
4303 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4307 case SSD_DEBUG_WRITE_ERR
:
4308 if (bio_data_dir(bio
) == WRITE
&&
4309 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4310 cmd
->errors
= -EROFS
;
4313 case SSD_DEBUG_RW_ERR
:
4314 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4315 if (bio_data_dir(bio
) == READ
) {
4318 cmd
->errors
= -EROFS
;
4327 ssd_end_request(cmd
);
4331 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4333 unsigned long flags
= 0;
4335 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4336 struct ssd_device
*dev
= cmd
->dev
;
4337 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4338 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4345 local_irq_save(flags
);
4346 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4347 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4348 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4350 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4351 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4353 local_irq_restore(flags
);
4358 static inline void ssd_done(struct ssd_cmd
*cmd
)
4360 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4361 struct ssd_device
*dev
= cmd
->dev
;
4362 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4363 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4370 ssd_end_request(cmd
);
4375 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4377 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4379 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4381 spin_lock(&dev
->cmd_lock
);
4382 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4383 spin_unlock(&dev
->cmd_lock
);
4386 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4388 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4390 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4392 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4395 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4397 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4398 struct bio
*bio
= cmd
->bio
;
4400 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4403 switch (dev
->db_info
.type
) {
4404 case SSD_DEBUG_READ_TO
:
4405 if (bio_data_dir(bio
) == READ
) {
4409 case SSD_DEBUG_WRITE_TO
:
4410 if (bio_data_dir(bio
) == WRITE
) {
4414 case SSD_DEBUG_RW_TO
:
4422 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4426 /* fixed for BIOVEC_PHYS_MERGEABLE */
4427 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4428 #include <linux/bio.h>
4429 #include <linux/io.h>
4430 #include <xen/page.h>
4432 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4433 const struct bio_vec
*vec2
)
4435 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4436 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4438 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4439 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4442 #ifdef BIOVEC_PHYS_MERGEABLE
4443 #undef BIOVEC_PHYS_MERGEABLE
4445 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4446 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4447 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4451 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4453 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4454 struct bio_vec
*bvec
, *bvprv
= NULL
;
4455 struct scatterlist
*sg
= NULL
;
4456 int i
= 0, nsegs
= 0;
4458 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4459 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4463 * for each segment in bio
4465 bio_for_each_segment(bvec
, bio
, i
) {
4466 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4467 sg
->length
+= bvec
->bv_len
;
4469 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4473 sg
= sg
? (sg
+ 1) : sgl
;
4474 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4475 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4477 sg
->page
= bvec
->bv_page
;
4478 sg
->length
= bvec
->bv_len
;
4479 sg
->offset
= bvec
->bv_offset
;
4486 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4496 struct bio_vec bvec
, bvprv
;
4497 struct bvec_iter iter
;
4498 struct scatterlist
*sg
= NULL
;
4502 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4505 * for each segment in bio
4507 bio_for_each_segment(bvec
, bio
, iter
) {
4508 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4509 sg
->length
+= bvec
.bv_len
;
4511 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4515 sg
= sg
? (sg
+ 1) : sgl
;
4517 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4534 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4536 struct ssd_cmd
*cmd
;
4537 struct ssd_rw_msg
*msg
;
4538 struct ssd_sg_entry
*sge
;
4539 sector_t block
= bio_start(bio
);
4543 tag
= ssd_get_tag(dev
, wait
);
4548 cmd
= &dev
->cmd
[tag
];
4552 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4554 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4555 if (bio
->bi_rw
& REQ_DISCARD
) {
4556 unsigned int length
= bio_sectors(bio
);
4558 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4560 msg
->fun
= SSD_FUNC_TRIM
;
4563 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4565 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4568 block
+= sge
->length
;
4569 length
-= sge
->length
;
4576 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4581 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4582 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4583 unsigned int length
= bio_sectors(bio
);
4585 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4587 msg
->fun
= SSD_FUNC_TRIM
;
4590 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4592 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4595 block
+= sge
->length
;
4596 length
-= sge
->length
;
4603 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4610 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4611 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4614 if (bio_data_dir(bio
) == READ
) {
4615 msg
->fun
= SSD_FUNC_READ
;
4618 msg
->fun
= SSD_FUNC_WRITE
;
4619 msg
->flag
= dev
->wmode
;
4623 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4625 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4626 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4628 block
+= sge
->length
;
4634 #ifdef SSD_OT_PROTECT
4635 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4636 msleep_interruptible(dev
->ot_delay
);
4640 ssd_start_io_acct(cmd
);
4646 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4648 struct ssd_cmd
*cmd
;
4649 struct ssd_rw_msg
*msg
;
4650 struct ssd_sg_entry
*sge
;
4651 struct scatterlist
*sgl
;
4652 sector_t block
= bio_start(bio
);
4656 tag
= ssd_get_tag(dev
, wait
);
4661 cmd
= &dev
->cmd
[tag
];
4665 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4669 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4670 if (bio
->bi_rw
& REQ_DISCARD
) {
4671 unsigned int length
= bio_sectors(bio
);
4673 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4675 msg
->fun
= SSD_FUNC_TRIM
;
4678 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4680 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4683 block
+= sge
->length
;
4684 length
-= sge
->length
;
4691 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4696 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4697 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4698 unsigned int length
= bio_sectors(bio
);
4700 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4702 msg
->fun
= SSD_FUNC_TRIM
;
4705 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4707 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4710 block
+= sge
->length
;
4711 length
-= sge
->length
;
4718 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4725 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4728 if (bio_data_dir(bio
) == READ
) {
4729 msg
->fun
= SSD_FUNC_READ
;
4731 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4733 msg
->fun
= SSD_FUNC_WRITE
;
4734 msg
->flag
= dev
->wmode
;
4735 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4739 for (i
=0; i
<cmd
->nsegs
; i
++) {
4741 sge
->length
= sg_dma_len(sgl
) >> 9;
4742 sge
->buf
= sg_dma_address(sgl
);
4744 block
+= sge
->length
;
4751 #ifdef SSD_OT_PROTECT
4752 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4753 msleep_interruptible(dev
->ot_delay
);
4757 ssd_start_io_acct(cmd
);
4764 static int ssd_done_thread(void *data
)
4766 struct ssd_device
*dev
;
4769 #ifdef SSD_ESCAPE_IRQ
4778 //set_user_nice(current, -5);
4780 while (!kthread_should_stop()) {
4781 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4783 while (atomic_read(&dev
->in_doneq
)) {
4785 spin_lock(&dev
->doneq_lock
);
4786 bio
= ssd_blist_get(&dev
->doneq
);
4787 spin_unlock(&dev
->doneq_lock
);
4789 spin_lock_irq(&dev
->doneq_lock
);
4790 bio
= ssd_blist_get(&dev
->doneq
);
4791 spin_unlock_irq(&dev
->doneq_lock
);
4795 next
= bio
->bi_next
;
4796 bio
->bi_next
= NULL
;
4797 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4800 bio_endio(bio
, bio
->bi_size
, 0);
4802 atomic_dec(&dev
->in_doneq
);
4808 #ifdef SSD_ESCAPE_IRQ
4809 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4810 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4811 cpumask_setall(&new_mask
);
4812 cpumask_clear_cpu(dev
->irq_cpu
, &new_mask
);
4813 set_cpus_allowed_ptr(current
, &new_mask
);
4815 cpus_setall(new_mask
);
4816 cpu_clear(dev
->irq_cpu
, new_mask
);
4817 set_cpus_allowed(current
, new_mask
);
4826 static int ssd_send_thread(void *data
)
4828 struct ssd_device
*dev
;
4831 #ifdef SSD_ESCAPE_IRQ
4840 //set_user_nice(current, -5);
4842 while (!kthread_should_stop()) {
4843 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4845 while (atomic_read(&dev
->in_sendq
)) {
4846 spin_lock(&dev
->sendq_lock
);
4847 bio
= ssd_blist_get(&dev
->sendq
);
4848 spin_unlock(&dev
->sendq_lock
);
4851 next
= bio
->bi_next
;
4852 bio
->bi_next
= NULL
;
4853 #ifdef SSD_QUEUE_PBIO
4854 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4855 __ssd_submit_pbio(dev
, bio
, 1);
4857 ssd_submit_bio(dev
, bio
, 1);
4860 ssd_submit_bio(dev
, bio
, 1);
4862 atomic_dec(&dev
->in_sendq
);
4868 #ifdef SSD_ESCAPE_IRQ
4869 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4870 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4871 cpumask_setall(&new_mask
);
4872 cpumask_clear_cpu(dev
->irq_cpu
, &new_mask
);
4873 set_cpus_allowed_ptr(current
, &new_mask
);
4875 cpus_setall(new_mask
);
4876 cpu_clear(dev
->irq_cpu
, new_mask
);
4877 set_cpus_allowed(current
, new_mask
);
4887 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4889 kthread_stop(dev
->send_thread
);
4890 kthread_stop(dev
->done_thread
);
4893 static int ssd_init_thread(struct ssd_device
*dev
)
4897 atomic_set(&dev
->in_doneq
, 0);
4898 atomic_set(&dev
->in_sendq
, 0);
4900 spin_lock_init(&dev
->doneq_lock
);
4901 spin_lock_init(&dev
->sendq_lock
);
4903 ssd_blist_init(&dev
->doneq
);
4904 ssd_blist_init(&dev
->sendq
);
4906 init_waitqueue_head(&dev
->done_waitq
);
4907 init_waitqueue_head(&dev
->send_waitq
);
4909 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4910 if (IS_ERR(dev
->done_thread
)) {
4911 ret
= PTR_ERR(dev
->done_thread
);
4912 goto out_done_thread
;
4915 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4916 if (IS_ERR(dev
->send_thread
)) {
4917 ret
= PTR_ERR(dev
->send_thread
);
4918 goto out_send_thread
;
4924 kthread_stop(dev
->done_thread
);
4930 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4932 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4934 spin_lock(&dev
->dcmd_lock
);
4935 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4936 spin_unlock(&dev
->dcmd_lock
);
4939 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
4941 struct ssd_dcmd
*dcmd
= NULL
;
4943 spin_lock(&dev
->dcmd_lock
);
4944 if (!list_empty(&dev
->dcmd_list
)) {
4945 dcmd
= list_entry(dev
->dcmd_list
.next
,
4946 struct ssd_dcmd
, list
);
4947 list_del_init(&dcmd
->list
);
4949 spin_unlock(&dev
->dcmd_lock
);
4954 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
4959 static int ssd_init_dcmd(struct ssd_device
*dev
)
4961 struct ssd_dcmd
*dcmd
;
4962 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
4965 spin_lock_init(&dev
->dcmd_lock
);
4966 INIT_LIST_HEAD(&dev
->dcmd_list
);
4967 init_waitqueue_head(&dev
->dcmd_wq
);
4969 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
4971 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
4972 goto out_alloc_dcmd
;
4974 memset(dev
->dcmd
, 0, dcmd_sz
);
4976 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
4978 INIT_LIST_HEAD(&dcmd
->list
);
4979 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4988 static void ssd_put_dmsg(void *msg
)
4990 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
4991 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4993 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
4995 wake_up(&dev
->dcmd_wq
);
4998 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5000 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5004 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5007 dcmd
= ssd_get_dcmd(dev
);
5009 finish_wait(&dev
->dcmd_wq
, &wait
);
5015 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5017 DECLARE_COMPLETION(wait
);
5018 struct ssd_cmd
*cmd
;
5022 tag
= ssd_get_tag(dev
, 1);
5027 cmd
= &dev
->cmd
[tag
];
5029 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5030 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5032 cmd
->waiting
= &wait
;
5036 wait_for_completion(cmd
->waiting
);
5037 cmd
->waiting
= NULL
;
5039 if (cmd
->errors
== -ETIMEDOUT
) {
5041 } else if (cmd
->errors
) {
5046 *done
= cmd
->nr_log
;
5048 ssd_put_tag(dev
, cmd
->tag
);
5053 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5055 DECLARE_COMPLETION(wait
);
5056 struct ssd_cmd
*cmd
;
5060 tag
= ssd_barrier_get_tag(dev
);
5065 cmd
= &dev
->cmd
[tag
];
5067 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5068 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5070 cmd
->waiting
= &wait
;
5074 wait_for_completion(cmd
->waiting
);
5075 cmd
->waiting
= NULL
;
5077 if (cmd
->errors
== -ETIMEDOUT
) {
5079 } else if (cmd
->errors
) {
5084 *done
= cmd
->nr_log
;
5086 ssd_barrier_put_tag(dev
, cmd
->tag
);
5091 #ifdef SSD_OT_PROTECT
5092 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5099 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5103 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5106 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5107 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5109 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5110 if (val
== 0xffffffffffffffffull
) {
5114 cur
= (int)CUR_TEMP(val
);
5116 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5117 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5118 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5119 dev
->ot_delay
= SSD_OT_DELAY
;
5126 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5127 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5128 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5135 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5141 if (!dev
|| !status
) {
5145 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5146 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5147 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5148 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5149 if ((val
>> 22) & 0x1) {
5155 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5156 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5157 if ((val
>> 22) & 0x1) {
5163 *status
= !!dev
->ot_delay
;
5170 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5176 mutex_lock(&dev
->fw_mutex
);
5178 dev
->ot_protect
= !!protect
;
5180 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5181 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5182 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5183 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5184 if (dev
->ot_protect
) {
5189 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5192 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5193 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5194 if (dev
->ot_protect
) {
5199 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5203 mutex_unlock(&dev
->fw_mutex
);
5206 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5208 ssd_set_ot_protect(dev
, ot_protect
);
5210 #ifdef SSD_OT_PROTECT
5211 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5218 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5220 struct ssd_log_op_msg
*msg
;
5221 struct ssd_log_msg
*lmsg
;
5223 size_t length
= dev
->hw_info
.log_sz
;
5226 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5230 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5231 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5232 ret
= dma_mapping_error(buf_dma
);
5234 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5237 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5238 goto out_dma_mapping
;
5241 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5243 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5244 lmsg
= (struct ssd_log_msg
*)msg
;
5245 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5246 lmsg
->ctrl_idx
= ctrl_idx
;
5247 lmsg
->buf
= buf_dma
;
5249 msg
->fun
= SSD_FUNC_READ_LOG
;
5250 msg
->ctrl_idx
= ctrl_idx
;
5254 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5257 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5263 #define SSD_LOG_PRINT_BUF_SZ 256
5264 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5266 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5267 struct ssd_log_entry
*le
;
5269 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5275 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5276 if (log_desc
->event
== le
->event
) {
5286 if (log_desc
->level
< log_level
) {
5291 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5294 sn
= dev
->labelv3
.barcode
;
5297 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5299 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5300 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5303 switch (log_desc
->data
) {
5304 case SSD_LOG_DATA_NONE
:
5306 case SSD_LOG_DATA_LOC
:
5307 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5308 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5309 if (log_desc
->sblock
) {
5310 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5312 if (log_desc
->spage
) {
5313 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5316 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5317 if (log_desc
->sblock
) {
5318 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5320 if (log_desc
->spage
) {
5321 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5325 case SSD_LOG_DATA_HEX
:
5326 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5331 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5333 switch (log_desc
->level
) {
5334 case SSD_LOG_LEVEL_INFO
:
5335 hio_info("%s\n", print_buf
);
5337 case SSD_LOG_LEVEL_NOTICE
:
5338 hio_note("%s\n", print_buf
);
5340 case SSD_LOG_LEVEL_WARNING
:
5341 hio_warn("%s\n", print_buf
);
5343 case SSD_LOG_LEVEL_ERR
:
5344 hio_err("%s\n", print_buf
);
5345 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5348 hio_warn("%s\n", print_buf
);
5353 return log_desc
->level
;
5356 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5357 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5360 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5365 case SSD_LOG_OVER_TEMP
: {
5366 #ifdef SSD_OT_PROTECT
5367 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5368 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5369 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5370 dev
->ot_delay
= SSD_OT_DELAY
;
5377 case SSD_LOG_NORMAL_TEMP
: {
5378 #ifdef SSD_OT_PROTECT
5379 /* need to check all controller's temperature */
5380 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5385 case SSD_LOG_BATTERY_FAULT
: {
5388 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5389 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5390 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5394 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5395 ssd_switch_wmode(dev
, dev
->user_wmode
);
5400 case SSD_LOG_BATTERY_OK
: {
5401 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5402 ssd_switch_wmode(dev
, dev
->user_wmode
);
5407 case SSD_LOG_BOARD_VOLT_FAULT
: {
5408 ssd_mon_boardvolt(dev
);
5412 case SSD_LOG_CLEAR_LOG
: {
5414 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5418 case SSD_LOG_CAP_VOLT_FAULT
:
5419 case SSD_LOG_CAP_LEARN_FAULT
:
5420 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5421 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5422 ssd_switch_wmode(dev
, dev
->user_wmode
);
5431 /* ssd event call */
5432 if (dev
->event_call
) {
5433 dev
->event_call(dev
->gd
, event
, level
);
5436 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5437 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5444 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5450 mutex_lock(&dev
->internal_log_mutex
);
5452 size
= sizeof(struct ssd_log
);
5453 off
= dev
->internal_log
.nr_log
* size
;
5455 if (off
== dev
->rom_info
.log_sz
) {
5456 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5457 hio_warn("%s: internal log is full\n", dev
->name
);
5462 internal_log
= dev
->internal_log
.log
+ off
;
5463 memcpy(internal_log
, log
, size
);
5465 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5466 off
+= dev
->rom_info
.log_base
;
5468 ret
= ssd_spi_write(dev
, log
, off
, size
);
5474 dev
->internal_log
.nr_log
++;
5477 mutex_unlock(&dev
->internal_log_mutex
);
5481 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5488 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5491 memset(&log
, 0, sizeof(struct ssd_log
));
5493 do_gettimeofday(&tv
);
5494 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5495 log
.time
= tv
.tv_sec
;
5496 log
.le
.event
= event
;
5497 log
.le
.data
.val
= data
;
5499 level
= ssd_parse_log(dev
, &log
, 0);
5500 if (level
>= SSD_LOG_LEVEL
) {
5501 ret
= ssd_save_log(dev
, &log
);
5505 if (SSD_LOG_LEVEL_ERR
== level
) {
5510 dev
->smart
.log_info
.nr_log
++;
5511 dev
->smart
.log_info
.stat
[level
]++;
5514 ssd_handle_event(dev
, event
, level
);
5519 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5521 struct ssd_log_entry le
;
5524 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5532 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5536 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5541 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5542 queue_work(dev
->workq
, &dev
->log_work
);
5548 static int ssd_do_swlog(struct ssd_device
*dev
)
5550 struct ssd_log_entry le
;
5553 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5554 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5555 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5564 static int __ssd_clear_log(struct ssd_device
*dev
)
5566 uint32_t off
, length
;
5569 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5573 if (dev
->internal_log
.nr_log
== 0) {
5577 mutex_lock(&dev
->internal_log_mutex
);
5579 off
= dev
->rom_info
.log_base
;
5580 length
= dev
->rom_info
.log_sz
;
5582 ret
= ssd_spi_erase(dev
, off
, length
);
5584 hio_warn("%s: log erase: failed\n", dev
->name
);
5588 dev
->internal_log
.nr_log
= 0;
5591 mutex_unlock(&dev
->internal_log_mutex
);
5595 static int ssd_clear_log(struct ssd_device
*dev
)
5599 ret
= __ssd_clear_log(dev
);
5601 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5607 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5609 struct ssd_log_entry
*le
;
5616 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5621 do_gettimeofday(&tv
);
5623 log
.time
= tv
.tv_sec
;
5624 log
.ctrl_idx
= ctrl_idx
;
5626 le
= (ssd_log_entry_t
*)buf
;
5627 while (nr_log
> 0) {
5628 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5630 level
= ssd_parse_log(dev
, &log
, 1);
5631 if (level
>= SSD_LOG_LEVEL
) {
5632 ssd_save_log(dev
, &log
);
5636 if (SSD_LOG_LEVEL_ERR
== level
) {
5640 dev
->smart
.log_info
.nr_log
++;
5641 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5642 dev
->smart
.log_info
.stat
[level
]++;
5646 /* log to the volatile log info */
5647 dev
->log_info
.nr_log
++;
5648 dev
->log_info
.stat
[level
]++;
5652 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5654 /*dev->readonly = 1;
5655 set_disk_ro(dev->gd, 1);
5656 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5660 ssd_handle_event(dev
, le
->event
, level
);
5669 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5670 static void ssd_log_worker(void *data
)
5672 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5674 static void ssd_log_worker(struct work_struct
*work
)
5676 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5681 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5683 if (!dev
->log_buf
) {
5684 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5685 if (!dev
->log_buf
) {
5686 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5692 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5693 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5694 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5696 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5697 hio_warn("%s: do log fail\n", dev
->name
);
5703 ret
= ssd_do_swlog(dev
);
5705 hio_warn("%s: do swlog fail\n", dev
->name
);
5709 static void ssd_cleanup_log(struct ssd_device
*dev
)
5712 kfree(dev
->log_buf
);
5713 dev
->log_buf
= NULL
;
5716 sfifo_free(&dev
->log_fifo
);
5718 if (dev
->internal_log
.log
) {
5719 vfree(dev
->internal_log
.log
);
5720 dev
->internal_log
.log
= NULL
;
5724 static int ssd_init_log(struct ssd_device
*dev
)
5726 struct ssd_log
*log
;
5731 mutex_init(&dev
->internal_log_mutex
);
5733 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5734 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5736 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5739 off
= dev
->rom_info
.log_base
;
5740 size
= dev
->rom_info
.log_sz
;
5742 dev
->internal_log
.log
= vmalloc(size
);
5743 if (!dev
->internal_log
.log
) {
5748 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5750 goto out_alloc_log_fifo
;
5753 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5757 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5758 while (len
< size
) {
5759 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5764 if (log
->ctrl_idx
== 0xff) {
5768 dev
->internal_log
.nr_log
++;
5770 len
+= sizeof(struct ssd_log
);
5771 off
+= sizeof(struct ssd_log
);
5777 sfifo_free(&dev
->log_fifo
);
5779 vfree(dev
->internal_log
.log
);
5780 dev
->internal_log
.log
= NULL
;
5781 dev
->internal_log
.nr_log
= 0;
5783 /* skip error if not in standard mode */
5784 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5791 static void ssd_stop_workq(struct ssd_device
*dev
)
5793 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5794 flush_workqueue(dev
->workq
);
5797 static void ssd_start_workq(struct ssd_device
*dev
)
5799 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5802 queue_work(dev
->workq
, &dev
->log_work
);
5805 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5807 flush_workqueue(dev
->workq
);
5808 destroy_workqueue(dev
->workq
);
5812 static int ssd_init_workq(struct ssd_device
*dev
)
5816 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5827 static int ssd_init_rom_info(struct ssd_device
*dev
)
5831 mutex_init(&dev
->spi_mutex
);
5832 mutex_init(&dev
->i2c_mutex
);
5834 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5835 /* fix bug: read data to clear status */
5836 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5838 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5839 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5840 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5842 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5843 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5844 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5846 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5847 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5848 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5850 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5852 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5853 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5854 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5855 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5856 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5857 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5858 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5860 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5861 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5862 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5863 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5865 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5866 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5867 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5868 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5870 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
5871 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
5872 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
5874 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
5875 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5877 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
5878 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
5879 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5881 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5882 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5883 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
5884 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
5885 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
5888 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5889 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5890 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5891 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5893 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5894 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5895 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5896 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5898 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5899 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5900 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5901 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5903 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5904 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5905 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
5907 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5908 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
5909 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
5910 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
5911 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
5914 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
5915 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
5918 return ssd_init_spi(dev
);
5922 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
5926 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5927 struct hd_struct
*part
;
5933 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
5937 do_gettimeofday(&tv
);
5938 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
5941 run_time
= tv
.tv_sec
- dev
->uptime
;
5944 /* avoid frequently update */
5945 if (run_time
>= 60) {
5950 smart
->io_stat
.run_time
+= run_time
;
5952 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5953 cpu
= part_stat_lock();
5954 part
= &dev
->gd
->part0
;
5955 part_round_stats(cpu
, part
);
5958 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
5959 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
5960 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
5961 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
5962 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
5964 disk_round_stats(dev
->gd
);
5967 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
5968 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
5969 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
5970 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
5973 disk_round_stats(dev
->gd
);
5976 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
5977 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
5978 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
5979 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
5982 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
5984 for (i
=0; i
<dev
->nr_queue
; i
++) {
5985 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
5986 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
5989 for (i
=0; i
<dev
->nr_queue
; i
++) {
5990 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
5991 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
5995 //dev->uptime = tv.tv_sec;
6000 static int ssd_clear_smart(struct ssd_device
*dev
)
6004 uint32_t off
, length
;
6008 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6013 off
= dev
->rom_info
.smart_base
;
6014 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6016 ret
= ssd_spi_erase(dev
, off
, length
);
6018 hio_warn("%s: info erase: failed\n", dev
->name
);
6022 sversion
= dev
->smart
.version
;
6024 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6025 dev
->smart
.version
= sversion
+ 1;
6026 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6028 /* clear all tmp acc */
6029 for (i
=0; i
<dev
->nr_queue
; i
++) {
6030 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6031 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6034 atomic_set(&dev
->tocnt
, 0);
6036 /* clear tmp log info */
6037 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6039 do_gettimeofday(&tv
);
6040 dev
->uptime
= tv
.tv_sec
;
6043 //ssd_clear_alarm(dev);
6048 static int ssd_save_smart(struct ssd_device
*dev
)
6054 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6057 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6061 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6065 dev
->smart
.version
++;
6067 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6068 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6069 size
= dev
->rom_info
.smart_sz
;
6071 ret
= ssd_spi_erase(dev
, off
, size
);
6073 hio_warn("%s: info erase failed\n", dev
->name
);
6077 size
= sizeof(struct ssd_smart
);
6079 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6081 hio_warn("%s: info write failed\n", dev
->name
);
6092 static int ssd_init_smart(struct ssd_device
*dev
)
6094 struct ssd_smart
*smart
;
6100 do_gettimeofday(&tv
);
6101 dev
->uptime
= tv
.tv_sec
;
6103 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6107 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6113 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6116 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6117 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6119 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6120 size
= sizeof(struct ssd_smart
);
6122 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6124 hio_warn("%s: info read failed\n", dev
->name
);
6128 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6130 smart
[i
].version
= 0;
6134 if (smart
[i
].version
> dev
->smart
.version
) {
6135 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6139 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6140 /* first time power up */
6141 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6142 dev
->smart
.version
= 1;
6145 /* check log info */
6147 struct ssd_log_info log_info
;
6148 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6150 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6152 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6153 /* skip the volatile log info */
6154 if (SSD_LOG_SEU_FAULT
!= log
->le
.event
&& SSD_LOG_SEU_FAULT1
!= log
->le
.event
) {
6155 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6163 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6164 if (log_info
.stat
[i
] > dev
->smart
.log_info
.stat
[i
]) {
6166 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6167 dev
->smart
.version
++;
6173 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6174 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6178 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6179 size
= dev
->rom_info
.smart_sz
;
6181 ret
= ssd_spi_erase(dev
, off
, size
);
6183 hio_warn("%s: info erase failed\n", dev
->name
);
6187 size
= sizeof(struct ssd_smart
);
6188 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6190 hio_warn("%s: info write failed\n", dev
->name
);
6197 /* sync smart with alarm led */
6198 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6199 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6206 /* skip error if not in standard mode */
6207 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6214 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6216 struct ssd_bm_manufacturer_data bm_md
= {0};
6217 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6225 mutex_lock(&dev
->bm_mutex
);
6227 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6228 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6233 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6234 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6239 if (bm_md
.firmware_ver
& 0xF000) {
6244 *ver
= bm_md
.firmware_ver
;
6247 mutex_unlock(&dev
->bm_mutex
);
6251 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6254 int i
= SSD_BM_RETRY_MAX
;
6258 ret
= __ssd_bm_get_version(dev
, &tmp
);
6272 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6274 struct ssd_bm_configuration_registers bm_cr
;
6275 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6279 mutex_lock(&dev
->bm_mutex
);
6281 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6282 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6287 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6288 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6293 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6298 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6301 mutex_unlock(&dev
->bm_mutex
);
6305 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6308 int i
= SSD_BM_RETRY_MAX
;
6312 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6326 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6328 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6329 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6332 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6341 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6344 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6347 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6357 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6360 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6363 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6373 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6375 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6376 struct ssd_bm bm_status
;
6377 uint8_t buf
[2] = {0, };
6382 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6384 while (bm_sbs
->desc
!= NULL
) {
6385 switch (bm_sbs
->size
) {
6387 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6389 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6395 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6397 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6400 //val = *(uint16_t *)buf;
6408 switch (bm_sbs
->unit
) {
6409 case SBS_UNIT_VALUE
:
6410 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6412 case SBS_UNIT_TEMPERATURE
:
6413 cval
= (uint16_t)(val
- 2731) / 10;
6414 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6416 case SBS_UNIT_VOLTAGE
:
6417 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6419 case SBS_UNIT_CURRENT
:
6420 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6423 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6425 case SBS_UNIT_PERCENT
:
6426 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6428 case SBS_UNIT_CAPACITANCE
:
6429 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6440 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6446 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6448 struct ssd_bm bm_status
= {0};
6453 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6458 /* capacitor voltage */
6459 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6464 for (i
=0; i
<nr_cap
; i
++) {
6465 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6466 *status
= SSD_BMSTATUS_WARNING
;
6472 if (bm_status
.sf_status
) {
6473 *status
= SSD_BMSTATUS_WARNING
;
6478 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6479 *status
= SSD_BMSTATUS_CHARGING
;
6481 *status
= SSD_BMSTATUS_OK
;
6488 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6490 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6491 static void ssd_bm_worker(void *data
)
6493 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6495 static void ssd_bm_worker(struct work_struct
*work
)
6497 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6503 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6507 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6511 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6515 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6517 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6521 /* need cap learning ? */
6522 if (!(opstatus
& 0xF0)) {
6523 ret
= ssd_bm_enter_cap_learning(dev
);
6525 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6531 static void ssd_bm_routine_start(void *data
)
6533 struct ssd_device
*dev
;
6540 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6541 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6542 queue_work(dev
->workq
, &dev
->bm_work
);
6544 queue_work(dev
->workq
, &dev
->capmon_work
);
6550 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6557 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6562 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6567 /* make sure the lm80 voltage value is updated */
6568 msleep(SSD_LM80_CONV_INTERVAL
);
6570 /* check if full charged */
6573 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6575 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6576 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6580 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6581 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6586 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6590 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6593 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6595 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6596 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6600 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6607 /* enter cap learn */
6608 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6612 msleep(SSD_PL_CAP_LEARN_WAIT
);
6614 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6615 if (!((t
>> 1) & 0x1)) {
6620 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6626 if ((t
>> 4) & 0x1) {
6637 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6643 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6651 mutex_lock(&dev
->bm_mutex
);
6653 ssd_stop_workq(dev
);
6655 ret
= ssd_do_cap_learn(dev
, cap
);
6657 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6661 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6664 ssd_start_workq(dev
);
6665 mutex_unlock(&dev
->bm_mutex
);
6670 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6678 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6682 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6689 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6691 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6692 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6696 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6697 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6702 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6704 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6707 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6710 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6711 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6716 /* enable cap INx */
6717 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6719 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6720 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6726 /* skip error if not in standard mode */
6727 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6733 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6739 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6743 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6748 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6752 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6753 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
6761 static int ssd_init_pl_cap(struct ssd_device
*dev
)
6765 /* set here: user write mode */
6766 dev
->user_wmode
= wmode
;
6768 mutex_init(&dev
->bm_mutex
);
6770 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6772 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
6773 if ((val
>> 1) & 0x1) {
6774 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
6777 ret
= ssd_check_pl_cap(dev
);
6779 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/*
 * Ensure a fixed-size character field is NUL-terminated.
 *
 * Scans the first @len bytes of @str; if a terminator is already present
 * the buffer is left untouched. Otherwise the final byte is overwritten
 * with '\0', truncating the field in place. Used on label fields read
 * raw from flash, which may lack termination.
 *
 * Fix: the original wrote the terminating byte unconditionally, which is
 * an out-of-bounds write for len <= 0 (str[-1]); guard against that and
 * against a NULL pointer.
 */
static void __end_str(char *str, int len)
{
	int i;

	/* Nothing to terminate; also avoids the str[len-1] OOB write. */
	if (!str || len <= 0) {
		return;
	}

	for (i = 0; i < len; i++) {
		if (*(str + i) == '\0') {
			return;
		}
	}

	/* No NUL within the field: force-terminate the last byte. */
	*(str + len - 1) = '\0';
}
6798 static int ssd_init_label(struct ssd_device
*dev
)
6804 /* label location */
6805 off
= dev
->rom_info
.label_base
;
6807 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6808 size
= sizeof(struct ssd_label
);
6811 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
6813 memset(&dev
->label
, 0, size
);
6817 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
6818 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
6819 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
6820 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
6821 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
6822 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
6824 size
= sizeof(struct ssd_labelv3
);
6827 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
6829 memset(&dev
->labelv3
, 0, size
);
6833 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6834 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6835 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
6836 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
6837 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6838 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6839 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
6840 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
6841 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
6845 /* skip error if not in standard mode */
6846 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6852 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
6854 struct ssd_device
*dev
;
6856 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
6860 dev
= bdev
->bd_disk
->private_data
;
6862 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
6863 memset(label
, 0, sizeof(struct ssd_label
));
6864 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6865 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6866 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6867 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6869 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
6875 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
6877 uint16_t bm_ver
= 0;
6880 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6881 ret
= ssd_bm_get_version(dev
, &bm_ver
);
6887 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
6888 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
6889 ver
->bm_ver
= bm_ver
;
6890 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
6891 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
6898 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
6900 struct ssd_device
*dev
;
6903 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
6907 dev
= bdev
->bd_disk
->private_data
;
6909 mutex_lock(&dev
->fw_mutex
);
6910 ret
= __ssd_get_version(dev
, ver
);
6911 mutex_unlock(&dev
->fw_mutex
);
6916 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
6924 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6930 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
6931 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
6932 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
6933 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
6934 *temp
= (int)dev
->db_info
.data
.log
.extra
;
6939 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
6940 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
6942 val
= ssd_reg_read(dev
->ctrlp
+ off
);
6943 if (val
== 0xffffffffffffffffull
) {
6947 cur
= (int)CUR_TEMP(val
);
6958 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
6960 struct ssd_device
*dev
;
6963 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
6967 dev
= bdev
->bd_disk
->private_data
;
6970 mutex_lock(&dev
->fw_mutex
);
6971 ret
= __ssd_get_temperature(dev
, temp
);
6972 mutex_unlock(&dev
->fw_mutex
);
6977 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
6979 struct ssd_device
*dev
;
6981 if (!bdev
|| !(bdev
->bd_disk
)) {
6985 dev
= bdev
->bd_disk
->private_data
;
6986 ssd_set_ot_protect(dev
, !!otprotect
);
6991 int ssd_bm_status(struct block_device
*bdev
, int *status
)
6993 struct ssd_device
*dev
;
6996 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7000 dev
= bdev
->bd_disk
->private_data
;
7002 mutex_lock(&dev
->fw_mutex
);
7003 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7004 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7005 *status
= SSD_BMSTATUS_WARNING
;
7007 *status
= SSD_BMSTATUS_OK
;
7009 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7010 ret
= __ssd_bm_status(dev
, status
);
7012 *status
= SSD_BMSTATUS_OK
;
7014 mutex_unlock(&dev
->fw_mutex
);
7019 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7021 struct ssd_device
*dev
;
7023 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7027 dev
= bdev
->bd_disk
->private_data
;
7029 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7030 paddr
->bus
= dev
->pdev
->bus
->number
;
7031 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7032 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7038 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7043 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7047 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7048 if (0xffffffffull
== acc
->threshold_l1
) {
7051 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7052 if (0xffffffffull
== acc
->threshold_l2
) {
7057 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7058 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7059 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7060 if (0xffffffffull
== acc
->val
) {
7063 if (val
> acc
->val
) {
7072 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7077 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7081 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7082 if (0xffffffffull
== acc
->threshold_l1
) {
7085 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7086 if (0xffffffffull
== acc
->threshold_l2
) {
7091 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7092 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7093 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7094 if (0xffffffffull
== acc
->val
) {
7098 if (val
> acc
->val
) {
7109 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7111 struct ssd_ram_op_msg
*msg
;
7113 size_t len
= length
;
7117 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7118 || !length
|| length
> dev
->hw_info
.ram_max_len
7119 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7123 len
/= dev
->hw_info
.ram_align
;
7124 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7126 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7127 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7128 ret
= dma_mapping_error(buf_dma
);
7130 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7133 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7134 goto out_dma_mapping
;
7137 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7139 msg
->fun
= SSD_FUNC_RAM_READ
;
7140 msg
->ctrl_idx
= ctrl_idx
;
7141 msg
->start
= (uint32_t)ofs_w
;
7145 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7148 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7154 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7156 struct ssd_ram_op_msg
*msg
;
7158 size_t len
= length
;
7162 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7163 || !length
|| length
> dev
->hw_info
.ram_max_len
7164 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7168 len
/= dev
->hw_info
.ram_align
;
7169 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7171 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7172 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7173 ret
= dma_mapping_error(buf_dma
);
7175 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7178 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7179 goto out_dma_mapping
;
7182 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7184 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7185 msg
->ctrl_idx
= ctrl_idx
;
7186 msg
->start
= (uint32_t)ofs_w
;
7190 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7193 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7200 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7207 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7208 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7213 len
= dev
->hw_info
.ram_max_len
;
7214 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7218 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7231 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7238 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7239 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7244 len
= dev
->hw_info
.ram_max_len
;
7245 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7249 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7264 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7266 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7267 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7269 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7273 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7277 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7283 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7285 struct ssd_nand_op_msg
*msg
;
7292 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7293 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7294 ret
= dma_mapping_error(buf_dma
);
7296 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7299 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7300 goto out_dma_mapping
;
7303 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7304 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7308 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7310 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7311 msg
->chip_no
= flash
;
7312 msg
->chip_ce
= chip
;
7313 msg
->ctrl_idx
= ctrl_idx
;
7316 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7319 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7326 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7327 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7329 struct ssd_nand_op_msg
*msg
;
7338 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7342 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7347 length
= page_count
* dev
->hw_info
.page_size
;
7349 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7350 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7351 ret
= dma_mapping_error(buf_dma
);
7353 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7356 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7357 goto out_dma_mapping
;
7360 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7361 flash
= (flash
<< 1) | chip
;
7365 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7367 msg
->fun
= SSD_FUNC_NAND_READ
;
7368 msg
->ctrl_idx
= ctrl_idx
;
7369 msg
->chip_no
= flash
;
7370 msg
->chip_ce
= chip
;
7371 msg
->page_no
= page
;
7372 msg
->page_count
= page_count
;
7375 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7378 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7385 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7386 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7388 struct ssd_nand_op_msg
*msg
;
7397 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7401 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7406 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7408 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7409 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7410 ret
= dma_mapping_error(buf_dma
);
7412 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7415 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7416 goto out_dma_mapping
;
7419 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7420 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7424 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7426 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7427 msg
->ctrl_idx
= ctrl_idx
;
7428 msg
->chip_no
= flash
;
7429 msg
->chip_ce
= chip
;
7430 msg
->page_no
= page
;
7431 msg
->page_count
= count
;
7434 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7437 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7444 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7445 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7447 struct ssd_nand_op_msg
*msg
;
7452 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7464 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7469 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7471 /* write data to ram */
7472 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7477 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7478 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7479 ret
= dma_mapping_error(buf_dma
);
7481 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7484 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7485 goto out_dma_mapping
;
7488 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7489 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7493 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7495 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7496 msg
->ctrl_idx
= ctrl_idx
;
7497 msg
->chip_no
= flash
;
7498 msg
->chip_ce
= chip
;
7500 msg
->page_no
= page
;
7501 msg
->page_count
= count
;
7504 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7507 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7513 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7515 struct ssd_nand_op_msg
*msg
;
7518 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7523 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7524 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7528 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7530 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7531 msg
->ctrl_idx
= ctrl_idx
;
7532 msg
->chip_no
= flash
;
7533 msg
->chip_ce
= chip
;
7534 msg
->page_no
= page
;
7536 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_update_bbt() - ask the firmware to flush/update the bad-block table
 * for one flash. Pre-V3 protocol uses the smaller ssd_flush_msg layout
 * (aliased over the same dmsg buffer); V3+ uses ssd_nand_op_msg directly.
 * NOTE(review): fragmentary extraction -- error checks, braces and the
 * return are missing; code kept byte-identical.
 */
7542 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7544 struct ssd_nand_op_msg
*msg
;
7545 struct ssd_flush_msg
*fmsg
;
7548 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7553 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7555 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
/* pre-V3: reuse the message buffer as a flush message */
7556 fmsg
= (struct ssd_flush_msg
*)msg
;
7558 fmsg
->fun
= SSD_FUNC_FLUSH
;
7560 fmsg
->flash
= flash
;
7561 fmsg
->ctrl_idx
= ctrl_idx
;
7563 msg
->fun
= SSD_FUNC_FLUSH
;
7565 msg
->chip_no
= flash
;
7566 msg
->ctrl_idx
= ctrl_idx
;
7569 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7575 /* flash controller init state */
/*
 * __ssd_check_init_state() - probe which flash controllers are present
 * (test-register write/readback), verify the READY register, then poll the
 * per-chip init-state bitmap until all chips report initialized or the
 * wait budget (max_wait, larger on protocol >= V3.2) is exhausted.
 * NOTE(review): fragmentary extraction -- loop bodies, closing braces,
 * declarations of i/j/k/test_data/read_data/ch_start/init_wait, kfree and
 * returns are missing; code kept byte-identical.
 */
7576 static int __ssd_check_init_state(struct ssd_device
*dev
)
7578 uint32_t *init_state
= NULL
;
7579 int reg_base
, reg_sz
;
7580 int max_wait
= SSD_INIT_MAX_WAIT
;
/* controller detection: a live controller returns ~test_data */
7586 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7587 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7588 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7589 if (read_data == ~test_data) {
7590 //dev->hw_info.nr_ctrl++;
7591 dev->hw_info.nr_ctrl_map |= 1<<i;
/* count ready controllers from the READY register bitmap */
7597 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7599 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7600 if (((read_data>>i) & 0x1) == 0) {
7605 if (dev->hw_info.nr_ctrl != j) {
7606 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
/* all controllers must agree on flash info */
7612 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7613 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7614 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7615 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7621 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7622 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7623 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7624 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7629 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7630 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7631 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7632 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
/* newer protocol needs a longer init wait budget */
7638 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7639 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7642 reg_base
= dev
->protocol_info
.init_state_reg
;
7643 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
/* snapshot buffer for the init-state bitmap registers */
7645 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7650 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7652 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7653 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7656 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7657 /* just check the last bit, no need to check all channel */
7658 ch_start
= dev
->hw_info
.max_ch
- 1;
/* bit (chip * max_ch + channel) set means that unit failed init */
7663 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7664 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7665 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
/* still within budget: sleep and re-poll */
7670 if (init_wait
<= max_wait
) {
7671 msleep(SSD_INIT_WAIT
);
7674 if (k
< dev
->hw_info
.nr_ch
) {
7675 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7676 dev
->name
, i
, j
, k
);
7678 hio_warn("%s: controller %d chip %d init failed\n",
7689 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
/*
 * ssd_check_init_state() - public wrapper: skip the hardware init check
 * entirely when the driver is not in standard mode (debug/maintenance
 * modes presumably return early on a missing line).
 */
7695 static int ssd_check_init_state(struct ssd_device
*dev
)
7697 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7701 return __ssd_check_init_state(dev
);
7704 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7706 /* reset flash controller etc */
/*
 * __ssd_reset() - reset the flash controller under fw_mutex.
 *   SSD_RST_NOINIT: reset without re-init; SSD_RST_NORMAL: reset + init;
 *   SSD_RST_FULL: full reset, only supported from protocol V3.2 (older
 *   protocols bail out after unlocking). Logs the reset and re-checks
 *   controller init state before returning.
 * NOTE(review): fragmentary extraction -- range-check return, braces and
 * the SSD_OT_PROTECT body are missing; code kept byte-identical.
 */
7707 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7709 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7713 mutex_lock(&dev
->fw_mutex
);
7715 if (type
== SSD_RST_NOINIT
) { //no init
7716 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7717 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7718 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7719 } else { // full reset
/* full reset requires protocol >= V3.2 */
7720 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7721 mutex_unlock(&dev
->fw_mutex
);
7725 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7728 ssd_reset_resp_ptr(dev
);
7731 #ifdef SSD_OT_PROTECT
/* reprogram the flush timeout for the current write mode */
7738 ssd_set_flush_timeout(dev
, dev
->wmode
);
7740 mutex_unlock(&dev
->fw_mutex
);
7741 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7743 return __ssd_check_init_state(dev
);
/*
 * ssd_save_md() - request a firmware metadata save (flush) via the normal
 * request path. No-op unless in standard driver mode, on protocol > V3,
 * and with dev->save_md enabled.
 * NOTE(review): fragmentary extraction -- early returns, message fields
 * and the final return are missing; code kept byte-identical.
 */
7746 static int ssd_save_md(struct ssd_device
*dev
)
7748 struct ssd_nand_op_msg
*msg
;
7751 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7754 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7758 if (!dev
->save_md
) {
7762 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7764 msg
->fun
= SSD_FUNC_FLUSH
;
7769 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_save_md() - same metadata-save request as ssd_save_md(),
 * but submitted through the barrier request path
 * (ssd_do_barrier_request()) so it orders against in-flight I/O.
 * NOTE(review): fragmentary extraction; code kept byte-identical.
 */
7775 static int ssd_barrier_save_md(struct ssd_device
*dev
)
7777 struct ssd_nand_op_msg
*msg
;
7780 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7783 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7787 if (!dev
->save_md
) {
7791 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7793 msg
->fun
= SSD_FUNC_FLUSH
;
7798 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_flush() - issue a firmware cache flush. Pre-V3 protocol uses the
 * ssd_flush_msg layout aliased over the dmsg buffer; V3+ uses
 * ssd_nand_op_msg. Standard driver mode only.
 * NOTE(review): fragmentary extraction -- early-return bodies, remaining
 * message fields, braces and return are missing; code kept byte-identical.
 */
7804 static int ssd_flush(struct ssd_device
*dev
)
7806 struct ssd_nand_op_msg
*msg
;
7807 struct ssd_flush_msg
*fmsg
;
7810 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7813 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7815 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7816 fmsg
= (struct ssd_flush_msg
*)msg
;
7818 fmsg
->fun
= SSD_FUNC_FLUSH
;
7823 msg
->fun
= SSD_FUNC_FLUSH
;
7829 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_flush() - same firmware flush as ssd_flush(), but submitted
 * through the barrier request path so it orders against in-flight I/O.
 * NOTE(review): fragmentary extraction; code kept byte-identical.
 */
7835 static int ssd_barrier_flush(struct ssd_device
*dev
)
7837 struct ssd_nand_op_msg
*msg
;
7838 struct ssd_flush_msg
*fmsg
;
7841 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7844 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7846 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7847 fmsg
= (struct ssd_flush_msg
*)msg
;
7849 fmsg
->fun
= SSD_FUNC_FLUSH
;
7854 msg
->fun
= SSD_FUNC_FLUSH
;
7860 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/* per-write-mode flush timeout encodings written to SSD_FLUSH_TIMEOUT_REG */
7866 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
7867 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
7868 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
/*
 * ssd_set_flush_timeout() - program the controller flush timeout for write
 * mode m. Only supported from protocol V3.1.1 (older versions presumably
 * return early on a missing line). Register value packs the 2-bit mode in
 * bits 29:28 with the timeout constant in the low bits.
 * NOTE(review): fragmentary extraction -- the switch header, break/default
 * lines and local declarations (to, val) are missing; code byte-identical.
 */
7869 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
7874 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7879 case SSD_WMODE_BUFFER
:
7880 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7882 case SSD_WMODE_BUFFER_EX
:
/* BUFFER_EX got its own (shorter) timeout only before V3.2.1 */
7883 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
7884 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
7886 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7890 to
= SSD_WMODE_FUA_TIMEOUT
;
7896 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
7898 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
/*
 * ssd_do_switch_wmode() - switch the active write mode under an I/O
 * barrier: start barrier, flush firmware cache, reprogram the flush
 * timeout, end barrier. Error paths jump to out_barrier_end.
 * NOTE(review): fragmentary extraction -- dev->wmode assignment and error
 * checks are on missing lines; code kept byte-identical.
 */
7901 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
7905 ret
= ssd_barrier_start(dev
);
7910 ret
= ssd_barrier_flush(dev
);
7912 goto out_barrier_end
;
7915 /* set controller flush timeout */
7916 ssd_set_flush_timeout(dev
, m
);
7922 ssd_barrier_end(dev
);
/*
 * ssd_switch_wmode() - resolve a user-requested write mode (AUTO picks
 * FUA when the power-loss capacitor is flagged faulty in dev->hwmon,
 * otherwise the protocol-dependent default; DEFAULT picks the default)
 * and apply it via ssd_do_switch_wmode() if it differs from the current
 * mode. Requires the device to be online.
 * NOTE(review): fragmentary extraction -- declarations of next_wmode/
 * default_wmode, else branch and return are missing; code byte-identical.
 */
7927 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
7933 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
/* default mode depends on protocol generation */
7937 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7938 default_wmode
= SSD_WMODE_BUFFER
;
7940 default_wmode
= SSD_WMODE_BUFFER_EX
;
7943 if (SSD_WMODE_AUTO
== m
) {
7944 /* battery fault ? */
7945 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7946 next_wmode
= SSD_WMODE_FUA
;
7948 next_wmode
= default_wmode
;
7950 } else if (SSD_WMODE_DEFAULT
== m
) {
7951 next_wmode
= default_wmode
;
7956 if (next_wmode
!= dev
->wmode
) {
7957 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
7958 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
7960 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
/*
 * ssd_init_wmode() - initial write-mode selection at device bring-up:
 * same AUTO/DEFAULT resolution as ssd_switch_wmode(), but assigns
 * dev->wmode directly and programs the flush timeout -- no barrier needed
 * since no I/O is in flight yet.
 * NOTE(review): fragmentary extraction; code kept byte-identical.
 */
7967 static int ssd_init_wmode(struct ssd_device
*dev
)
7972 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7973 default_wmode
= SSD_WMODE_BUFFER
;
7975 default_wmode
= SSD_WMODE_BUFFER_EX
;
7979 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
7980 /* battery fault ? */
7981 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7982 dev
->wmode
= SSD_WMODE_FUA
;
7984 dev
->wmode
= default_wmode
;
7986 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
7987 dev
->wmode
= default_wmode
;
7989 dev
->wmode
= dev
->user_wmode
;
7991 ssd_set_flush_timeout(dev
, dev
->wmode
);
/*
 * __ssd_set_wmode() - validate and apply a user-requested write mode:
 * rejects firmware older than protocol V3.1.1 and out-of-range modes,
 * logs the change, records user_wmode, then switches via
 * ssd_switch_wmode().
 * NOTE(review): fragmentary extraction -- error returns are on missing
 * lines; code kept byte-identical.
 */
7996 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8000 /* not support old fw*/
8001 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8006 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8011 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8013 dev
->user_wmode
= m
;
8015 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
/*
 * ssd_set_wmode() - exported entry point: resolve the ssd_device from a
 * block_device (guarding against NULL bdev/bd_disk) and delegate to
 * __ssd_set_wmode().
 */
8024 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8026 struct ssd_device
*dev
;
8028 if (!bdev
|| !(bdev
->bd_disk
)) {
8032 dev
= bdev
->bd_disk
->private_data
;
8034 return __ssd_set_wmode(dev
, m
);
/*
 * ssd_do_reset() - serialized device reset: guards with the SSD_RESETING
 * state bit, stops the workqueue, raises an I/O barrier, performs a
 * normal reset (full reset path is commented out even for >= V3.2), then
 * tears the barrier down, restarts the workqueue and clears the bit.
 * NOTE(review): fragmentary extraction -- error checks, returns and
 * braces are missing; code kept byte-identical.
 */
8037 static int ssd_do_reset(struct ssd_device
*dev
)
8041 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8045 ssd_stop_workq(dev
);
8047 ret
= ssd_barrier_start(dev
);
8052 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8054 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8057 //ret = __ssd_reset(dev, SSD_RST_FULL);
8058 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8061 goto out_barrier_end
;
8065 ssd_barrier_end(dev
);
8067 ssd_start_workq(dev
);
8068 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_full_reset() - like ssd_do_reset() but additionally flushes the
 * firmware cache and saves metadata (both via the barrier paths) before
 * resetting, so no dirty state is lost. Despite the name, the actual
 * reset issued is SSD_RST_NORMAL -- the SSD_RST_FULL call is commented
 * out for both protocol branches.
 * NOTE(review): fragmentary extraction; code kept byte-identical.
 */
8072 static int ssd_full_reset(struct ssd_device
*dev
)
8076 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8080 ssd_stop_workq(dev
);
8082 ret
= ssd_barrier_start(dev
);
8087 ret
= ssd_barrier_flush(dev
);
8089 goto out_barrier_end
;
8092 ret
= ssd_barrier_save_md(dev
);
8094 goto out_barrier_end
;
8097 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8099 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8102 //ret = __ssd_reset(dev, SSD_RST_FULL);
8103 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8106 goto out_barrier_end
;
8110 ssd_barrier_end(dev
);
8112 ssd_start_workq(dev
);
8113 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_reset() - exported entry point: resolve the ssd_device from a
 * block_device (guarding against NULL bdev/bd_disk) and run a full reset.
 */
8117 int ssd_reset(struct block_device
*bdev
)
8119 struct ssd_device
*dev
;
8121 if (!bdev
|| !(bdev
->bd_disk
)) {
8125 dev
= bdev
->bd_disk
->private_data
;
8127 return ssd_full_reset(dev
);
/* pre-2.6.20 kernels used a queue issue_flush_fn hook for cache flush */
8130 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * ssd_issue_flush_fn() - block-layer flush hook: forward to ssd_flush().
 * disk and error_sector are unused.
 */
8131 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8132 sector_t
*error_sector
)
8134 struct ssd_device
*dev
= q
->queuedata
;
8136 return ssd_flush(dev
);
/*
 * ssd_submit_pbio() - submit a "physical" bio: reject when the device is
 * offline, (optionally) after IO timeouts, for barrier/FUA requests the
 * hardware does not support, and for writes on a read-only device; then
 * either submit directly or queue via ssd_queue_bio() when the send queue
 * is busy. The stacked #if/#elif ladders track bio_endio() signature and
 * barrier-flag API changes across kernel versions.
 * NOTE(review): fragmentary extraction -- returns, closing braces and
 * several #else/#endif lines are missing; code kept byte-identical.
 */
8140 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8142 struct ssd_device
*dev
= q
->queuedata
;
8143 #ifdef SSD_QUEUE_PBIO
8147 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8148 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8149 bio_endio(bio
, -ENODEV
);
8151 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8156 #ifdef SSD_DEBUG_ERR
8157 if (atomic_read(&dev
->tocnt
)) {
8158 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8159 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8160 bio_endio(bio
, -EIO
);
8162 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier detection API changed at 2.6.32 / 2.6.36 / 2.6.37 */
8168 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8169 if (unlikely(bio_barrier(bio
))) {
8170 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8171 bio_endio(bio
, -EOPNOTSUPP
);
8173 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8177 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8178 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8179 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8180 bio_endio(bio
, -EOPNOTSUPP
);
8182 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8186 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8187 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8188 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8189 bio_endio(bio
, -EOPNOTSUPP
);
8191 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
/* FUA is not supported on this path */
8197 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8198 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8199 bio_endio(bio
, -EOPNOTSUPP
);
8201 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8207 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8208 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8209 bio_endio(bio
, -EROFS
);
8211 bio_endio(bio
, bio
->bi_size
, -EROFS
);
8216 #ifdef SSD_QUEUE_PBIO
/* fast path when nothing is queued; otherwise mark and queue the bio */
8217 if (0 == atomic_read(&dev
->in_sendq
)) {
8218 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8222 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8223 ssd_queue_bio(dev
, bio
);
8226 __ssd_submit_pbio(dev
, bio
, 1);
/* make_request_fn returned int before 3.2, void afterwards */
8233 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
/*
 * ssd_make_request() - main bio entry point for the block layer: same
 * reject logic as ssd_submit_pbio() (offline device, timeout debug mode,
 * unsupported barrier/FUA), plus completing empty REQ_FLUSH bios as
 * successful no-ops per Documentation/block/writeback_cache_control.txt;
 * then submit directly or queue when the send queue is busy.
 * NOTE(review): fragmentary extraction -- returns, closing braces and
 * several #else/#endif lines are missing; code kept byte-identical.
 */
8234 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8236 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8239 struct ssd_device
*dev
= q
->queuedata
;
8242 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8243 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8244 bio_endio(bio
, -ENODEV
);
8246 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8251 #ifdef SSD_DEBUG_ERR
8252 if (atomic_read(&dev
->tocnt
)) {
8253 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8254 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8255 bio_endio(bio
, -EIO
);
8257 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier detection API changed at 2.6.32 / 2.6.36 / 2.6.37 */
8263 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8264 if (unlikely(bio_barrier(bio
))) {
8265 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8266 bio_endio(bio
, -EOPNOTSUPP
);
8268 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8272 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8273 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8275 bio_endio(bio
, -EOPNOTSUPP
);
8277 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8281 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8282 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8283 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8284 bio_endio(bio
, -EOPNOTSUPP
);
8286 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8292 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8293 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8294 bio_endio(bio
, -EOPNOTSUPP
);
8296 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8301 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8302 if (unlikely((bio
->bi_rw
& REQ_FLUSH
) && !bio_sectors(bio
))) {
8309 if (0 == atomic_read(&dev
->in_sendq
)) {
8310 ret
= ssd_submit_bio(dev
, bio
, 0);
8314 ssd_queue_bio(dev
, bio
);
8318 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
8325 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
/*
 * ssd_block_getgeo() - HDIO_GETGEO ioctl backend: report a fake geometry
 * derived from the device size (cylinders = size / 64, with heads/sectors
 * presumably set on missing lines).
 * NOTE(review): fragmentary extraction; code kept byte-identical.
 */
8326 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8328 struct ssd_device
*dev
;
8334 dev
= bdev
->bd_disk
->private_data
;
8341 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8346 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8347 static int ssd_init_blkdev(struct ssd_device
*dev
);
8348 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8350 void __user
*argp
= (void __user
*)arg
;
8351 void __user
*buf
= NULL
;
8356 case SSD_CMD_GET_PROTOCOL_INFO
:
8357 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8358 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8364 case SSD_CMD_GET_HW_INFO
:
8365 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8366 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8372 case SSD_CMD_GET_ROM_INFO
:
8373 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8374 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8380 case SSD_CMD_GET_SMART
: {
8381 struct ssd_smart smart
;
8384 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8386 mutex_lock(&dev
->gd_mutex
);
8387 ssd_update_smart(dev
, &smart
);
8388 mutex_unlock(&dev
->gd_mutex
);
8390 /* combine the volatile log info */
8391 if (dev
->log_info
.nr_log
) {
8392 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8393 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8397 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8398 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8406 case SSD_CMD_GET_IDX
:
8407 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8408 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8414 case SSD_CMD_GET_AMOUNT
: {
8415 int nr_ssd
= atomic_read(&ssd_nr
);
8416 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8417 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8424 case SSD_CMD_GET_TO_INFO
: {
8425 int tocnt
= atomic_read(&dev
->tocnt
);
8427 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8428 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8435 case SSD_CMD_GET_DRV_VER
: {
8436 char ver
[] = DRIVER_VERSION
;
8437 int len
= sizeof(ver
);
8439 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8440 len
= (DRIVER_VERSION_LEN
- 1);
8442 if (copy_to_user(argp
, ver
, len
)) {
8443 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8450 case SSD_CMD_GET_BBACC_INFO
: {
8451 struct ssd_acc_info acc
;
8453 mutex_lock(&dev
->fw_mutex
);
8454 ret
= ssd_bb_acc(dev
, &acc
);
8455 mutex_unlock(&dev
->fw_mutex
);
8460 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8461 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8468 case SSD_CMD_GET_ECACC_INFO
: {
8469 struct ssd_acc_info acc
;
8471 mutex_lock(&dev
->fw_mutex
);
8472 ret
= ssd_ec_acc(dev
, &acc
);
8473 mutex_unlock(&dev
->fw_mutex
);
8478 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8479 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8486 case SSD_CMD_GET_HW_INFO_EXT
:
8487 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8488 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8494 case SSD_CMD_REG_READ
: {
8495 struct ssd_reg_op_info reg_info
;
8497 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8498 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8503 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8508 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8509 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8510 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8518 case SSD_CMD_REG_WRITE
: {
8519 struct ssd_reg_op_info reg_info
;
8521 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8522 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8527 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8532 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8537 case SSD_CMD_SPI_READ
: {
8538 struct ssd_spi_op_info spi_info
;
8541 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8542 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8548 size
= spi_info
.len
;
8551 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8556 kbuf
= kmalloc(size
, GFP_KERNEL
);
8562 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8568 if (copy_to_user(buf
, kbuf
, size
)) {
8569 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8580 case SSD_CMD_SPI_WRITE
: {
8581 struct ssd_spi_op_info spi_info
;
8584 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8585 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8591 size
= spi_info
.len
;
8594 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8599 kbuf
= kmalloc(size
, GFP_KERNEL
);
8605 if (copy_from_user(kbuf
, buf
, size
)) {
8606 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8612 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8623 case SSD_CMD_SPI_ERASE
: {
8624 struct ssd_spi_op_info spi_info
;
8627 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8628 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8635 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8640 ret
= ssd_spi_block_erase(dev
, off
);
8648 case SSD_CMD_I2C_READ
: {
8649 struct ssd_i2c_op_info i2c_info
;
8653 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8654 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8659 saddr
= i2c_info
.saddr
;
8660 rsize
= i2c_info
.rsize
;
8661 buf
= i2c_info
.rbuf
;
8663 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8668 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8674 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8680 if (copy_to_user(buf
, kbuf
, rsize
)) {
8681 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8692 case SSD_CMD_I2C_WRITE
: {
8693 struct ssd_i2c_op_info i2c_info
;
8697 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8698 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8703 saddr
= i2c_info
.saddr
;
8704 wsize
= i2c_info
.wsize
;
8705 buf
= i2c_info
.wbuf
;
8707 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8712 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8718 if (copy_from_user(kbuf
, buf
, wsize
)) {
8719 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8725 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8736 case SSD_CMD_I2C_WRITE_READ
: {
8737 struct ssd_i2c_op_info i2c_info
;
8743 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8744 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8749 saddr
= i2c_info
.saddr
;
8750 wsize
= i2c_info
.wsize
;
8751 rsize
= i2c_info
.rsize
;
8752 buf
= i2c_info
.wbuf
;
8754 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8759 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8764 size
= wsize
+ rsize
;
8766 kbuf
= kmalloc(size
, GFP_KERNEL
);
8772 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8773 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8779 buf
= i2c_info
.rbuf
;
8781 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8787 if (copy_to_user(buf
, kbuf
, rsize
)) {
8788 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8799 case SSD_CMD_SMBUS_SEND_BYTE
: {
8800 struct ssd_smbus_op_info smbus_info
;
8801 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8805 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8806 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8811 saddr
= smbus_info
.saddr
;
8812 buf
= smbus_info
.buf
;
8815 if (copy_from_user(smb_data
, buf
, size
)) {
8816 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8821 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
8829 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
8830 struct ssd_smbus_op_info smbus_info
;
8831 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8835 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8836 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8841 saddr
= smbus_info
.saddr
;
8842 buf
= smbus_info
.buf
;
8845 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
8850 if (copy_to_user(buf
, smb_data
, size
)) {
8851 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8859 case SSD_CMD_SMBUS_WRITE_BYTE
: {
8860 struct ssd_smbus_op_info smbus_info
;
8861 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8866 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8867 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8872 saddr
= smbus_info
.saddr
;
8873 command
= smbus_info
.cmd
;
8874 buf
= smbus_info
.buf
;
8877 if (copy_from_user(smb_data
, buf
, size
)) {
8878 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8883 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
8891 case SSD_CMD_SMBUS_READ_BYTE
: {
8892 struct ssd_smbus_op_info smbus_info
;
8893 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8898 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8899 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8904 saddr
= smbus_info
.saddr
;
8905 command
= smbus_info
.cmd
;
8906 buf
= smbus_info
.buf
;
8909 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
8914 if (copy_to_user(buf
, smb_data
, size
)) {
8915 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8923 case SSD_CMD_SMBUS_WRITE_WORD
: {
8924 struct ssd_smbus_op_info smbus_info
;
8925 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8930 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8931 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8936 saddr
= smbus_info
.saddr
;
8937 command
= smbus_info
.cmd
;
8938 buf
= smbus_info
.buf
;
8941 if (copy_from_user(smb_data
, buf
, size
)) {
8942 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8947 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
8955 case SSD_CMD_SMBUS_READ_WORD
: {
8956 struct ssd_smbus_op_info smbus_info
;
8957 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8962 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8963 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8968 saddr
= smbus_info
.saddr
;
8969 command
= smbus_info
.cmd
;
8970 buf
= smbus_info
.buf
;
8973 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
8978 if (copy_to_user(buf
, smb_data
, size
)) {
8979 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8987 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
8988 struct ssd_smbus_op_info smbus_info
;
8989 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8994 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8995 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9000 saddr
= smbus_info
.saddr
;
9001 command
= smbus_info
.cmd
;
9002 buf
= smbus_info
.buf
;
9003 size
= smbus_info
.size
;
9005 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9010 if (copy_from_user(smb_data
, buf
, size
)) {
9011 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9016 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9024 case SSD_CMD_SMBUS_READ_BLOCK
: {
9025 struct ssd_smbus_op_info smbus_info
;
9026 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9031 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9032 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9037 saddr
= smbus_info
.saddr
;
9038 command
= smbus_info
.cmd
;
9039 buf
= smbus_info
.buf
;
9040 size
= smbus_info
.size
;
9042 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9047 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9052 if (copy_to_user(buf
, smb_data
, size
)) {
9053 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9061 case SSD_CMD_BM_GET_VER
: {
9064 ret
= ssd_bm_get_version(dev
, &ver
);
9069 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9070 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9078 case SSD_CMD_BM_GET_NR_CAP
: {
9081 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9086 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9087 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9095 case SSD_CMD_BM_CAP_LEARNING
: {
9096 ret
= ssd_bm_enter_cap_learning(dev
);
9105 case SSD_CMD_CAP_LEARN
: {
9108 ret
= ssd_cap_learn(dev
, &cap
);
9113 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9114 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9122 case SSD_CMD_GET_CAP_STATUS
: {
9125 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9129 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9130 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9138 case SSD_CMD_RAM_READ
: {
9139 struct ssd_ram_op_info ram_info
;
9142 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9145 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9146 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9151 ofs
= ram_info
.start
;
9152 length
= ram_info
.length
;
9154 ctrl_idx
= ram_info
.ctrl_idx
;
9156 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9161 kbuf
= kmalloc(len
, GFP_KERNEL
);
9167 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9168 if ((length
- rlen
) < len
) {
9169 len
= length
- rlen
;
9172 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9177 if (copy_to_user(buf
, kbuf
, len
)) {
9188 case SSD_CMD_RAM_WRITE
: {
9189 struct ssd_ram_op_info ram_info
;
9192 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9195 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9196 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9200 ofs
= ram_info
.start
;
9201 length
= ram_info
.length
;
9203 ctrl_idx
= ram_info
.ctrl_idx
;
9205 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9210 kbuf
= kmalloc(len
, GFP_KERNEL
);
9216 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9217 if ((length
- wlen
) < len
) {
9218 len
= length
- wlen
;
9221 if (copy_from_user(kbuf
, buf
, len
)) {
9226 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9237 case SSD_CMD_NAND_READ_ID
: {
9238 struct ssd_flash_op_info flash_info
;
9239 int chip_no
, chip_ce
, length
, ctrl_idx
;
9241 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9242 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9247 chip_no
= flash_info
.flash
;
9248 chip_ce
= flash_info
.chip
;
9249 ctrl_idx
= flash_info
.ctrl_idx
;
9250 buf
= flash_info
.buf
;
9251 length
= dev
->hw_info
.id_size
;
9253 //kbuf = kmalloc(length, GFP_KERNEL);
9254 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9259 memset(kbuf
, 0, length
);
9261 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9267 if (copy_to_user(buf
, kbuf
, length
)) {
9278 case SSD_CMD_NAND_READ
: { //with oob
9279 struct ssd_flash_op_info flash_info
;
9281 int flash
, chip
, page
, ctrl_idx
;
9284 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9285 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9290 flash
= flash_info
.flash
;
9291 chip
= flash_info
.chip
;
9292 page
= flash_info
.page
;
9293 buf
= flash_info
.buf
;
9294 ctrl_idx
= flash_info
.ctrl_idx
;
9296 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9298 kbuf
= kmalloc(length
, GFP_KERNEL
);
9304 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9305 if (ret
&& (-EIO
!= ret
)) {
9310 if (copy_to_user(buf
, kbuf
, length
)) {
9322 case SSD_CMD_NAND_WRITE
: {
9323 struct ssd_flash_op_info flash_info
;
9324 int flash
, chip
, page
, ctrl_idx
;
9327 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9328 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9333 flash
= flash_info
.flash
;
9334 chip
= flash_info
.chip
;
9335 page
= flash_info
.page
;
9336 buf
= flash_info
.buf
;
9337 ctrl_idx
= flash_info
.ctrl_idx
;
9339 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9341 kbuf
= kmalloc(length
, GFP_KERNEL
);
9347 if (copy_from_user(kbuf
, buf
, length
)) {
9353 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9363 case SSD_CMD_NAND_ERASE
: {
9364 struct ssd_flash_op_info flash_info
;
9365 int flash
, chip
, page
, ctrl_idx
;
9367 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9368 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9373 flash
= flash_info
.flash
;
9374 chip
= flash_info
.chip
;
9375 page
= flash_info
.page
;
9376 ctrl_idx
= flash_info
.ctrl_idx
;
9378 if ((page
% dev
->hw_info
.page_count
) != 0) {
9383 //hio_warn("erase fs = %llx\n", ofs);
9384 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9392 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9393 struct ssd_flash_op_info flash_info
;
9395 int flash
, chip
, page
, ctrl_idx
;
9397 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9398 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9403 flash
= flash_info
.flash
;
9404 chip
= flash_info
.chip
;
9405 page
= flash_info
.page
;
9406 buf
= flash_info
.buf
;
9407 ctrl_idx
= flash_info
.ctrl_idx
;
9409 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9411 kbuf
= kmalloc(length
, GFP_KERNEL
);
9417 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9418 if (-EIO
== ret
) { //ingore EIO
9426 if (copy_to_user(buf
, kbuf
, length
)) {
9436 case SSD_CMD_UPDATE_BBT
: {
9437 struct ssd_flash_op_info flash_info
;
9438 int ctrl_idx
, flash
;
9440 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9441 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9446 ctrl_idx
= flash_info
.ctrl_idx
;
9447 flash
= flash_info
.flash
;
9448 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9456 case SSD_CMD_CLEAR_ALARM
:
9457 ssd_clear_alarm(dev
);
9460 case SSD_CMD_SET_ALARM
:
9465 ret
= ssd_do_reset(dev
);
9468 case SSD_CMD_RELOAD_FW
:
9470 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9471 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9472 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9473 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9478 case SSD_CMD_UNLOAD_DEV
: {
9479 if (atomic_read(&dev
->refcnt
)) {
9485 ssd_save_smart(dev
);
9487 ret
= ssd_flush(dev
);
9492 /* cleanup the block device */
9493 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9494 mutex_lock(&dev
->gd_mutex
);
9495 ssd_cleanup_blkdev(dev
);
9496 mutex_unlock(&dev
->gd_mutex
);
9502 case SSD_CMD_LOAD_DEV
: {
9504 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9509 ret
= ssd_init_smart(dev
);
9511 hio_warn("%s: init info: failed\n", dev
->name
);
9515 ret
= ssd_init_blkdev(dev
);
9517 hio_warn("%s: register block device: failed\n", dev
->name
);
9520 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9525 case SSD_CMD_UPDATE_VP
: {
9527 uint32_t new_vp
, new_vp1
= 0;
9529 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9534 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9535 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9540 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9545 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9546 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9548 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9549 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9550 new_vp1
= val
& 0x3FF;
9552 new_vp1
= val
& 0x7FFF;
9555 if (new_vp1
== new_vp
) {
9560 /*if (new_vp == dev->hw_info.valid_pages) {
9565 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9567 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9572 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9573 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9574 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9580 dev
->hw_info
.valid_pages
= new_vp
;
9581 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9582 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9583 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9588 case SSD_CMD_FULL_RESET
: {
9589 ret
= ssd_full_reset(dev
);
9593 case SSD_CMD_GET_NR_LOG
: {
9594 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9601 case SSD_CMD_GET_LOG
: {
9602 uint32_t length
= dev
->rom_info
.log_sz
;
9606 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9614 case SSD_CMD_LOG_LEVEL
: {
9616 if (copy_from_user(&level
, argp
, sizeof(int))) {
9617 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9622 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9623 level
= SSD_LOG_LEVEL_ERR
;
9626 //just for showing log, no need to protect
9631 case SSD_CMD_OT_PROTECT
: {
9634 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9635 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9640 ssd_set_ot_protect(dev
, !!protect
);
9644 case SSD_CMD_GET_OT_STATUS
: {
9645 int status
= ssd_get_ot_status(dev
, &status
);
9647 if (copy_to_user(argp
, &status
, sizeof(int))) {
9648 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9655 case SSD_CMD_CLEAR_LOG
: {
9656 ret
= ssd_clear_log(dev
);
9660 case SSD_CMD_CLEAR_SMART
: {
9661 ret
= ssd_clear_smart(dev
);
9665 case SSD_CMD_SW_LOG
: {
9666 struct ssd_sw_log_info sw_log
;
9668 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9669 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9674 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9678 case SSD_CMD_GET_LABEL
: {
9680 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9685 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9686 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9693 case SSD_CMD_GET_VERSION
: {
9694 struct ssd_version_info ver
;
9696 mutex_lock(&dev
->fw_mutex
);
9697 ret
= __ssd_get_version(dev
, &ver
);
9698 mutex_unlock(&dev
->fw_mutex
);
9703 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9704 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9711 case SSD_CMD_GET_TEMPERATURE
: {
9714 mutex_lock(&dev
->fw_mutex
);
9715 ret
= __ssd_get_temperature(dev
, &temp
);
9716 mutex_unlock(&dev
->fw_mutex
);
9721 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9722 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9729 case SSD_CMD_GET_BMSTATUS
: {
9732 mutex_lock(&dev
->fw_mutex
);
9733 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9734 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9735 status
= SSD_BMSTATUS_WARNING
;
9737 status
= SSD_BMSTATUS_OK
;
9739 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9740 ret
= __ssd_bm_status(dev
, &status
);
9742 status
= SSD_BMSTATUS_OK
;
9744 mutex_unlock(&dev
->fw_mutex
);
9749 if (copy_to_user(argp
, &status
, sizeof(int))) {
9750 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9757 case SSD_CMD_GET_LABEL2
: {
9761 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9762 label
= &dev
->label
;
9763 length
= sizeof(struct ssd_label
);
9765 label
= &dev
->labelv3
;
9766 length
= sizeof(struct ssd_labelv3
);
9769 if (copy_to_user(argp
, label
, length
)) {
9777 ret
= ssd_flush(dev
);
9779 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9785 case SSD_CMD_SAVE_MD
: {
9788 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9789 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9794 dev
->save_md
= !!save_md
;
9798 case SSD_CMD_SET_WMODE
: {
9801 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9802 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9807 ret
= __ssd_set_wmode(dev
, new_wmode
);
9815 case SSD_CMD_GET_WMODE
: {
9816 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
9817 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9825 case SSD_CMD_GET_USER_WMODE
: {
9826 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
9827 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9835 case SSD_CMD_DEBUG
: {
9836 struct ssd_debug_info db_info
;
9843 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
9844 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9849 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
9855 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
9856 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
9861 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
9863 #ifdef SSD_OT_PROTECT
9865 if (db_info
.type
== SSD_DEBUG_NONE
) {
9866 ssd_check_temperature(dev
, SSD_OT_TEMP
);
9867 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
9868 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
9869 dev
->ot_delay
= SSD_OT_DELAY
;
9870 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
9877 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
9878 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
9879 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
9880 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
9884 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
9885 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
9891 case SSD_CMD_DRV_PARAM_INFO
: {
9892 struct ssd_drv_param_info drv_param
;
9894 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
9896 drv_param
.mode
= mode
;
9897 drv_param
.status_mask
= status_mask
;
9898 drv_param
.int_mode
= int_mode
;
9899 drv_param
.threaded_irq
= threaded_irq
;
9900 drv_param
.log_level
= log_level
;
9901 drv_param
.wmode
= wmode
;
9902 drv_param
.ot_protect
= ot_protect
;
9903 drv_param
.finject
= finject
;
9905 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
9906 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9922 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9923 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
9924 unsigned int cmd
, unsigned long arg
)
9926 struct ssd_device
*dev
;
9927 void __user
*argp
= (void __user
*)arg
;
9933 dev
= inode
->i_bdev
->bd_disk
->private_data
;
9938 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
9939 unsigned int cmd
, unsigned long arg
)
9941 struct ssd_device
*dev
;
9942 void __user
*argp
= (void __user
*)arg
;
9949 dev
= bdev
->bd_disk
->private_data
;
9957 struct hd_geometry geo
;
9958 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
9961 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9962 geo
.start
= get_start_sect(inode
->i_bdev
);
9964 geo
.start
= get_start_sect(bdev
);
9966 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
9975 ret
= ssd_flush(dev
);
9977 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9985 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
9996 static void ssd_free_dev(struct kref
*kref
)
9998 struct ssd_device
*dev
;
10004 dev
= container_of(kref
, struct ssd_device
, kref
);
10008 ssd_put_index(dev
->slave
, dev
->idx
);
10013 static void ssd_put(struct ssd_device
*dev
)
10015 kref_put(&dev
->kref
, ssd_free_dev
);
10018 static int ssd_get(struct ssd_device
*dev
)
10020 kref_get(&dev
->kref
);
10025 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10026 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10028 struct ssd_device
*dev
;
10034 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10039 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10041 struct ssd_device
*dev
;
10047 dev
= bdev
->bd_disk
->private_data
;
10053 /*if (!try_module_get(dev->owner))
10059 atomic_inc(&dev
->refcnt
);
10064 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10065 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10067 struct ssd_device
*dev
;
10073 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10077 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10078 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10080 struct ssd_device
*dev
;
10086 dev
= disk
->private_data
;
10091 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10093 struct ssd_device
*dev
;
10099 dev
= disk
->private_data
;
10105 atomic_dec(&dev
->refcnt
);
10109 //module_put(dev->owner);
10110 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10115 static struct block_device_operations ssd_fops
= {
10116 .owner
= THIS_MODULE
,
10117 .open
= ssd_block_open
,
10118 .release
= ssd_block_release
,
10119 .ioctl
= ssd_block_ioctl
,
10120 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10121 .getgeo
= ssd_block_getgeo
,
10125 static void ssd_init_trim(ssd_device_t
*dev
)
10127 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10128 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10131 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10133 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10134 dev
->rq
->limits
.discard_zeroes_data
= 1;
10135 dev
->rq
->limits
.discard_alignment
= 4096;
10136 dev
->rq
->limits
.discard_granularity
= 4096;
10138 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10139 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10141 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
10146 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10150 blk_cleanup_queue(dev
->rq
);
10154 static int ssd_init_queue(struct ssd_device
*dev
)
10156 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10157 if (dev
->rq
== NULL
) {
10158 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10159 goto out_init_queue
;
10162 /* must be first */
10163 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10165 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10166 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10167 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10168 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10170 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10171 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10174 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10175 blk_queue_hardsect_size(dev
->rq
, 512);
10177 blk_queue_logical_block_size(dev
->rq
, 512);
10179 /* not work for make_request based drivers(bio) */
10180 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10182 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10184 dev
->rq
->queuedata
= dev
;
10186 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10187 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10190 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10191 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10194 ssd_init_trim(dev
);
10202 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10204 del_gendisk(dev
->gd
);
10207 static int ssd_init_blkdev(struct ssd_device
*dev
)
10213 dev
->gd
= alloc_disk(ssd_minors
);
10215 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10218 dev
->gd
->major
= dev
->major
;
10219 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10220 dev
->gd
->fops
= &ssd_fops
;
10221 dev
->gd
->queue
= dev
->rq
;
10222 dev
->gd
->private_data
= dev
;
10223 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
10224 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10226 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10236 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10237 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10238 unsigned int cmd
, unsigned long arg
)
10240 static long ssd_ioctl(struct file
*file
,
10241 unsigned int cmd
, unsigned long arg
)
10244 struct ssd_device
*dev
;
10250 dev
= file
->private_data
;
10255 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
10258 static int ssd_open(struct inode
*inode
, struct file
*file
)
10260 struct ssd_device
*dev
= NULL
;
10261 struct ssd_device
*n
= NULL
;
10265 if (!inode
|| !file
) {
10269 idx
= iminor(inode
);
10271 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10272 if (dev
->idx
== idx
) {
10282 file
->private_data
= dev
;
10289 static int ssd_release(struct inode
*inode
, struct file
*file
)
10291 struct ssd_device
*dev
;
10297 dev
= file
->private_data
;
10304 file
->private_data
= NULL
;
10309 static struct file_operations ssd_cfops
= {
10310 .owner
= THIS_MODULE
,
10312 .release
= ssd_release
,
10313 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10314 .ioctl
= ssd_ioctl
,
10316 .unlocked_ioctl
= ssd_ioctl
,
10320 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10326 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10327 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10328 devfs_remove("c%s", dev
->name
);
10329 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10330 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10331 devfs_remove("c%s", dev
->name
);
10332 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10333 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10334 devfs_remove("c%s", dev
->name
);
10335 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10336 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10338 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10342 static int ssd_init_chardev(struct ssd_device
*dev
)
10350 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10351 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10355 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10357 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10358 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10362 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10364 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10365 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10369 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10371 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10372 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10373 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10374 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10375 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10376 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10378 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10384 static int ssd_check_hw(struct ssd_device
*dev
)
10386 uint32_t test_data
= 0x55AA5AA5;
10387 uint32_t read_data
;
10389 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10390 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
10391 if (read_data
!= ~(test_data
)) {
10392 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
10399 static int ssd_check_fw(struct ssd_device
*dev
)
10404 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10408 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10409 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10410 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10414 msleep(SSD_INIT_WAIT
);
10417 if (!(val
& 0x1)) {
10418 /* controller fw status */
10419 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10421 } else if (!((val
>> 8) & 0x1)) {
10422 /* controller state */
10423 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10427 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10429 dev
->reload_fw
= 1;
10435 static int ssd_init_fw_info(struct ssd_device
*dev
)
10440 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10441 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10442 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10443 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10446 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10448 ret
= ssd_check_fw(dev
);
10454 /* skip error if not in standard mode */
10455 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10461 static int ssd_check_clock(struct ssd_device
*dev
)
10466 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10470 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10473 if (!((val
>> 4 ) & 0x1)) {
10474 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10475 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10476 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10481 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10482 if (!((val
>> 5 ) & 0x1)) {
10483 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10484 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10485 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10489 if (!((val
>> 6 ) & 0x1)) {
10490 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10491 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10492 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10496 if (!((val
>> 7 ) & 0x1)) {
10497 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10498 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10499 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10508 static int ssd_check_volt(struct ssd_device
*dev
)
10515 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10519 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10521 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10522 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10523 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10524 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10525 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10526 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10527 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10531 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10532 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10533 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10534 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10535 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10541 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10542 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10543 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10544 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10545 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10546 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10547 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10551 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10552 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10553 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10554 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10555 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10564 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10568 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10572 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10573 if (!((val
>> 8) & 0x1)) {
10574 /* controller state */
10575 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10579 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10583 if (((val
>> 9 ) & 0x1)) {
10584 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10585 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
10592 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10596 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10601 ret
= ssd_check_clock(dev
);
10607 /* skip error if not in standard mode */
10608 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10614 static int ssd_check_controller(struct ssd_device
*dev
)
10618 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10623 ret
= ssd_check_reset_sync(dev
);
10629 /* skip error if not in standard mode */
10630 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10636 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10638 uint32_t test_data
= 0x55AA5AA5;
10640 int reg_base
, reg_sz
;
10645 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10650 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10652 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10656 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10657 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10658 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10659 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10660 if (val
!= ~(test_data
)) {
10661 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10667 ret
= ssd_check_volt(dev
);
10673 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10674 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10675 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10677 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10679 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10681 if (!((val
>> 1) & 0x1)) {
10683 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10684 msleep(SSD_INIT_WAIT
);
10685 goto check_ram_status
;
10687 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10688 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10693 reg_base
+= reg_sz
;
10698 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10699 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10700 if (!((val
>> 31) & 0x1)) {
10704 msleep(SSD_INIT_WAIT
);
10706 if ((val
>> 31) & 0x1) {
10707 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10714 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10718 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10719 if (val
== (uint32_t)-1) {
10720 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10723 dev
->protocol_info
.ver
= val
;
10725 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10726 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10727 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10729 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10730 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10732 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10733 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10735 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10736 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10742 static int ssd_init_hw_info(struct ssd_device
*dev
)
10750 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10751 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10752 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10754 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10755 hio_warn("%s: response info error\n", dev
->name
);
10760 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10761 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10762 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10763 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10764 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10766 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10767 hio_warn("%s: cmd info error\n", dev
->name
);
10773 if (ssd_check_hw_bh(dev
)) {
10774 hio_warn("%s: check hardware status failed\n", dev
->name
);
10779 if (ssd_check_controller(dev
)) {
10780 hio_warn("%s: check controller state failed\n", dev
->name
);
10785 /* nr controller : read again*/
10786 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10787 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
10789 /* nr ctrl configured */
10790 nr_ctrl
= (val
>> 20) & 0xF;
10791 if (0 == dev
->hw_info
.nr_ctrl
) {
10792 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
10795 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
10796 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
10797 if (mode
<= SSD_DRV_MODE_STANDARD
) {
10803 if (ssd_check_controller_bh(dev
)) {
10804 hio_warn("%s: check controller failed\n", dev
->name
);
10809 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10810 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
10811 if ((val
& 0xF) != 0xF) {
10812 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
10815 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
10816 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
10822 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10823 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10824 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
10825 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
10826 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
10828 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10829 dev
->hw_info
.max_ch
= 1;
10830 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
10832 /* set max channel 32 */
10833 dev
->hw_info
.max_ch
= 32;
10836 if (0 == dev
->hw_info
.nr_chip
) {
10838 dev
->hw_info
.nr_chip
= 1;
10842 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
10843 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
10845 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
10846 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
10853 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10854 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
10855 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
10856 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
10857 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
10858 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10859 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
10861 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
10866 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
10868 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
10869 hio_warn("%s: ram info error\n", dev
->name
);
10874 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10875 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
10877 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
10878 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
10880 if (0 == dev
->hw_info
.log_sz
) {
10881 hio_warn("%s: log size error\n", dev
->name
);
10886 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
10887 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
10888 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10889 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10890 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
10891 hio_warn("%s: bbt info error\n", dev
->name
);
10897 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
10898 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
10899 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10900 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10902 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
10904 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
10905 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10906 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
10907 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
10908 hio_warn("%s: md info error\n", dev
->name
);
10914 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10915 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
10917 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
10918 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
10923 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10924 if (dev
->hw_info
.nr_ctrl
> 1) {
10925 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
10926 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
10927 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
10930 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
10931 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
10932 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
10934 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
10935 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
10936 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
10938 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
10939 dev
->hw_info
.bbf_pages
= val
& 0xFF;
10940 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
10942 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
10943 hio_warn("%s: flash info error\n", dev
->name
);
10949 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
10951 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
10952 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10953 dev
->hw_info
.valid_pages
= val
& 0x3FF;
10954 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
10956 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
10957 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
10959 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
10960 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
10961 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
10966 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
10967 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
10968 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
10969 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10970 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
10972 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
10973 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
10980 if (mode
< SSD_DRV_MODE_DEBUG
) {
10981 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
10982 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
10983 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
10986 /* extend hardware info */
10987 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10988 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
10990 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
10991 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
10992 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
10995 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
10996 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
10997 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11000 /* power loss protect */
11001 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11002 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11003 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11005 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11009 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11010 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11013 /* skip error if not in standard mode */
11014 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11020 static void ssd_cleanup_response(struct ssd_device
*dev
)
11022 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11023 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11025 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11026 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11029 static int ssd_init_response(struct ssd_device
*dev
)
11031 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11032 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11034 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11035 if (!dev
->resp_msg_base
) {
11036 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11037 goto out_alloc_resp_msg
;
11039 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11041 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11042 if (!dev
->resp_ptr_base
){
11043 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11044 goto out_alloc_resp_ptr
;
11046 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11047 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11049 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11050 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11054 out_alloc_resp_ptr
:
11055 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11056 out_alloc_resp_msg
:
11060 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11062 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11065 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11066 kfree(dev
->cmd
[i
].sgl
);
11069 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11073 static int ssd_init_cmd(struct ssd_device
*dev
)
11075 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11076 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11077 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11080 spin_lock_init(&dev
->cmd_lock
);
11082 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11083 if (!dev
->msg_base
) {
11084 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11085 goto out_alloc_msg
;
11088 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11090 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11091 goto out_alloc_cmd
;
11093 memset(dev
->cmd
, 0, cmd_sz
);
11095 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11096 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11097 if (!dev
->cmd
[i
].sgl
) {
11098 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11099 goto out_alloc_sgl
;
11102 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11103 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11105 dev
->cmd
[i
].dev
= dev
;
11106 dev
->cmd
[i
].tag
= i
;
11107 dev
->cmd
[i
].flag
= 0;
11109 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11112 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11113 dev
->scmd
= ssd_dispatch_cmd
;
11115 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11117 dev
->scmd
= ssd_send_cmd_db
;
11119 dev
->scmd
= ssd_send_cmd
;
11126 for (i
--; i
>=0; i
--) {
11127 kfree(dev
->cmd
[i
].sgl
);
11131 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11136 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11137 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11139 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11141 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11145 return IRQ_WAKE_THREAD
;
11148 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11150 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11151 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11152 struct ssd_cmd
*cmd
;
11153 union ssd_response_msq __msg
;
11154 union ssd_response_msq
*msg
= &__msg
;
11156 uint32_t resp_idx
= queue
->resp_idx
;
11157 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11158 uint32_t end_resp_idx
;
11160 if (unlikely(resp_idx
== new_resp_idx
)) {
11164 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11167 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11170 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11171 msg
->u64_msg
= *u64_msg
;
11173 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11174 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11177 /* clear the resp msg */
11178 *u64_msg
= (uint64_t)(-1);
11180 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11181 /*if (unlikely(!cmd->bio)) {
11182 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11183 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11187 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11188 cmd
->errors
= -EIO
;
11192 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11196 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11197 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11198 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11199 queue_work(dev
->workq
, &dev
->log_work
);
11203 if (unlikely(msg
->resp_msg
.status
)) {
11204 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11205 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11206 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11209 ssd_set_alarm(dev
);
11210 queue
->io_stat
.nr_rwerr
++;
11211 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11213 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11214 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11216 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11218 queue
->io_stat
.nr_ioerr
++;
11221 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11222 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11223 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11225 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11227 }while (resp_idx
!= end_resp_idx
);
11229 queue
->resp_idx
= new_resp_idx
;
11231 return IRQ_HANDLED
;
11235 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11236 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11238 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11241 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11242 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11243 struct ssd_cmd
*cmd
;
11244 union ssd_response_msq __msg
;
11245 union ssd_response_msq
*msg
= &__msg
;
11247 uint32_t resp_idx
= queue
->resp_idx
;
11248 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11249 uint32_t end_resp_idx
;
11251 if (unlikely(resp_idx
== new_resp_idx
)) {
11255 #if (defined SSD_ESCAPE_IRQ)
11256 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11257 dev
->irq_cpu
= smp_processor_id();
11261 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11264 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11267 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11268 msg
->u64_msg
= *u64_msg
;
11270 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11271 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11274 /* clear the resp msg */
11275 *u64_msg
= (uint64_t)(-1);
11277 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11278 /*if (unlikely(!cmd->bio)) {
11279 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11280 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11284 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11285 cmd
->errors
= -EIO
;
11289 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11293 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11294 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11295 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11296 queue_work(dev
->workq
, &dev
->log_work
);
11300 if (unlikely(msg
->resp_msg
.status
)) {
11301 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11302 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11303 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11306 ssd_set_alarm(dev
);
11307 queue
->io_stat
.nr_rwerr
++;
11308 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11310 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11311 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11313 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11315 queue
->io_stat
.nr_ioerr
++;
11318 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11319 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11320 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11322 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11324 }while (resp_idx
!= end_resp_idx
);
11326 queue
->resp_idx
= new_resp_idx
;
11328 return IRQ_HANDLED
;
11331 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11332 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11334 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11338 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11339 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11341 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11342 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11344 ret
= ssd_interrupt(irq
, dev_id
);
11348 if (IRQ_HANDLED
== ret
) {
11349 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11355 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11359 for (i
=0; i
<dev
->nr_queue
; i
++) {
11360 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11364 static void ssd_free_irq(struct ssd_device
*dev
)
11368 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11369 if (SSD_INT_MSIX
== dev
->int_mode
) {
11370 for (i
=0; i
<dev
->nr_queue
; i
++) {
11371 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11376 for (i
=0; i
<dev
->nr_queue
; i
++) {
11377 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11380 if (SSD_INT_MSIX
== dev
->int_mode
) {
11381 pci_disable_msix(dev
->pdev
);
11382 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11383 pci_disable_msi(dev
->pdev
);
11388 static int ssd_init_irq(struct ssd_device
*dev
)
11390 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11391 const struct cpumask
*cpu_mask
;
11392 static int cpu_affinity
= 0;
11394 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11395 const struct cpumask
*mask
;
11396 static int cpu
= 0;
11400 unsigned long flags
= 0;
11403 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11405 #ifdef SSD_ESCAPE_IRQ
11409 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11410 dev
->nr_queue
= SSD_MSIX_VEC
;
11411 for (i
=0; i
<dev
->nr_queue
; i
++) {
11412 dev
->entry
[i
].entry
= i
;
11415 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11418 } else if (ret
> 0) {
11419 dev
->nr_queue
= ret
;
11421 hio_warn("%s: can not enable msix\n", dev
->name
);
11423 ssd_set_alarm(dev
);
11428 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11429 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11430 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11431 cpu
= cpumask_first(mask
);
11433 for (i
=0; i
<dev
->nr_queue
; i
++) {
11434 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11435 cpu
= cpumask_next(cpu
, mask
);
11436 if (cpu
>= nr_cpu_ids
) {
11437 cpu
= cpumask_first(mask
);
11442 dev
->int_mode
= SSD_INT_MSIX
;
11443 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11444 ret
= pci_enable_msi(dev
->pdev
);
11446 hio_warn("%s: can not enable msi\n", dev
->name
);
11448 ssd_set_alarm(dev
);
11453 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11455 dev
->int_mode
= SSD_INT_MSI
;
11458 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11460 dev
->int_mode
= SSD_INT_LEGACY
;
11463 for (i
=0; i
<dev
->nr_queue
; i
++) {
11464 if (dev
->nr_queue
> 1) {
11465 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11467 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11470 dev
->queue
[i
].dev
= dev
;
11471 dev
->queue
[i
].idx
= i
;
11473 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11474 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11476 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11477 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11478 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11479 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11481 dev
->queue
[i
].cmd
= dev
->cmd
;
11484 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11485 flags
= IRQF_SHARED
;
11490 for (i
=0; i
<dev
->nr_queue
; i
++) {
11491 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11492 if (threaded_irq
) {
11493 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11494 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11495 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11497 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11500 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11501 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11503 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11507 hio_warn("%s: request irq failed\n", dev
->name
);
11509 ssd_set_alarm(dev
);
11510 goto out_request_irq
;
11513 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11514 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11515 if (SSD_INT_MSIX
== dev
->int_mode
) {
11516 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11517 cpu_affinity
= cpumask_first(cpu_mask
);
11520 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11521 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11522 if (cpu_affinity
>= nr_cpu_ids
) {
11523 cpu_affinity
= cpumask_first(cpu_mask
);
11532 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11533 if (SSD_INT_MSIX
== dev
->int_mode
) {
11534 for (j
=0; j
<dev
->nr_queue
; j
++) {
11535 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11540 for (i
--; i
>=0; i
--) {
11541 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11544 if (SSD_INT_MSIX
== dev
->int_mode
) {
11545 pci_disable_msix(dev
->pdev
);
11546 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11547 pci_disable_msi(dev
->pdev
);
11554 static void ssd_initial_log(struct ssd_device
*dev
)
11557 uint32_t speed
, width
;
11559 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11563 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11565 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
11568 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11570 width
= (val
>> 4)& 0x3F;
11571 if (0x1 == speed
) {
11572 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11573 } else if (0x2 == speed
) {
11574 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11576 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11578 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11583 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11584 static void ssd_hwmon_worker(void *data
)
11586 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11588 static void ssd_hwmon_worker(struct work_struct
*work
)
11590 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11593 if (ssd_check_hw(dev
)) {
11594 //hio_err("%s: check hardware failed\n", dev->name);
11598 ssd_check_clock(dev
);
11599 ssd_check_volt(dev
);
11601 ssd_mon_boardvolt(dev
);
11604 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11605 static void ssd_tempmon_worker(void *data
)
11607 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11609 static void ssd_tempmon_worker(struct work_struct
*work
)
11611 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11614 if (ssd_check_hw(dev
)) {
11615 //hio_err("%s: check hardware failed\n", dev->name);
11623 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11624 static void ssd_capmon_worker(void *data
)
11626 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11628 static void ssd_capmon_worker(struct work_struct
*work
)
11630 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11633 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11636 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11640 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11644 /* fault before? */
11645 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11646 ret
= ssd_check_pl_cap_fast(dev
);
11653 ret
= ssd_do_cap_learn(dev
, &cap
);
11655 hio_err("%s: cap learn failed\n", dev
->name
);
11656 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11660 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11662 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11663 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11666 //use the fw event id?
11667 if (cap
< cap_threshold
) {
11668 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11669 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11671 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11672 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11673 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11678 static void ssd_routine_start(void *data
)
11680 struct ssd_device
*dev
;
11687 dev
->routine_tick
++;
11689 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11690 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11691 queue_work(dev
->workq
, &dev
->log_work
);
11694 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11695 queue_work(dev
->workq
, &dev
->hwmon_work
);
11698 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11699 queue_work(dev
->workq
, &dev
->capmon_work
);
11702 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11703 /* CAP fault? check again */
11704 queue_work(dev
->workq
, &dev
->capmon_work
);
11707 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11708 queue_work(dev
->workq
, &dev
->tempmon_work
);
11711 /* schedule routine */
11712 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11715 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11717 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11720 (void)ssd_del_timer(&dev
->routine_timer
);
11722 (void)ssd_del_timer(&dev
->bm_timer
);
11725 static int ssd_init_routine(struct ssd_device
*dev
)
11727 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11730 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11731 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
11732 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
11733 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
11734 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
11736 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
11737 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
11738 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
11739 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
11743 ssd_initial_log(dev
);
11745 /* schedule bm routine */
11746 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
11748 /* schedule routine */
11749 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
11755 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11758 ssd_remove_one (struct pci_dev
*pdev
)
11760 struct ssd_device
*dev
;
11766 dev
= pci_get_drvdata(pdev
);
11771 list_del_init(&dev
->list
);
11773 ssd_unregister_sysfs(dev
);
11775 /* offline firstly */
11776 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
11778 /* clean work queue first */
11780 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
11781 ssd_cleanup_workq(dev
);
11785 (void)ssd_flush(dev
);
11786 (void)ssd_save_md(dev
);
11790 ssd_save_smart(dev
);
11793 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
11794 ssd_cleanup_blkdev(dev
);
11798 ssd_cleanup_chardev(dev
);
11801 /* clean routine */
11803 ssd_cleanup_routine(dev
);
11806 ssd_cleanup_queue(dev
);
11808 ssd_cleanup_tag(dev
);
11809 ssd_cleanup_thread(dev
);
11813 ssd_cleanup_dcmd(dev
);
11814 ssd_cleanup_cmd(dev
);
11815 ssd_cleanup_response(dev
);
11818 ssd_cleanup_log(dev
);
11821 if (dev
->reload_fw
) { //reload fw
11822 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
11825 /* unmap physical adress */
11826 #ifdef LINUX_SUSE_OS
11827 iounmap(dev
->ctrlp
);
11829 pci_iounmap(pdev
, dev
->ctrlp
);
11832 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
11834 pci_disable_device(pdev
);
11836 pci_set_drvdata(pdev
, NULL
);
11842 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11845 ssd_init_one(struct pci_dev
*pdev
,
11846 const struct pci_device_id
*ent
)
11848 struct ssd_device
*dev
;
11851 if (!pdev
|| !ent
) {
11856 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
11859 goto out_alloc_dev
;
11861 memset(dev
, 0, sizeof(struct ssd_device
));
11863 dev
->owner
= THIS_MODULE
;
11865 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
11869 dev
->idx
= ssd_get_index(dev
->slave
);
11870 if (dev
->idx
< 0) {
11872 goto out_get_index
;
11876 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
11877 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
11879 dev
->major
= ssd_major
;
11880 dev
->cmajor
= ssd_cmajor
;
11882 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
11883 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
11884 dev
->major
= ssd_major_sl
;
11888 atomic_set(&(dev
->refcnt
), 0);
11889 atomic_set(&(dev
->tocnt
), 0);
11891 mutex_init(&dev
->fw_mutex
);
11894 mutex_init(&dev
->gd_mutex
);
11897 pci_set_drvdata(pdev
, dev
);
11899 kref_init(&dev
->kref
);
11901 ret
= pci_enable_device(pdev
);
11903 hio_warn("%s: can not enable device\n", dev
->name
);
11904 goto out_enable_device
;
11907 pci_set_master(pdev
);
11909 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11910 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
11912 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
11915 hio_warn("%s: set dma mask: failed\n", dev
->name
);
11916 goto out_set_dma_mask
;
11919 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11920 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
11922 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
11925 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
11926 goto out_set_dma_mask
;
11929 dev
->mmio_base
= pci_resource_start(pdev
, 0);
11930 dev
->mmio_len
= pci_resource_len(pdev
, 0);
11932 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
11933 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
11935 goto out_request_mem_region
;
11938 /* 2.6.9 kernel bug */
11939 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
11941 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
11943 goto out_pci_iomap
;
11946 ret
= ssd_check_hw(dev
);
11948 hio_err("%s: check hardware failed\n", dev
->name
);
11952 ret
= ssd_init_protocol_info(dev
);
11954 hio_err("%s: init protocol info failed\n", dev
->name
);
11955 goto out_init_protocol_info
;
11959 ssd_clear_alarm(dev
);
11961 ret
= ssd_init_fw_info(dev
);
11963 hio_err("%s: init firmware info failed\n", dev
->name
);
11965 ssd_set_alarm(dev
);
11966 goto out_init_fw_info
;
11974 ret
= ssd_init_rom_info(dev
);
11976 hio_err("%s: init rom info failed\n", dev
->name
);
11978 ssd_set_alarm(dev
);
11979 goto out_init_rom_info
;
11982 ret
= ssd_init_label(dev
);
11984 hio_err("%s: init label failed\n", dev
->name
);
11986 ssd_set_alarm(dev
);
11987 goto out_init_label
;
11990 ret
= ssd_init_workq(dev
);
11992 hio_warn("%s: init workq failed\n", dev
->name
);
11993 goto out_init_workq
;
11995 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
11997 ret
= ssd_init_log(dev
);
11999 hio_err("%s: init log failed\n", dev
->name
);
12001 ssd_set_alarm(dev
);
12005 ret
= ssd_init_smart(dev
);
12007 hio_err("%s: init info failed\n", dev
->name
);
12009 ssd_set_alarm(dev
);
12010 goto out_init_smart
;
12014 ret
= ssd_init_hw_info(dev
);
12016 hio_err("%s: init hardware info failed\n", dev
->name
);
12018 ssd_set_alarm(dev
);
12019 goto out_init_hw_info
;
12027 ret
= ssd_init_sensor(dev
);
12029 hio_err("%s: init sensor failed\n", dev
->name
);
12031 ssd_set_alarm(dev
);
12032 goto out_init_sensor
;
12035 ret
= ssd_init_pl_cap(dev
);
12037 hio_err("%s: int pl_cap failed\n", dev
->name
);
12039 ssd_set_alarm(dev
);
12040 goto out_init_pl_cap
;
12044 ret
= ssd_check_init_state(dev
);
12046 hio_err("%s: check init state failed\n", dev
->name
);
12048 ssd_set_alarm(dev
);
12049 goto out_check_init_state
;
12052 ret
= ssd_init_response(dev
);
12054 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12055 goto out_init_response
;
12058 ret
= ssd_init_cmd(dev
);
12060 hio_warn("%s: init msg failed\n", dev
->name
);
12064 ret
= ssd_init_dcmd(dev
);
12066 hio_warn("%s: init cmd failed\n", dev
->name
);
12067 goto out_init_dcmd
;
12070 ret
= ssd_init_irq(dev
);
12072 hio_warn("%s: init irq failed\n", dev
->name
);
12076 ret
= ssd_init_thread(dev
);
12078 hio_warn("%s: init thread failed\n", dev
->name
);
12079 goto out_init_thread
;
12082 ret
= ssd_init_tag(dev
);
12084 hio_warn("%s: init tags failed\n", dev
->name
);
12085 goto out_init_tags
;
12089 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12091 ret
= ssd_init_queue(dev
);
12093 hio_warn("%s: init queue failed\n", dev
->name
);
12094 goto out_init_queue
;
12102 ret
= ssd_init_ot_protect(dev
);
12104 hio_err("%s: int ot_protect failed\n", dev
->name
);
12106 ssd_set_alarm(dev
);
12107 goto out_int_ot_protect
;
12110 ret
= ssd_init_wmode(dev
);
12112 hio_warn("%s: init write mode\n", dev
->name
);
12113 goto out_init_wmode
;
12116 /* init routine after hw is ready */
12117 ret
= ssd_init_routine(dev
);
12119 hio_warn("%s: init routine\n", dev
->name
);
12120 goto out_init_routine
;
12123 ret
= ssd_init_chardev(dev
);
12125 hio_warn("%s: register char device failed\n", dev
->name
);
12126 goto out_init_chardev
;
12130 ret
= ssd_init_blkdev(dev
);
12132 hio_warn("%s: register block device failed\n", dev
->name
);
12133 goto out_init_blkdev
;
12135 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12137 ret
= ssd_register_sysfs(dev
);
12139 hio_warn("%s: register sysfs failed\n", dev
->name
);
12140 goto out_register_sysfs
;
12145 list_add_tail(&dev
->list
, &ssd_list
);
12149 out_register_sysfs
:
12150 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12151 ssd_cleanup_blkdev(dev
);
12155 ssd_cleanup_chardev(dev
);
12160 ssd_cleanup_routine(dev
);
12164 out_int_ot_protect
:
12165 ssd_cleanup_queue(dev
);
12167 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12168 ssd_cleanup_tag(dev
);
12170 ssd_cleanup_thread(dev
);
12174 ssd_cleanup_dcmd(dev
);
12176 ssd_cleanup_cmd(dev
);
12178 ssd_cleanup_response(dev
);
12180 out_check_init_state
:
12187 ssd_cleanup_log(dev
);
12192 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12193 ssd_cleanup_workq(dev
);
12199 out_init_protocol_info
:
12201 #ifdef LINUX_SUSE_OS
12202 iounmap(dev
->ctrlp
);
12204 pci_iounmap(pdev
, dev
->ctrlp
);
12207 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12208 out_request_mem_region
:
12210 pci_disable_device(pdev
);
12212 pci_set_drvdata(pdev
, NULL
);
12220 static void ssd_cleanup_tasklet(void)
12223 for_each_online_cpu(i
) {
12224 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
12228 static int ssd_init_tasklet(void)
12232 for_each_online_cpu(i
) {
12233 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
12236 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12238 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
12245 static struct pci_device_id ssd_pci_tbl
[] = {
12246 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12247 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12248 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12249 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12250 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12253 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12255 static struct pci_driver ssd_driver
= {
12256 .name
= MODULE_NAME
,
12257 .id_table
= ssd_pci_tbl
,
12258 .probe
= ssd_init_one
,
12259 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12260 .remove
= __devexit_p(ssd_remove_one
),
12262 .remove
= ssd_remove_one
,
12266 /* notifier block to get a notify on system shutdown/halt/reboot */
12267 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12269 struct ssd_device
*dev
= NULL
;
12270 struct ssd_device
*n
= NULL
;
12272 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12273 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
12275 (void)ssd_flush(dev
);
12276 (void)ssd_save_md(dev
);
12280 ssd_save_smart(dev
);
12282 ssd_stop_workq(dev
);
12284 if (dev
->reload_fw
) {
12285 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12293 static struct notifier_block ssd_notifier
= {
12294 ssd_notify_reboot
, NULL
, 0
12297 static int __init
ssd_init_module(void)
12301 hio_info("driver version: %s\n", DRIVER_VERSION
);
12303 ret
= ssd_init_index();
12305 hio_warn("init index failed\n");
12306 goto out_init_index
;
12309 ret
= ssd_init_proc();
12311 hio_warn("init proc failed\n");
12312 goto out_init_proc
;
12315 ret
= ssd_init_sysfs();
12317 hio_warn("init sysfs failed\n");
12318 goto out_init_sysfs
;
12321 ret
= ssd_init_tasklet();
12323 hio_warn("init tasklet failed\n");
12324 goto out_init_tasklet
;
12327 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12328 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12330 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12332 if (IS_ERR(ssd_class
)) {
12333 ret
= PTR_ERR(ssd_class
);
12334 goto out_class_create
;
12337 if (ssd_cmajor
> 0) {
12338 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12340 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12343 hio_warn("unable to register chardev major number\n");
12344 goto out_register_chardev
;
12347 if (ssd_major
> 0) {
12348 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12350 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12353 hio_warn("unable to register major number\n");
12354 goto out_register_blkdev
;
12357 if (ssd_major_sl
> 0) {
12358 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12360 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12363 hio_warn("unable to register slave major number\n");
12364 goto out_register_blkdev_sl
;
12367 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
12368 mode
= SSD_DRV_MODE_STANDARD
;
12372 if (mode
!= SSD_DRV_MODE_STANDARD
) {
12376 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
12377 int_mode
= SSD_INT_MODE_DEFAULT
;
12380 if (threaded_irq
) {
12381 int_mode
= SSD_INT_MSI
;
12384 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
12385 log_level
= SSD_LOG_LEVEL_ERR
;
12388 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
12389 wmode
= SSD_WMODE_DEFAULT
;
12392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12393 ret
= pci_module_init(&ssd_driver
);
12395 ret
= pci_register_driver(&ssd_driver
);
12398 hio_warn("pci init failed\n");
12402 ret
= register_reboot_notifier(&ssd_notifier
);
12404 hio_warn("register reboot notifier failed\n");
12405 goto out_register_reboot_notifier
;
12410 out_register_reboot_notifier
:
12412 pci_unregister_driver(&ssd_driver
);
12413 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12414 out_register_blkdev_sl
:
12415 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12416 out_register_blkdev
:
12417 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12418 out_register_chardev
:
12419 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12420 class_simple_destroy(ssd_class
);
12422 class_destroy(ssd_class
);
12425 ssd_cleanup_tasklet();
12427 ssd_cleanup_sysfs();
12429 ssd_cleanup_proc();
12431 ssd_cleanup_index();
12437 static void __exit
ssd_cleanup_module(void)
12440 hio_info("unload driver: %s\n", DRIVER_VERSION
);
12444 unregister_reboot_notifier(&ssd_notifier
);
12446 pci_unregister_driver(&ssd_driver
);
12448 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12449 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12450 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12451 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12452 class_simple_destroy(ssd_class
);
12454 class_destroy(ssd_class
);
12457 ssd_cleanup_tasklet();
12458 ssd_cleanup_sysfs();
12459 ssd_cleanup_proc();
12460 ssd_cleanup_index();
12463 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
12465 struct ssd_device
*dev
;
12467 struct ssd_log
*le
;
12471 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
12475 dev
= bdev
->bd_disk
->private_data
;
12476 dev
->event_call
= event_call
;
12478 do_gettimeofday(&tv
);
12481 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
12482 log_nr
= dev
->internal_log
.nr_log
;
12485 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
12486 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
12494 int ssd_unregister_event_notifier(struct block_device
*bdev
)
12496 struct ssd_device
*dev
;
12498 if (!bdev
|| !(bdev
->bd_disk
)) {
12502 dev
= bdev
->bd_disk
->private_data
;
12503 dev
->event_call
= NULL
;
12508 EXPORT_SYMBOL(ssd_get_label
);
12509 EXPORT_SYMBOL(ssd_get_version
);
12510 EXPORT_SYMBOL(ssd_set_otprotect
);
12511 EXPORT_SYMBOL(ssd_bm_status
);
12512 EXPORT_SYMBOL(ssd_submit_pbio
);
12513 EXPORT_SYMBOL(ssd_get_pciaddr
);
12514 EXPORT_SYMBOL(ssd_get_temperature
);
12515 EXPORT_SYMBOL(ssd_register_event_notifier
);
12516 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
12517 EXPORT_SYMBOL(ssd_reset
);
12518 EXPORT_SYMBOL(ssd_set_wmode
);
12522 module_init(ssd_init_module
);
12523 module_exit(ssd_cleanup_module
);
12524 MODULE_VERSION(DRIVER_VERSION
);
12525 MODULE_LICENSE("GPL");
12526 MODULE_AUTHOR("Huawei SSD DEV Team");
12527 MODULE_DESCRIPTION("Huawei SSD driver");