2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
63 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
64 #define bio_endio(bio, errors) bio_endio(bio)
68 #define MODULE_NAME "hio"
69 #define DRIVER_VERSION "2.1.0.23"
70 #define DRIVER_VERSION_LEN 16
72 #define SSD_FW_MIN 0x1
74 #define SSD_DEV_NAME MODULE_NAME
75 #define SSD_DEV_NAME_LEN 16
76 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
77 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
82 #define SSD_MAJOR_SL 0
85 #define SSD_MAX_DEV 702
86 #define SSD_ALPHABET_NUM 26
88 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
89 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
90 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
91 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
94 #define SSD_SLAVE_PORT_DEVID 0x000a
98 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
100 #define SSD_ESCAPE_IRQ
106 #define SSD_MSIX_VEC 8
109 //#undef SSD_ESCAPE_IRQ
110 #define SSD_MSIX_AFFINITY_FORCE
115 /* Over temperature protect */
116 #define SSD_OT_PROTECT
118 #ifdef SSD_QUEUE_PBIO
119 #define BIO_SSD_PBIO 20
123 //#define SSD_DEBUG_ERR
126 #define SSD_CMD_TIMEOUT (60*HZ)
129 #define SSD_SPI_TIMEOUT (5*HZ)
130 #define SSD_I2C_TIMEOUT (5*HZ)
132 #define SSD_I2C_MAX_DATA (127)
133 #define SSD_SMBUS_BLOCK_MAX (32)
134 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
137 #define SSD_INIT_WAIT (1000) //1s
138 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
139 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
140 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
141 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
142 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
144 /* blkdev busy wait */
145 #define SSD_DEV_BUSY_WAIT 1000 //ms
146 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
149 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
150 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
152 #define SSD_BM_RETRY_MAX 7
154 /* bm routine interval */
155 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
157 /* routine interval */
158 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
159 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
160 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
161 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
164 #define SSD_DMA_ALIGN (16)
166 /* some hw default */
167 #define SSD_LOG_MAX_SZ 4096
169 #define SSD_NAND_OOB_SZ 1024
170 #define SSD_NAND_ID_SZ 8
171 #define SSD_NAND_ID_BUFF_SZ 1024
172 #define SSD_NAND_MAX_CE 2
174 #define SSD_BBT_RESERVED 8
176 #define SSD_ECC_MAX_FLIP (64+1)
178 #define SSD_RAM_ALIGN 16
181 #define SSD_RELOAD_FLAG 0x3333CCCC
182 #define SSD_RELOAD_FW 0xAA5555AA
183 #define SSD_RESET_NOINIT 0xAA5555AA
184 #define SSD_RESET 0x55AAAA55
185 #define SSD_RESET_FULL 0x5A
186 //#define SSD_RESET_WAIT 1000 //1s
187 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
191 #define SSD_PROTOCOL_V1 0x0
193 #define SSD_ROM_SIZE (16*1024*1024)
194 #define SSD_ROM_BLK_SIZE (256*1024)
195 #define SSD_ROM_PAGE_SIZE (256)
196 #define SSD_ROM_NR_BRIDGE_FW 2
197 #define SSD_ROM_NR_CTRL_FW 2
198 #define SSD_ROM_BRIDGE_FW_BASE 0
199 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
200 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
201 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
202 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
203 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
206 #define SSD_PROTOCOL_V3 0x3000000
207 #define SSD_PROTOCOL_V3_1_1 0x3010001
208 #define SSD_PROTOCOL_V3_1_3 0x3010003
209 #define SSD_PROTOCOL_V3_2 0x3020000
210 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
211 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
212 #define SSD_PROTOCOL_V3_2_4 0x3020004
215 #define SSD_PV3_ROM_NR_BM_FW 1
216 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
218 #define SSD_ROM_LOG_SZ (64*1024*4)
220 #define SSD_ROM_NR_SMART_MAX 2
221 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
222 #define SSD_PV3_ROM_SMART_SZ (64*1024)
225 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
226 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
230 #define SSD_REQ_FIFO_REG 0x0000
231 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
232 #define SSD_RESP_PTR_REG 0x0010 //0x0018
233 #define SSD_INTR_INTERVAL_REG 0x0018
234 #define SSD_READY_REG 0x001C
235 #define SSD_BRIDGE_TEST_REG 0x0020
236 #define SSD_STRIPE_SIZE_REG 0x0028
237 #define SSD_CTRL_VER_REG 0x0030 //controller
238 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
239 #define SSD_PCB_VER_REG 0x0038
240 #define SSD_BURN_FLAG_REG 0x0040
241 #define SSD_BRIDGE_INFO_REG 0x0044
243 #define SSD_WL_VAL_REG 0x0048 //32-bit
245 #define SSD_BB_INFO_REG 0x004C
247 #define SSD_ECC_TEST_REG 0x0050 //test only
248 #define SSD_ERASE_TEST_REG 0x0058 //test only
249 #define SSD_WRITE_TEST_REG 0x0060 //test only
251 #define SSD_RESET_REG 0x0068
252 #define SSD_RELOAD_FW_REG 0x0070
254 #define SSD_RESERVED_BLKS_REG 0x0074
255 #define SSD_VALID_PAGES_REG 0x0078
256 #define SSD_CH_INFO_REG 0x007C
258 #define SSD_CTRL_TEST_REG_SZ 0x8
259 #define SSD_CTRL_TEST_REG0 0x0080
260 #define SSD_CTRL_TEST_REG1 0x0088
261 #define SSD_CTRL_TEST_REG2 0x0090
262 #define SSD_CTRL_TEST_REG3 0x0098
263 #define SSD_CTRL_TEST_REG4 0x00A0
264 #define SSD_CTRL_TEST_REG5 0x00A8
265 #define SSD_CTRL_TEST_REG6 0x00B0
266 #define SSD_CTRL_TEST_REG7 0x00B8
268 #define SSD_FLASH_INFO_REG0 0x00C0
269 #define SSD_FLASH_INFO_REG1 0x00C8
270 #define SSD_FLASH_INFO_REG2 0x00D0
271 #define SSD_FLASH_INFO_REG3 0x00D8
272 #define SSD_FLASH_INFO_REG4 0x00E0
273 #define SSD_FLASH_INFO_REG5 0x00E8
274 #define SSD_FLASH_INFO_REG6 0x00F0
275 #define SSD_FLASH_INFO_REG7 0x00F8
277 #define SSD_RESP_INFO_REG 0x01B8
278 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
280 #define SSD_CHIP_INFO_REG_SZ 0x10
281 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
282 #define SSD_CHIP_INFO_REG1 0x0110
283 #define SSD_CHIP_INFO_REG2 0x0120
284 #define SSD_CHIP_INFO_REG3 0x0130
285 #define SSD_CHIP_INFO_REG4 0x0140
286 #define SSD_CHIP_INFO_REG5 0x0150
287 #define SSD_CHIP_INFO_REG6 0x0160
288 #define SSD_CHIP_INFO_REG7 0x0170
290 #define SSD_RAM_INFO_REG 0x01C4
292 #define SSD_BBT_BASE_REG 0x01C8
293 #define SSD_ECT_BASE_REG 0x01CC
295 #define SSD_CLEAR_INTR_REG 0x01F0
297 #define SSD_INIT_STATE_REG_SZ 0x8
298 #define SSD_INIT_STATE_REG0 0x0200
299 #define SSD_INIT_STATE_REG1 0x0208
300 #define SSD_INIT_STATE_REG2 0x0210
301 #define SSD_INIT_STATE_REG3 0x0218
302 #define SSD_INIT_STATE_REG4 0x0220
303 #define SSD_INIT_STATE_REG5 0x0228
304 #define SSD_INIT_STATE_REG6 0x0230
305 #define SSD_INIT_STATE_REG7 0x0238
307 #define SSD_ROM_INFO_REG 0x0600
308 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
309 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
310 #define SSD_ROM_VP_INFO_REG 0x060C
312 #define SSD_LOG_INFO_REG 0x0610
313 #define SSD_LED_REG 0x0614
314 #define SSD_MSG_BASE_REG 0x06F8
317 #define SSD_SPI_REG_CMD 0x0180
318 #define SSD_SPI_REG_CMD_HI 0x0184
319 #define SSD_SPI_REG_WDATA 0x0188
320 #define SSD_SPI_REG_ID 0x0190
321 #define SSD_SPI_REG_STATUS 0x0198
322 #define SSD_SPI_REG_RDATA 0x01A0
323 #define SSD_SPI_REG_READY 0x01A8
326 #define SSD_I2C_CTRL_REG 0x06F0
327 #define SSD_I2C_RDATA_REG 0x06F4
329 /* temperature reg */
330 #define SSD_BRIGE_TEMP_REG 0x0618
332 #define SSD_CTRL_TEMP_REG0 0x0700
333 #define SSD_CTRL_TEMP_REG1 0x0708
334 #define SSD_CTRL_TEMP_REG2 0x0710
335 #define SSD_CTRL_TEMP_REG3 0x0718
336 #define SSD_CTRL_TEMP_REG4 0x0720
337 #define SSD_CTRL_TEMP_REG5 0x0728
338 #define SSD_CTRL_TEMP_REG6 0x0730
339 #define SSD_CTRL_TEMP_REG7 0x0738
341 /* revision 3 reg */
342 #define SSD_PROTOCOL_VER_REG 0x01B4
344 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
345 #define SSD_BM_FAULT_REG 0x0660
347 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
348 #define SSD_PV3_RAM_STATUS_REG0 0x0260
349 #define SSD_PV3_RAM_STATUS_REG1 0x0264
350 #define SSD_PV3_RAM_STATUS_REG2 0x0268
351 #define SSD_PV3_RAM_STATUS_REG3 0x026C
352 #define SSD_PV3_RAM_STATUS_REG4 0x0270
353 #define SSD_PV3_RAM_STATUS_REG5 0x0274
354 #define SSD_PV3_RAM_STATUS_REG6 0x0278
355 #define SSD_PV3_RAM_STATUS_REG7 0x027C
357 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
358 #define SSD_PV3_CHIP_INFO_REG0 0x0300
359 #define SSD_PV3_CHIP_INFO_REG1 0x0340
360 #define SSD_PV3_CHIP_INFO_REG2 0x0380
361 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
362 #define SSD_PV3_CHIP_INFO_REG4 0x0400
363 #define SSD_PV3_CHIP_INFO_REG5 0x0440
364 #define SSD_PV3_CHIP_INFO_REG6 0x0480
365 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
367 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
368 #define SSD_PV3_INIT_STATE_REG0 0x0500
369 #define SSD_PV3_INIT_STATE_REG1 0x0520
370 #define SSD_PV3_INIT_STATE_REG2 0x0540
371 #define SSD_PV3_INIT_STATE_REG3 0x0560
372 #define SSD_PV3_INIT_STATE_REG4 0x0580
373 #define SSD_PV3_INIT_STATE_REG5 0x05A0
374 #define SSD_PV3_INIT_STATE_REG6 0x05C0
375 #define SSD_PV3_INIT_STATE_REG7 0x05E0
377 /* revision 3.1.1 reg */
378 #define SSD_FULL_RESET_REG 0x01B0
380 #define SSD_CTRL_REG_ZONE_SZ 0x800
382 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
383 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
385 #define SSD_BB_ACC_REG_SZ 0x4
386 #define SSD_BB_ACC_REG0 0x21C0
387 #define SSD_BB_ACC_REG1 0x29C0
388 #define SSD_BB_ACC_REG2 0x31C0
390 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
391 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
393 #define SSD_EC_ACC_REG_SZ 0x4
394 #define SSD_EC_ACC_REG0 0x21E0
395 #define SSD_EC_ACC_REG1 0x29E0
396 #define SSD_EC_ACC_REG2 0x31E0
398 /* revision 3.1.2 & 3.1.3 reg */
399 #define SSD_HW_STATUS_REG 0x02AC
401 #define SSD_PLP_INFO_REG 0x0664
403 /* revision 3.2 reg */
404 #define SSD_POWER_ON_REG 0x01EC
405 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
406 #define SSD_PL_CAP_LEARN_REG 0x01FC
408 #define SSD_FPGA_1V0_REG0 0x2070
409 #define SSD_FPGA_1V8_REG0 0x2078
410 #define SSD_FPGA_1V0_REG1 0x2870
411 #define SSD_FPGA_1V8_REG1 0x2878
413 /* revision 3.2 reg */
414 #define SSD_READ_OT_REG0 0x2260
415 #define SSD_WRITE_OT_REG0 0x2264
416 #define SSD_READ_OT_REG1 0x2A60
417 #define SSD_WRITE_OT_REG1 0x2A64
421 #define SSD_FUNC_READ 0x01
422 #define SSD_FUNC_WRITE 0x02
423 #define SSD_FUNC_NAND_READ_WOOB 0x03
424 #define SSD_FUNC_NAND_READ 0x04
425 #define SSD_FUNC_NAND_WRITE 0x05
426 #define SSD_FUNC_NAND_ERASE 0x06
427 #define SSD_FUNC_NAND_READ_ID 0x07
428 #define SSD_FUNC_READ_LOG 0x08
429 #define SSD_FUNC_TRIM 0x09
430 #define SSD_FUNC_RAM_READ 0x10
431 #define SSD_FUNC_RAM_WRITE 0x11
432 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
435 #define SSD_SPI_CMD_PROGRAM 0x02
436 #define SSD_SPI_CMD_READ 0x03
437 #define SSD_SPI_CMD_W_DISABLE 0x04
438 #define SSD_SPI_CMD_READ_STATUS 0x05
439 #define SSD_SPI_CMD_W_ENABLE 0x06
440 #define SSD_SPI_CMD_ERASE 0xd8
441 #define SSD_SPI_CMD_CLSR 0x30
442 #define SSD_SPI_CMD_READ_ID 0x9f
445 #define SSD_I2C_CTRL_READ 0x00
446 #define SSD_I2C_CTRL_WRITE 0x01
448 /* i2c internal register */
449 #define SSD_I2C_CFG_REG 0x00
450 #define SSD_I2C_DATA_REG 0x01
451 #define SSD_I2C_CMD_REG 0x02
452 #define SSD_I2C_STATUS_REG 0x03
453 #define SSD_I2C_SADDR_REG 0x04
454 #define SSD_I2C_LEN_REG 0x05
455 #define SSD_I2C_RLEN_REG 0x06
456 #define SSD_I2C_WLEN_REG 0x07
457 #define SSD_I2C_RESET_REG 0x08 //write for reset
458 #define SSD_I2C_PRER_REG 0x09
462 /* FPGA volt = ADC_value / 4096 * 3v */
463 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
464 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
465 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
466 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
/* The 64-bit FPGA voltage register packs three readings:
 * bits [15:0] max, [31:16] min, [47:32] current; each 16-bit field holds a
 * 12-bit ADC sample left-justified by 4 bits.
 * All arguments are fully parenthesized (CERT PRE01-C) so expression
 * arguments expand correctly. */
#define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_MIN(val) ((((val) >> 16) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_CUR(val) ((((val) >> 32) & 0xffff) >> 4)
/* FPGA volt = ADC_value / 4096 * 3v (result in mV) */
#define SSD_FPGA_VOLT(val) (((val) * 3000) >> 12)
/* Pack a voltage log record: index in bits [31:24], controller id in
 * [23:16], raw voltage in [15:0]. Casts are applied to the parenthesized
 * argument, not just its first operand. */
#define SSD_VOLT_LOG_DATA(idx, ctrl, volt) ((((uint32_t)(idx)) << 24) | (((uint32_t)(ctrl)) << 16) | ((uint32_t)(volt)))
485 SSD_CLOCK_166M_LOST
= 0,
/* 7-bit I2C slave addresses of the temperature sensors, pre-shifted left by
 * one for the bus controller's address byte. */
#define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
#define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)
/* The integer temperature (DegC) lives in the high byte of the raw 16-bit
 * sensor word; argument parenthesized so expressions shift as a whole. */
#define SSD_SENSOR_CONVERT_TEMP(val) ((int)((val) >> 8))
498 #define SSD_INLET_OT_TEMP (55) //55 DegC
499 #define SSD_INLET_OT_HYST (50) //50 DegC
500 #define SSD_FLASH_OT_TEMP (70) //70 DegC
501 #define SSD_FLASH_OT_HYST (65) //65 DegC
514 SSD_LM75_REG_TEMP
= 0,
521 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
522 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
523 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
525 #define SSD_LM80_REG_FAN1 0x28
526 #define SSD_LM80_REG_FAN2 0x29
527 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
529 #define SSD_LM80_REG_TEMP 0x27
530 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
531 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
532 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
533 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
535 #define SSD_LM80_REG_CONFIG 0x00
536 #define SSD_LM80_REG_ALARM1 0x01
537 #define SSD_LM80_REG_ALARM2 0x02
538 #define SSD_LM80_REG_MASK1 0x03
539 #define SSD_LM80_REG_MASK2 0x04
540 #define SSD_LM80_REG_FANDIV 0x05
541 #define SSD_LM80_REG_RES 0x06
/* Scale a raw LM80 voltage reading: raw * 10 / 256. Argument parenthesized
 * so expression arguments multiply as a whole. */
#define SSD_LM80_CONVERT_VOLT(val) (((val) * 10) >> 8)
/* 3.3V rail value scaled by 33/19 — presumably compensating an external
 * voltage divider; NOTE(review): confirm against the board schematic. */
#define SSD_LM80_3V3_VOLT(val) ((val)*33/19)
/* sampling/conversion interval in ms */
#define SSD_LM80_CONV_INTERVAL (1000)
556 SSD_LM80_IN_FPGA_3V3
,
561 struct ssd_lm80_limit
567 /* +/- 5% except cap in*/
568 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
569 {171, 217}, /* CAP in: 1710 ~ 2170 */
578 /* temperature sensors */
588 #ifdef SSD_OT_PROTECT
589 #define SSD_OT_DELAY (60) //ms
591 #define SSD_OT_TEMP (90) //90 DegC
593 #define SSD_OT_TEMP_HYST (85) //85 DegC
596 /* fpga temperature */
597 //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
/* Integer approximation of the commented-out float formula
 * (val*503.975/4096 - 273.15): raw 12-bit ADC sample -> DegC. */
#define CONVERT_TEMP(val) ((val)*504/4096-273)

/* The 64-bit FPGA temperature register packs three readings:
 * bits [15:0] max, [31:16] min, [47:32] current; each 16-bit field holds a
 * 12-bit sample left-justified by 4 bits. Arguments parenthesized so
 * expression arguments mask/shift as a whole (CERT PRE01-C). */
#define MAX_TEMP(val) CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val) CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val) CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))
/* LM80 voltage inputs used to sample the power-loss capacitor pack. */
#define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
#define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
/* Capacitance estimate from two voltage samples u1, u2 over time t.
 * NOTE(review): the 162 constant is presumably the discharge load — confirm
 * against hardware docs. Caller must ensure u1 != u2 (division). All
 * arguments parenthesized so expression arguments evaluate as a whole. */
#define SSD_PL_CAP_LEARN(u1, u2, t) (((t)*((u1)+(u2)))/(2*162*((u1)-(u2))))
#define SSD_PL_CAP_LEARN_WAIT (20) //20ms
#define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s
612 #define SSD_PL_CAP_CHARGE_WAIT (1000)
613 #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s
/* Raw capacitor-pack reading scaled by 7 to get mV.
 * NOTE(review): factor presumably reflects a 1/7 divider — confirm.
 * Argument parenthesized so expression arguments multiply as a whole. */
#define SSD_PL_CAP_VOLT(val) ((val)*7)

/* capacitor-pack voltage thresholds, in mV */
#define SSD_PL_CAP_VOLT_FULL (13700)
#define SSD_PL_CAP_VOLT_READY (12880)

#define SSD_PL_CAP_THRESHOLD (8900)
#define SSD_PL_CAP_CP_THRESHOLD (5800)
#define SSD_PL_CAP_THRESHOLD_HYST (100)
624 enum ssd_pl_cap_status
632 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
633 SSD_PL_CAP_CP
/* 3 cap */
638 #define SSD_HWMON_OFFS_TEMP (0)
639 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
640 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
641 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
642 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
643 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
645 #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx)
646 #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx)
647 #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx)
648 #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx)
649 #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx)
650 #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx)
666 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
670 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
674 while (__size
< size
) __size
<<= 1;
680 fifo
->data
= vmalloc(esize
* __size
);
687 fifo
->mask
= __size
- 1;
690 spin_lock_init(&fifo
->lock
);
695 static void sfifo_free(struct sfifo
*fifo
)
710 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
712 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
716 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
717 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
722 static int sfifo_put(struct sfifo
*fifo
, void *val
)
730 if (!in_interrupt()) {
731 spin_lock_irq(&fifo
->lock
);
732 ret
= __sfifo_put(fifo
, val
);
733 spin_unlock_irq(&fifo
->lock
);
735 spin_lock(&fifo
->lock
);
736 ret
= __sfifo_put(fifo
, val
);
737 spin_unlock(&fifo
->lock
);
743 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
745 if (fifo
->out
== fifo
->in
) {
749 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
750 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
755 static int sfifo_get(struct sfifo
*fifo
, void *val
)
763 if (!in_interrupt()) {
764 spin_lock_irq(&fifo
->lock
);
765 ret
= __sfifo_get(fifo
, val
);
766 spin_unlock_irq(&fifo
->lock
);
768 spin_lock(&fifo
->lock
);
769 ret
= __sfifo_get(fifo
, val
);
770 spin_unlock(&fifo
->lock
);
777 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
783 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
789 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
791 struct bio
*bio
= ssd_bl
->prev
;
799 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
804 ssd_bl
->next
->bi_next
= bio
;
813 #define ssd_blist bio_list
814 #define ssd_blist_init bio_list_init
815 #define ssd_blist_get bio_list_get
816 #define ssd_blist_add bio_list_add
819 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
820 #define bio_start(bio) (bio->bi_sector)
822 #define bio_start(bio) (bio->bi_iter.bi_sector)
826 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
827 #define mutex_lock down
828 #define mutex_unlock up
829 #define mutex semaphore
830 #define mutex_init init_MUTEX
834 typedef union ssd_i2c_ctrl
{
842 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
844 typedef union ssd_i2c_data
{
851 }__attribute__((packed
)) ssd_i2c_data_t
;
856 SSD_WMODE_BUFFER
= 0,
873 typedef struct ssd_sg_entry
878 }__attribute__((packed
))ssd_sg_entry_t
;
880 typedef struct ssd_rw_msg
886 uint32_t reserved
; //for 64-bit align
887 struct ssd_sg_entry sge
[1]; //base
888 }__attribute__((packed
))ssd_rw_msg_t
;
890 typedef struct ssd_resp_msg
898 }__attribute__((packed
))ssd_resp_msg_t
;
900 typedef struct ssd_flush_msg
903 uint8_t flag
:2; //flash cache 0 or bbt 1
907 uint32_t reserved
; //align
908 }__attribute__((packed
))ssd_flush_msg_t
;
910 typedef struct ssd_nand_op_msg
916 uint32_t reserved
; //align
922 }__attribute__((packed
))ssd_nand_op_msg_t
;
924 typedef struct ssd_ram_op_msg
930 uint32_t reserved
; //align
934 }__attribute__((packed
))ssd_ram_op_msg_t
;
938 typedef struct ssd_log_msg
944 uint32_t reserved
; //align
946 }__attribute__((packed
))ssd_log_msg_t
;
948 typedef struct ssd_log_op_msg
954 uint32_t reserved
; //align
955 uint64_t reserved1
; //align
957 }__attribute__((packed
))ssd_log_op_msg_t
;
959 typedef struct ssd_log_resp_msg
963 uint16_t reserved1
:2; //align with the normal resp msg
967 }__attribute__((packed
))ssd_log_resp_msg_t
;
971 typedef union ssd_response_msq
973 ssd_resp_msg_t resp_msg
;
974 ssd_log_resp_msg_t log_resp_msg
;
977 } ssd_response_msq_t
;
981 typedef struct ssd_protocol_info
984 uint32_t init_state_reg
;
985 uint32_t init_state_reg_sz
;
986 uint32_t chip_info_reg
;
987 uint32_t chip_info_reg_sz
;
988 } ssd_protocol_info_t
;
990 typedef struct ssd_hw_info
995 uint32_t cmd_fifo_sz
;
996 uint32_t cmd_fifo_sz_mask
;
999 uint32_t resp_ptr_sz
;
1000 uint32_t resp_msg_sz
;
1004 uint16_t nr_data_ch
;
1010 uint8_t upper_pcb_ver
;
1012 uint8_t nand_vendor_id
;
1013 uint8_t nand_dev_id
;
1020 uint16_t bbf_seek
; //
1022 uint16_t page_count
; //per block
1024 uint32_t block_count
; //per flash
1028 uint32_t ram_max_len
;
1032 uint64_t md_base
; //metadata
1034 uint32_t md_entry_sz
;
1038 uint64_t nand_wbuff_base
;
1040 uint32_t md_reserved_blks
;
1041 uint32_t reserved_blks
;
1042 uint32_t valid_pages
;
1043 uint32_t max_valid_pages
;
1047 typedef struct ssd_hw_info_extend
1053 uint8_t form_factor
;
1056 }ssd_hw_info_extend_t
;
1058 typedef struct ssd_rom_info
1061 uint32_t block_size
;
1063 uint8_t nr_bridge_fw
;
1067 uint32_t bridge_fw_base
;
1068 uint32_t bridge_fw_sz
;
1069 uint32_t ctrl_fw_base
;
1070 uint32_t ctrl_fw_sz
;
1071 uint32_t bm_fw_base
;
1075 uint32_t smart_base
;
1078 uint32_t label_base
;
1086 SSD_DEBUG_WRITE_ERR
,
1096 typedef struct ssd_debug_info
1112 #define SSD_LABEL_FIELD_SZ 32
1113 #define SSD_SN_SZ 16
1115 typedef struct ssd_label
1117 char date
[SSD_LABEL_FIELD_SZ
];
1118 char sn
[SSD_LABEL_FIELD_SZ
];
1119 char part
[SSD_LABEL_FIELD_SZ
];
1120 char desc
[SSD_LABEL_FIELD_SZ
];
1121 char other
[SSD_LABEL_FIELD_SZ
];
1122 char maf
[SSD_LABEL_FIELD_SZ
];
1125 #define SSD_LABEL_DESC_SZ 256
1127 typedef struct ssd_labelv3
1129 char boardtype
[SSD_LABEL_FIELD_SZ
];
1130 char barcode
[SSD_LABEL_FIELD_SZ
];
1131 char item
[SSD_LABEL_FIELD_SZ
];
1132 char description
[SSD_LABEL_DESC_SZ
];
1133 char manufactured
[SSD_LABEL_FIELD_SZ
];
1134 char vendorname
[SSD_LABEL_FIELD_SZ
];
1135 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1136 char cleicode
[SSD_LABEL_FIELD_SZ
];
1137 char bom
[SSD_LABEL_FIELD_SZ
];
1141 typedef struct ssd_battery_info
1144 } ssd_battery_info_t
;
1146 /* ssd power stat */
1147 typedef struct ssd_power_stat
1149 uint64_t nr_poweron
;
1150 uint64_t nr_powerloss
;
1151 uint64_t init_failed
;
1155 typedef struct ssd_io_stat
1168 typedef struct ssd_ecc_info
1170 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1176 SSD_LOG_LEVEL_INFO
= 0,
1177 SSD_LOG_LEVEL_NOTICE
,
1178 SSD_LOG_LEVEL_WARNING
,
1183 typedef struct ssd_log_info
1186 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1190 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1192 typedef struct ssd_smart
1194 struct ssd_power_stat pstat
;
1195 struct ssd_io_stat io_stat
;
1196 struct ssd_ecc_info ecc_info
;
1197 struct ssd_log_info log_info
;
1203 typedef struct ssd_internal_log
1207 } ssd_internal_log_t
;
1210 typedef struct ssd_cmd
1213 struct scatterlist
*sgl
;
1214 struct list_head list
;
1217 int flag
; /*pbio(1) or bio(0)*/
1223 unsigned long start_time
;
1226 unsigned int nr_log
;
1228 struct timer_list cmd_timer
;
1229 struct completion
*waiting
;
1232 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1233 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1236 #define SSD_DCMD_MAX_SZ 32
1238 typedef struct ssd_dcmd
1240 struct list_head list
;
1242 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1258 #define SSD_QUEUE_NAME_LEN 16
1259 typedef struct ssd_queue
{
1260 char name
[SSD_QUEUE_NAME_LEN
];
1266 uint32_t resp_idx_mask
;
1267 uint32_t resp_msg_sz
;
1272 struct ssd_cmd
*cmd
;
1274 struct ssd_io_stat io_stat
;
1275 struct ssd_ecc_info ecc_info
;
1278 typedef struct ssd_device
{
1279 char name
[SSD_DEV_NAME_LEN
];
1286 #ifdef SSD_ESCAPE_IRQ
1292 int ot_delay
; //in ms
1296 atomic_t in_flight
[2]; //r&w
1300 struct list_head list
;
1301 struct pci_dev
*pdev
;
1303 unsigned long mmio_base
;
1304 unsigned long mmio_len
;
1305 void __iomem
*ctrlp
;
1307 struct mutex spi_mutex
;
1308 struct mutex i2c_mutex
;
1310 struct ssd_protocol_info protocol_info
;
1311 struct ssd_hw_info hw_info
;
1312 struct ssd_rom_info rom_info
;
1313 struct ssd_label label
;
1315 struct ssd_smart smart
;
1318 spinlock_t sendq_lock
;
1319 struct ssd_blist sendq
;
1320 struct task_struct
*send_thread
;
1321 wait_queue_head_t send_waitq
;
1324 spinlock_t doneq_lock
;
1325 struct ssd_blist doneq
;
1326 struct task_struct
*done_thread
;
1327 wait_queue_head_t done_waitq
;
1329 struct ssd_dcmd
*dcmd
;
1330 spinlock_t dcmd_lock
;
1331 struct list_head dcmd_list
; /* direct cmd list */
1332 wait_queue_head_t dcmd_wq
;
1334 unsigned long *tag_map
;
1335 wait_queue_head_t tag_wq
;
1337 spinlock_t cmd_lock
;
1338 struct ssd_cmd
*cmd
;
1341 ssd_event_call event_call
;
1343 dma_addr_t msg_base_dma
;
1346 void *resp_msg_base
;
1347 void *resp_ptr_base
;
1348 dma_addr_t resp_msg_base_dma
;
1349 dma_addr_t resp_ptr_base_dma
;
1352 struct msix_entry entry
[SSD_MSIX_VEC
];
1353 struct ssd_queue queue
[SSD_MSIX_VEC
];
1355 struct request_queue
*rq
; /* The device request queue */
1356 struct gendisk
*gd
; /* The gendisk structure */
1358 struct mutex internal_log_mutex
;
1359 struct ssd_internal_log internal_log
;
1360 struct workqueue_struct
*workq
;
1361 struct work_struct log_work
; /* get log */
1364 unsigned long state
; /* device state, for example, block device inited */
1366 struct module
*owner
;
1377 struct mutex gd_mutex
;
1378 struct ssd_log_info log_info
; /* volatile */
1380 atomic_t queue_depth
;
1381 struct mutex barrier_mutex
;
1382 struct mutex fw_mutex
;
1384 struct ssd_hw_info_extend hw_info_ext
;
1385 struct ssd_labelv3 labelv3
;
1389 struct mutex bm_mutex
;
1390 struct work_struct bm_work
; /* check bm */
1391 struct timer_list bm_timer
;
1392 struct sfifo log_fifo
;
1394 struct timer_list routine_timer
;
1395 unsigned long routine_tick
;
1396 unsigned long hwmon
;
1398 struct work_struct hwmon_work
; /* check hw */
1399 struct work_struct capmon_work
; /* check battery */
1400 struct work_struct tempmon_work
; /* check temp */
1403 struct ssd_debug_info db_info
;
1408 typedef struct ssd_acc_info
{
1409 uint32_t threshold_l1
;
1410 uint32_t threshold_l2
;
1414 typedef struct ssd_reg_op_info
1418 } ssd_reg_op_info_t
;
1420 typedef struct ssd_spi_op_info
1425 } ssd_spi_op_info_t
;
1427 typedef struct ssd_i2c_op_info
1434 } ssd_i2c_op_info_t
;
1436 typedef struct ssd_smbus_op_info
1442 } ssd_smbus_op_info_t
;
1444 typedef struct ssd_ram_op_info
{
1448 uint8_t __user
*buf
;
1449 } ssd_ram_op_info_t
;
1451 typedef struct ssd_flash_op_info
{
1456 uint8_t __user
*buf
;
1457 } ssd_flash_op_info_t
;
1459 typedef struct ssd_sw_log_info
{
1463 } ssd_sw_log_info_t
;
1465 typedef struct ssd_version_info
1467 uint32_t bridge_ver
; /* bridge fw version */
1468 uint32_t ctrl_ver
; /* controller fw version */
1469 uint32_t bm_ver
; /* battery manager fw version */
1470 uint8_t pcb_ver
; /* main pcb version */
1471 uint8_t upper_pcb_ver
;
1474 } ssd_version_info_t
;
1476 typedef struct pci_addr
1484 typedef struct ssd_drv_param_info
{
1494 } ssd_drv_param_info_t
;
1498 enum ssd_form_factor
1500 SSD_FORM_FACTOR_HHHL
= 0,
1501 SSD_FORM_FACTOR_FHHL
1505 /* ssd power loss protect */
1514 #define SSD_BM_SLAVE_ADDRESS 0x16
1515 #define SSD_BM_CAP 5
1518 #define SSD_BM_SAFETYSTATUS 0x51
1519 #define SSD_BM_OPERATIONSTATUS 0x54
1521 /* ManufacturerAccess */
1522 #define SSD_BM_MANUFACTURERACCESS 0x00
1523 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1525 /* Data flash access */
1526 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1527 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1528 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1529 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1531 /* min cap voltage */
1532 #define SSD_BM_CAP_VOLT_MIN 500
1537 SSD_BM_CAP_VINA = 1,
1543 SSD_BMSTATUS_OK
= 0,
1544 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1545 SSD_BMSTATUS_WARNING
1550 SBS_UNIT_TEMPERATURE
,
1555 SBS_UNIT_CAPACITANCE
1583 uint16_t cap_volt
[SSD_BM_CAP
];
1590 struct ssd_bm_manufacturer_data
1592 uint16_t pack_lot_code
;
1593 uint16_t pcb_lot_code
;
1594 uint16_t firmware_ver
;
1595 uint16_t hardware_ver
;
1598 struct ssd_bm_configuration_registers
1611 uint16_t fet_action
;
1616 #define SBS_VALUE_MASK 0xffff
1618 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1619 #define bm_var(start, offset) ((void *) start + (offset))
1621 static struct sbs_cmd ssd_bm_sbs
[] = {
1622 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1623 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1624 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1625 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1626 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1627 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1628 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1629 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1630 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1631 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1632 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1633 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1634 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1635 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1636 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1637 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1638 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1639 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1640 {0, 0, 0, 0, 0, NULL
},
/*
 * Character-device ioctl command numbers ('H' magic).
 * Direction codes follow the kernel convention: _IOR copies data to user
 * space, _IOW copies from user space, _IOWR does both.
 */

/* device information / status queries */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)

/* raw register access */
#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)

/* SPI flash (firmware ROM) access */
#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)

/* I2C / SMBus passthrough */
#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)

/* battery / capacitor module management */
#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)

/* on-card RAM access */
#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)

/* raw NAND access */
#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO

#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)

/* alarm LED control */
#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
#define SSD_CMD_SET_ALARM _IOW('H', 191, int)

/* device lifecycle control */
#define SSD_CMD_RESET _IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET _IOW('H', 206, int)

/* hardware log retrieval and control */
#define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG _IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)

#define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)

#define SSD_CMD_FLUSH _IOW('H', 240, int)
#define SSD_CMD_SAVE_MD _IOW('H', 241, int)

/* write-mode control */
#define SSD_CMD_SET_WMODE _IOW('H', 242, int)
#define SSD_CMD_GET_WMODE _IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)

/* debug-only interfaces */
#define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)
1736 #define SSD_LOG_MAX_SZ 4096
1737 #define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
1741 SSD_LOG_DATA_NONE
= 0,
1746 typedef struct ssd_log_entry
1764 }__attribute__((packed
))ssd_log_entry_t
;
1766 typedef struct ssd_log
1769 uint64_t ctrl_idx
:8;
1771 } __attribute__((packed
)) ssd_log_t
;
1773 typedef struct ssd_log_desc
1781 } __attribute__((packed
)) ssd_log_desc_t
;
1783 #define SSD_LOG_SW_IDX 0xF
1784 #define SSD_UNKNOWN_EVENT ((uint16_t)-1)
1785 static struct ssd_log_desc ssd_log_desc
[] = {
1786 /* event, level, show flash, show block, show page, desc */
1787 {0x0, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Create BBT failure"}, //g3
1788 {0x1, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 0, 0, "Read BBT failure"}, //g3
1789 {0x2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Mark bad block"},
1790 {0x3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flush BBT failure"},
1791 {0x4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1792 {0x7, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "No available blocks"},
1793 {0x8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Bad EC header"},
1794 {0x9, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 0, "Bad VID header"}, //g3
1795 {0xa, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Wear leveling"},
1796 {0xb, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "WL read back failure"},
1797 {0x11, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Data recovery failure"}, // err
1798 {0x20, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan mapping table failure"}, // err g3
1799 {0x21, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1800 {0x22, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1801 {0x23, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1802 {0x24, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Merge: read mapping page failure"},
1803 {0x25, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: read back failure"},
1804 {0x26, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1805 {0x27, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Data corrupted for abnormal power down"}, //g3
1806 {0x28, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Merge: mapping page corrupted"},
1807 {0x29, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: no mapping page"},
1808 {0x2a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: mapping pages incomplete"},
1809 {0x2b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read back failure after programming failure"}, // err
1810 {0xf1, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure without recovery"}, // err
1811 {0xf2, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available blocks"}, // maybe err g3
1812 {0xf3, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: RAID incomplete"}, // err g3
1813 {0xf4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1814 {0xf5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read failure in moving data"},
1815 {0xf6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Program failure"},
1816 {0xf7, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_LOC
, 1, 1, "Init: RAID not complete"},
1817 {0xf8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: data moving interrupted"},
1818 {0xfe, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Data inspection failure"},
1819 {0xff, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "IO: ECC failed"},
1822 {0x2e, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 0, 0, "No available reserved blocks" }, // err
1823 {0x30, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PMT membership not found"},
1824 {0x31, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PMT corrupted"},
1825 {0x32, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT membership not found"},
1826 {0x33, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT not found"},
1827 {0x34, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PBT corrupted"},
1828 {0x35, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT page read failure"},
1829 {0x36, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT page read failure"},
1830 {0x37, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT backup page read failure"},
1831 {0x38, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT read failure"},
1832 {0x39, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBMT scan failure"}, // err
1833 {0x3a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page read failure"},
1834 {0x3b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page scan failure"}, // err
1835 {0x3c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: scan unclosed block failure"}, // err
1836 {0x3d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: write pointer mismatch"},
1837 {0x3e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: PBMT read failure"},
1838 {0x3f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Init: PMT recovery: PBMT scan failure"},
1839 {0x40, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PMT recovery: data page read failure"}, //err
1840 {0x41, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT write pointer mismatch"},
1841 {0x42, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: PBT latest version corrupted"},
1842 {0x43, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Init: too many unclosed blocks"},
1843 {0x44, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Init: PDW block found"},
1844 {0x45, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Init: more than one PDW block found"}, //err
1845 {0x46, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Init: first page is blank or read failure"},
1846 {0x47, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Init: PDW block not found"},
1848 {0x50, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: hit error data"}, // err
1849 {0x51, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 0, "Cache: read back failure"}, // err
1850 {0x52, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Cache: unknown command"}, //?
1851 {0x53, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_LOC
, 1, 1, "GC/WL read back failure"}, // err
1853 {0x60, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "Erase failure"},
1855 {0x70, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "LPA not matched"},
1856 {0x71, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "PBN not matched"},
1857 {0x72, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read retry failure"},
1858 {0x73, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Need raid recovery"},
1859 {0x74, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "Need read retry"},
1860 {0x75, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read invalid data page"},
1861 {0x76, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN matched"},
1862 {0x77, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in cache, PBN not matched"},
1863 {0x78, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC error, data in flash, PBN not matched"},
1864 {0x79, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in cache, LPA not matched"},
1865 {0x7a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "ECC ok, data in flash, LPA not matched"},
1866 {0x7b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in cache, LPA not matched"},
1867 {0x7c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID data in flash, LPA not matched"},
1868 {0x7d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data page status error"},
1869 {0x7e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1870 {0x7f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Access flash timeout"},
1872 {0x80, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "EC overflow"},
1873 {0x81, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_NONE
, 0, 0, "Scrubbing completed"},
1874 {0x82, SSD_LOG_LEVEL_INFO
, SSD_LOG_DATA_LOC
, 1, 0, "Unstable block(too much bit flip)"},
1875 {0x83, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: ram error"}, //?
1876 {0x84, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: one PBMT read failure"},
1878 {0x88, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: mark bad block"},
1879 {0x89, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 0, "GC: invalid page count error"}, // maybe err
1880 {0x8a, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: Bad Block close to limit"},
1881 {0x8b, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: Bad Block over limit"},
1882 {0x8c, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Warning: P/E cycles close to limit"},
1883 {0x8d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Error: P/E cycles over limit"},
1885 {0x90, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Over temperature"}, //xx
1886 {0x91, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is OK"}, //xx
1887 {0x92, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "Battery fault"},
1888 {0x93, SSD_LOG_LEVEL_WARNING
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault"}, //err
1889 {0x94, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "DDR error"}, //err
1890 {0x95, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Controller serdes error"}, //err
1891 {0x96, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 1 error"}, //err
1892 {0x97, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_NONE
, 0, 0, "Bridge serdes 2 error"}, //err
1893 {0x98, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "SEU fault (corrected)"}, //err
1894 {0x99, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Battery is OK"},
1895 {0x9a, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature close to limit"}, //xx
1897 {0x9b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (low)"},
1898 {0x9c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "SEU fault address (high)"},
1899 {0x9d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "I2C fault" },
1900 {0x9e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "DDR single bit error" },
1901 {0x9f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Board voltage fault" },
1903 {0xa0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "LPA not matched"},
1904 {0xa1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Re-read data in cache"},
1905 {0xa2, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read blank page"},
1906 {0xa3, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Read blank page"},
1907 {0xa4, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: new data in cache"},
1908 {0xa5, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: PBN not matched"},
1909 {0xa6, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Read data with error flag"},
1910 {0xa7, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: recoverd data with error flag"},
1911 {0xa8, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Blank page in cache, PBN matched"},
1912 {0xa9, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
1913 {0xaa, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 0, 0, "Flash init failure"},
1914 {0xab, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "Mapping table recovery failure"},
1915 {0xac, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_LOC
, 1, 1, "RAID recovery: ECC failed"},
1916 {0xb0, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is up to degree 95"},
1917 {0xb1, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Temperature is up to degree 100"},
1919 {0x300, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "CMD timeout"},
1920 {0x301, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Power on"},
1921 {0x302, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Power off"},
1922 {0x303, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear log"},
1923 {0x304, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity"},
1924 {0x305, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data"},
1925 {0x306, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "BM safety status"},
1926 {0x307, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "I/O error"},
1927 {0x308, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CMD error"},
1928 {0x309, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set wmode"},
1929 {0x30a, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "DDR init failed" },
1930 {0x30b, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "PCIe link status" },
1931 {0x30c, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Controller reset sync error" },
1932 {0x30d, SSD_LOG_LEVEL_ERR
, SSD_LOG_DATA_HEX
, 0, 0, "Clock fault" },
1933 {0x30e, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "FPGA voltage fault status" },
1934 {0x30f, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Set capacity finished"},
1935 {0x310, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Clear data finished"},
1936 {0x311, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Reset"},
1937 {0x312, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "CAP: voltage fault"},
1938 {0x313, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: learn fault"},
1939 {0x314, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "CAP status"},
1940 {0x315, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "Board voltage fault status"},
1941 {0x316, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Inlet over temperature"},
1942 {0x317, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Inlet temperature is OK"},
1943 {0x318, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Flash over temperature"},
1944 {0x319, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Flash temperature is OK"},
1945 {0x31a, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_NONE
, 0, 0, "CAP: short circuit"},
1946 {0x31b, SSD_LOG_LEVEL_WARNING
,SSD_LOG_DATA_HEX
, 0, 0, "Sensor fault"},
1947 {0x31c, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data"},
1948 {0x31d, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_NONE
, 0, 0, "Erase all data finished"},
1950 {SSD_UNKNOWN_EVENT
, SSD_LOG_LEVEL_NOTICE
, SSD_LOG_DATA_HEX
, 0, 0, "unknown event"},
/* Log event codes referenced by name in driver logic; the values match
 * entries of the ssd_log_desc table above. */
#define SSD_LOG_OVER_TEMP 0x90
#define SSD_LOG_NORMAL_TEMP 0x91
#define SSD_LOG_WARN_TEMP 0x9a
#define SSD_LOG_SEU_FAULT 0x93
#define SSD_LOG_SEU_FAULT1 0x98
#define SSD_LOG_BATTERY_FAULT 0x92
#define SSD_LOG_BATTERY_OK 0x99
#define SSD_LOG_BOARD_VOLT_FAULT 0x9f

/* software-generated events (0x300 range) */
#define SSD_LOG_TIMEOUT 0x300
#define SSD_LOG_POWER_ON 0x301
#define SSD_LOG_POWER_OFF 0x302
#define SSD_LOG_CLEAR_LOG 0x303
#define SSD_LOG_SET_CAPACITY 0x304
#define SSD_LOG_CLEAR_DATA 0x305
#define SSD_LOG_BM_SFSTATUS 0x306
#define SSD_LOG_EIO 0x307
#define SSD_LOG_ECMD 0x308
#define SSD_LOG_SET_WMODE 0x309
#define SSD_LOG_DDR_INIT_ERR 0x30a
#define SSD_LOG_PCIE_LINK_STATUS 0x30b
#define SSD_LOG_CTRL_RST_SYNC 0x30c
#define SSD_LOG_CLK_FAULT 0x30d
#define SSD_LOG_VOLT_FAULT 0x30e
#define SSD_LOG_SET_CAPACITY_END 0x30F
#define SSD_LOG_CLEAR_DATA_END 0x310
#define SSD_LOG_RESET 0x311
#define SSD_LOG_CAP_VOLT_FAULT 0x312
#define SSD_LOG_CAP_LEARN_FAULT 0x313
#define SSD_LOG_CAP_STATUS 0x314
#define SSD_LOG_VOLT_STATUS 0x315
#define SSD_LOG_INLET_OVER_TEMP 0x316
#define SSD_LOG_INLET_NORMAL_TEMP 0x317
#define SSD_LOG_FLASH_OVER_TEMP 0x318
#define SSD_LOG_FLASH_NORMAL_TEMP 0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
#define SSD_LOG_SENSOR_FAULT 0x31b
#define SSD_LOG_ERASE_ALL 0x31c
#define SSD_LOG_ERASE_ALL_END 0x31d

/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ 1024
/* Per-CPU state — presumably completion ("done") queues and the tasklets
 * that drain them; confirm at the use sites elsewhere in the file. */
static DEFINE_PER_CPU(struct list_head, ssd_doneq);
static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet);

/* unloading driver */
/* Flag observed by other paths while the module is being torn down. */
static volatile int ssd_exiting = 0;
2007 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
2008 static struct class_simple
*ssd_class
;
2010 static struct class *ssd_class
;
/* character device major — presumably for the control node; confirm at
 * registration site */
static int ssd_cmajor = SSD_CMAJOR;

/* ssd block device major, minors */
static int ssd_major = SSD_MAJOR;
static int ssd_major_sl = SSD_MAJOR_SL;
static int ssd_minors = SSD_MINORS;

/* ssd device list */
static struct list_head ssd_list;
/* index-allocation bitmaps, one for primary devices and one for slaves
 * (see ssd_get_index / ssd_put_index) */
static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1];
static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1];
/* count of currently allocated device indexes */
static atomic_t ssd_nr;
2029 SSD_DRV_MODE_STANDARD
= 0, /* full */
2030 SSD_DRV_MODE_DEBUG
= 2, /* debug */
2031 SSD_DRV_MODE_BASE
/* base only */
2041 #if (defined SSD_MSIX)
2042 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2043 #elif (defined SSD_MSI)
2044 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
2046 /* auto select the defaut int mode according to the kernel version*/
2047 /* suse 11 sp1 irqbalance bug: use msi instead*/
2048 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
2049 #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
2051 #define SSD_INT_MODE_DEFAULT SSD_INT_MSI
/* Module parameters.  Each is also settable from the kernel command line
 * via the __setup() hooks registered below. */
static int mode = SSD_DRV_MODE_STANDARD;	/* driver operating mode */
static int status_mask = 0xFF;			/* command status mask */
static int int_mode = SSD_INT_MODE_DEFAULT;	/* interrupt mode: legacy/MSI/MSI-X */
static int threaded_irq = 0;			/* use threaded irq handlers */
static int log_level = SSD_LOG_LEVEL_WARNING;	/* minimum level of events to report */
static int ot_protect = 1;			/* over-temperature protection on/off */
static int wmode = SSD_WMODE_DEFAULT;		/* write mode */
static int finject = 0;				/* fault injection (debug only) */

module_param(mode, int, 0);
module_param(status_mask, int, 0);
module_param(int_mode, int, 0);
module_param(threaded_irq, int, 0);
module_param(log_level, int, 0);
module_param(ot_protect, int, 0);
module_param(wmode, int, 0);
module_param(finject, int, 0);

MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error");
MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq");
MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable");
MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2085 static int __init
ssd_drv_mode(char *str
)
2087 mode
= (int)simple_strtoul(str
, NULL
, 0);
2092 static int __init
ssd_status_mask(char *str
)
2094 status_mask
= (int)simple_strtoul(str
, NULL
, 16);
2099 static int __init
ssd_int_mode(char *str
)
2101 int_mode
= (int)simple_strtoul(str
, NULL
, 0);
2106 static int __init
ssd_threaded_irq(char *str
)
2108 threaded_irq
= (int)simple_strtoul(str
, NULL
, 0);
2113 static int __init
ssd_log_level(char *str
)
2115 log_level
= (int)simple_strtoul(str
, NULL
, 0);
2120 static int __init
ssd_ot_protect(char *str
)
2122 ot_protect
= (int)simple_strtoul(str
, NULL
, 0);
2127 static int __init
ssd_wmode(char *str
)
2129 wmode
= (int)simple_strtoul(str
, NULL
, 0);
2134 static int __init
ssd_finject(char *str
)
2136 finject
= (int)simple_strtoul(str
, NULL
, 0);
/* Kernel-command-line equivalents of the module parameters, e.g.
 * "hio_mode=0 hio_log_level=2". */
__setup(MODULE_NAME "_mode=", ssd_drv_mode);
__setup(MODULE_NAME "_status_mask=", ssd_status_mask);
__setup(MODULE_NAME "_int_mode=", ssd_int_mode);
__setup(MODULE_NAME "_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME "_log_level=", ssd_log_level);
__setup(MODULE_NAME "_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME "_wmode=", ssd_wmode);
__setup(MODULE_NAME "_finject=", ssd_finject);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

/* /proc/<MODULE_NAME>/info entry names */
#define SSD_PROC_DIR MODULE_NAME
#define SSD_PROC_INFO "info"

/* created by ssd_init_proc(), removed by ssd_cleanup_proc() */
static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;
2162 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2163 static int ssd_proc_read(char *page
, char **start
,
2164 off_t off
, int count
, int *eof
, void *data
)
2166 struct ssd_device
*dev
= NULL
;
2167 struct ssd_device
*n
= NULL
;
2177 len
+= snprintf((page
+ len
), (count
- len
), "Driver Version:\t%s\n", DRIVER_VERSION
);
2179 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2181 size
= dev
->hw_info
.size
;
2182 do_div(size
, 1000000000);
2184 len
+= snprintf((page
+ len
), (count
- len
), "\n");
2186 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2188 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2189 if (dev
->hw_info
.ctrl_ver
!= 0) {
2190 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2193 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2195 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2196 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2199 len
+= snprintf((page
+ len
), (count
- len
), "HIO %d Device:\t%s\n", idx
, dev
->name
);
2207 static int ssd_proc_show(struct seq_file
*m
, void *v
)
2209 struct ssd_device
*dev
= NULL
;
2210 struct ssd_device
*n
= NULL
;
2218 seq_printf(m
, "Driver Version:\t%s\n", DRIVER_VERSION
);
2220 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
2222 size
= dev
->hw_info
.size
;
2223 do_div(size
, 1000000000);
2225 seq_printf(m
, "\n");
2227 seq_printf(m
, "HIO %d Size:\t%uGB\n", idx
, (uint32_t)size
);
2229 seq_printf(m
, "HIO %d Bridge FW VER:\t%03X\n", idx
, dev
->hw_info
.bridge_ver
);
2230 if (dev
->hw_info
.ctrl_ver
!= 0) {
2231 seq_printf(m
, "HIO %d Controller FW VER:\t%03X\n", idx
, dev
->hw_info
.ctrl_ver
);
2234 seq_printf(m
, "HIO %d PCB VER:\t.%c\n", idx
, dev
->hw_info
.pcb_ver
);
2236 if (dev
->hw_info
.upper_pcb_ver
>= 'A') {
2237 seq_printf(m
, "HIO %d Upper PCB VER:\t.%c\n", idx
, dev
->hw_info
.upper_pcb_ver
);
2240 seq_printf(m
, "HIO %d Device:\t%s\n", idx
, dev
->name
);
2246 static int ssd_proc_open(struct inode
*inode
, struct file
*file
)
2248 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
2249 return single_open(file
, ssd_proc_show
, PDE(inode
)->data
);
2251 return single_open(file
, ssd_proc_show
, PDE_DATA(inode
));
2255 static const struct file_operations ssd_proc_fops
= {
2256 .open
= ssd_proc_open
,
2258 .llseek
= seq_lseek
,
2259 .release
= single_release
,
2264 static void ssd_cleanup_proc(void)
2266 if (ssd_proc_info
) {
2267 remove_proc_entry(SSD_PROC_INFO
, ssd_proc_dir
);
2268 ssd_proc_info
= NULL
;
2271 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2272 ssd_proc_dir
= NULL
;
2275 static int ssd_init_proc(void)
2277 ssd_proc_dir
= proc_mkdir(SSD_PROC_DIR
, NULL
);
2279 goto out_proc_mkdir
;
2281 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
2282 ssd_proc_info
= create_proc_entry(SSD_PROC_INFO
, S_IFREG
| S_IRUGO
| S_IWUSR
, ssd_proc_dir
);
2284 goto out_create_proc_entry
;
2286 ssd_proc_info
->read_proc
= ssd_proc_read
;
2289 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
2290 ssd_proc_info
->owner
= THIS_MODULE
;
2293 ssd_proc_info
= proc_create(SSD_PROC_INFO
, 0600, ssd_proc_dir
, &ssd_proc_fops
);
2295 goto out_create_proc_entry
;
2300 out_create_proc_entry
:
2301 remove_proc_entry(SSD_PROC_DIR
, NULL
);
2307 static void ssd_cleanup_proc(void)
2311 static int ssd_init_proc(void)
2315 #endif /* CONFIG_PROC_FS */
2318 static void ssd_unregister_sysfs(struct ssd_device
*dev
)
2323 static int ssd_register_sysfs(struct ssd_device
*dev
)
2328 static void ssd_cleanup_sysfs(void)
2333 static int ssd_init_sysfs(void)
2338 static inline void ssd_put_index(int slave
, int index
)
2340 unsigned long *index_bits
= ssd_index_bits
;
2343 index_bits
= ssd_index_bits_sl
;
2346 if (test_and_clear_bit(index
, index_bits
)) {
2347 atomic_dec(&ssd_nr
);
2351 static inline int ssd_get_index(int slave
)
2353 unsigned long *index_bits
= ssd_index_bits
;
2357 index_bits
= ssd_index_bits_sl
;
2361 if ((index
= find_first_zero_bit(index_bits
, SSD_MAX_DEV
)) >= SSD_MAX_DEV
) {
2365 if (test_and_set_bit(index
, index_bits
)) {
2369 atomic_inc(&ssd_nr
);
2374 static void ssd_cleanup_index(void)
2379 static int ssd_init_index(void)
2381 INIT_LIST_HEAD(&ssd_list
);
2382 atomic_set(&ssd_nr
, 0);
2383 memset(ssd_index_bits
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2384 memset(ssd_index_bits_sl
, 0, (SSD_MAX_DEV
/ BITS_PER_LONG
+ 1));
2389 static void ssd_set_dev_name(char *name
, size_t size
, int idx
)
2391 if(idx
< SSD_ALPHABET_NUM
) {
2392 snprintf(name
, size
, "%c", 'a'+idx
);
2394 idx
-= SSD_ALPHABET_NUM
;
2395 snprintf(name
, size
, "%c%c", 'a'+(idx
/SSD_ALPHABET_NUM
), 'a'+(idx
%SSD_ALPHABET_NUM
));
2399 /* pci register r&w */
2400 static inline void ssd_reg_write(void *addr
, uint64_t val
)
2402 iowrite32((uint32_t)val
, addr
);
2403 iowrite32((uint32_t)(val
>> 32), addr
+ 4);
2407 static inline uint64_t ssd_reg_read(void *addr
)
2410 uint32_t val_lo
, val_hi
;
2412 val_lo
= ioread32(addr
);
2413 val_hi
= ioread32(addr
+ 4);
2416 val
= val_lo
| ((uint64_t)val_hi
<< 32);
/* 32-bit PCI register access helpers (plain writel/readl wrappers). */
#define ssd_reg32_write(addr, val) writel(val, addr)
#define ssd_reg32_read(addr) readl(addr)
2426 static void ssd_clear_alarm(struct ssd_device
*dev
)
2430 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2434 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2436 /* firmware control */
2439 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
2442 static void ssd_set_alarm(struct ssd_device
*dev
)
2446 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
2450 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LED_REG
);
2454 /* software control */
2457 ssd_reg32_write(dev
->ctrlp
+ SSD_LED_REG
, val
);
2460 #define u32_swap(x) \
2462 (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
2463 (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
2464 (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
2465 (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))
2467 #define u16_swap(x) \
2469 (((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
2470 (((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2474 /* No lock, for init only*/
2475 static int ssd_spi_read_id(struct ssd_device
*dev
, uint32_t *id
)
2485 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_ID
);
2487 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2488 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2489 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2490 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2494 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2495 if (val
== 0x1000000) {
2499 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2506 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_ID
);
2515 static int ssd_init_spi(struct ssd_device
*dev
)
2521 mutex_lock(&dev
->spi_mutex
);
2524 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2527 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2529 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2534 } while (val
!= 0x1000000);
2536 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2541 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2549 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2551 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2554 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2555 mutex_unlock(&dev
->spi_mutex
);
2562 static int ssd_spi_page_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2573 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2574 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
) {
2578 mutex_lock(&dev
->spi_mutex
);
2579 while (rlen
< size
) {
2580 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, ((off
+ rlen
) >> 24));
2582 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, (((off
+ rlen
) << 8) | SSD_SPI_CMD_READ
));
2584 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2585 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2586 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2587 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2591 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2592 if (val
== 0x1000000) {
2596 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2603 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
2604 *(uint32_t *)(buf
+ rlen
)= u32_swap(val
);
2606 rlen
+= sizeof(uint32_t);
2610 mutex_unlock(&dev
->spi_mutex
);
2614 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2626 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2627 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2628 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2632 mutex_lock(&dev
->spi_mutex
);
2634 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2636 wlen
= size
/ sizeof(uint32_t);
2637 for (i
=0; i
<(int)wlen
; i
++) {
2638 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2642 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2644 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2650 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2652 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2654 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2659 } while (val
!= 0x1000000);
2661 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2666 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2673 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2674 if ((val
>> 6) & 0x1) {
2681 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2683 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2686 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2688 mutex_unlock(&dev
->spi_mutex
);
2693 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2703 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2707 mutex_lock(&dev
->spi_mutex
);
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2710 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2713 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2715 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2719 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2722 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2724 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2729 } while (val
!= 0x1000000);
2731 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2736 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2743 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2744 if ((val
>> 5) & 0x1) {
2751 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2753 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2756 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2758 mutex_unlock(&dev
->spi_mutex
);
2763 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2774 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2775 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2779 while (len
< size
) {
2780 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2781 rsize
= dev
->rom_info
.page_size
- roff
;
2782 if ((size
- len
) < rsize
) {
2783 rsize
= (size
- len
);
2787 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2801 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2812 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2813 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2817 while (len
< size
) {
2818 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2819 wsize
= dev
->rom_info
.page_size
- woff
;
2820 if ((size
- len
) < wsize
) {
2821 wsize
= (size
- len
);
2825 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2839 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2849 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2850 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2854 while (len
< size
) {
2857 ret
= ssd_spi_block_erase(dev
, eoff
);
2862 len
+= dev
->rom_info
.block_size
;
/* Thin MMIO-read wrapper used by the i2c helper routines. */
static uint32_t __ssd_i2c_reg32_read(void *addr)
{
	return ssd_reg32_read(addr);
}
/* MMIO-write wrapper used by the i2c helper routines. */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	/* read back after the write — presumably to flush the posted PCI
	 * write before proceeding; NOTE(review): confirm against hw spec */
	(void)ssd_reg32_read(addr);
}
2883 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2885 ssd_i2c_ctrl_t ctrl
;
2886 ssd_i2c_data_t data
;
2893 ctrl
.bits
.wdata
= 0;
2894 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2895 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2896 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2900 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2901 if (data
.bits
.valid
== 0) {
2906 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2912 status
= data
.bits
.rdata
;
2914 if (!(status
& 0x4)) {
2915 /* clear read fifo data */
2916 ctrl
.bits
.wdata
= 0;
2917 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2918 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2919 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2923 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2924 if (data
.bits
.valid
== 0) {
2929 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2937 if (nr_data
<= SSD_I2C_MAX_DATA
) {
2946 ctrl
.bits
.wdata
= 0x04;
2947 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2948 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2949 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2952 if (!(status
& 0x8)) {
2954 /* reset i2c controller */
2955 ctrl
.bits
.wdata
= 0x0;
2956 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
2957 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2958 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2965 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
2967 ssd_i2c_ctrl_t ctrl
;
2968 ssd_i2c_data_t data
;
2974 mutex_lock(&dev
->i2c_mutex
);
2979 ctrl
.bits
.wdata
= saddr
;
2980 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
2981 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2982 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2985 while (off
< size
) {
2986 ctrl
.bits
.wdata
= buf
[off
];
2987 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2988 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2989 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2995 ctrl
.bits
.wdata
= 0x01;
2996 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2997 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2998 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3003 ctrl
.bits
.wdata
= 0;
3004 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3005 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3006 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3009 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3010 if (data
.bits
.valid
== 0) {
3015 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3022 status
= data
.bits
.rdata
;
3027 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3034 if (!(status
& 0x1)) {
3040 if (status
& 0x20) {
3046 if (status
& 0x10) {
3053 if (__ssd_i2c_clear(dev
, saddr
)) {
3057 mutex_unlock(&dev
->i2c_mutex
);
3062 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3064 ssd_i2c_ctrl_t ctrl
;
3065 ssd_i2c_data_t data
;
3071 mutex_lock(&dev
->i2c_mutex
);
3076 ctrl
.bits
.wdata
= saddr
;
3077 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3078 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3079 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3082 ctrl
.bits
.wdata
= size
;
3083 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3084 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3085 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3088 ctrl
.bits
.wdata
= 0x02;
3089 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3090 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3091 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3096 ctrl
.bits
.wdata
= 0;
3097 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3098 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3099 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3102 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3103 if (data
.bits
.valid
== 0) {
3108 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3115 status
= data
.bits
.rdata
;
3120 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3127 if (!(status
& 0x2)) {
3133 if (status
& 0x20) {
3139 if (status
& 0x10) {
3145 while (off
< size
) {
3146 ctrl
.bits
.wdata
= 0;
3147 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3148 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3149 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3153 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3154 if (data
.bits
.valid
== 0) {
3159 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3166 buf
[off
] = data
.bits
.rdata
;
3173 if (__ssd_i2c_clear(dev
, saddr
)) {
3177 mutex_unlock(&dev
->i2c_mutex
);
3182 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3184 ssd_i2c_ctrl_t ctrl
;
3185 ssd_i2c_data_t data
;
3191 mutex_lock(&dev
->i2c_mutex
);
3196 ctrl
.bits
.wdata
= saddr
;
3197 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3198 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3199 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3203 while (off
< wsize
) {
3204 ctrl
.bits
.wdata
= wbuf
[off
];
3205 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3206 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3207 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3213 ctrl
.bits
.wdata
= rsize
;
3214 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3215 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3216 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3219 ctrl
.bits
.wdata
= 0x03;
3220 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3221 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3222 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3227 ctrl
.bits
.wdata
= 0;
3228 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3229 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3230 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3233 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3234 if (data
.bits
.valid
== 0) {
3239 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3246 status
= data
.bits
.rdata
;
3251 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3258 if (!(status
& 0x2)) {
3264 if (status
& 0x20) {
3270 if (status
& 0x10) {
3277 while (off
< rsize
) {
3278 ctrl
.bits
.wdata
= 0;
3279 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3280 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3281 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3285 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3286 if (data
.bits
.valid
== 0) {
3291 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3298 rbuf
[off
] = data
.bits
.rdata
;
3305 if (__ssd_i2c_clear(dev
, saddr
)) {
3308 mutex_unlock(&dev
->i2c_mutex
);
3313 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3319 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3320 if (!ret
|| -ETIMEDOUT
== ret
) {
3325 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3328 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3334 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3340 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3341 if (!ret
|| -ETIMEDOUT
== ret
) {
3346 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3349 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3355 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3357 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3362 memcpy((smb_data
+ 1), buf
, 1);
3365 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3366 if (!ret
|| -ETIMEDOUT
== ret
) {
3371 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3374 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3380 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3382 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3389 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3390 if (!ret
|| -ETIMEDOUT
== ret
) {
3395 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3398 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3404 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3406 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3411 memcpy((smb_data
+ 1), buf
, 2);
3414 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3415 if (!ret
|| -ETIMEDOUT
== ret
) {
3420 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3423 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3429 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3431 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3438 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3439 if (!ret
|| -ETIMEDOUT
== ret
) {
3444 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3447 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3453 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3455 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3461 memcpy((smb_data
+ 2), buf
, size
);
3464 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3465 if (!ret
|| -ETIMEDOUT
== ret
) {
3470 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3473 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3479 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3481 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3489 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3490 if (!ret
|| -ETIMEDOUT
== ret
) {
3495 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3498 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3504 rsize
= smb_data
[1];
3506 if (rsize
> size
) {
3510 memcpy(buf
, (smb_data
+ 2), rsize
);
3516 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3519 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3524 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3529 conf
&= (uint8_t)(~1u);
3531 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3540 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3545 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3550 *data
= u16_swap(val
);
3555 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3564 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3571 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3576 /* set volt limit */
3577 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3578 high
= ssd_lm80_limit
[i
].high
;
3579 low
= ssd_lm80_limit
[i
].low
;
3581 if (SSD_LM80_IN_CAP
== i
) {
3585 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3591 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3597 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3603 /* set interrupt mask: allow volt in interrupt except cap in*/
3605 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3610 /* set interrupt mask: disable others */
3612 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3619 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3628 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3633 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3637 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3642 val
&= ~(1UL << (uint32_t)idx
);
3644 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3653 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3658 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3662 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3667 val
|= (1UL << (uint32_t)idx
);
3669 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3678 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3683 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3688 *data
= u16_swap(val
);
3693 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3696 uint16_t val
= 0, status
;
3697 uint8_t alarm1
= 0, alarm2
= 0;
3701 /* read interrupt status to clear interrupt */
3702 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3707 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3712 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3714 /* parse interrupt status */
3715 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3716 if (!((status
>> (uint32_t)i
) & 0x1)) {
3717 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3718 /* enable INx irq */
3719 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3728 /* disable INx irq */
3729 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3734 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3738 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3743 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3746 case SSD_LM80_IN_CAP
: {
3748 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3750 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3755 case SSD_LM80_IN_1V2
:
3756 case SSD_LM80_IN_1V2a
:
3757 case SSD_LM80_IN_1V5
:
3758 case SSD_LM80_IN_1V8
: {
3759 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3762 case SSD_LM80_IN_FPGA_3V3
:
3763 case SSD_LM80_IN_3V3
: {
3764 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3774 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3775 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
);
3778 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3783 static int ssd_init_sensor(struct ssd_device
*dev
)
3787 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3791 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3793 hio_warn("%s: init lm75 failed\n", dev
->name
);
3794 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3795 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3800 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3801 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3803 hio_warn("%s: init lm80 failed\n", dev
->name
);
3804 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3805 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3812 /* skip error if not in standard mode */
3813 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3820 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3822 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3826 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3830 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3834 static int ssd_mon_temp(struct ssd_device
*dev
)
3840 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3844 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3849 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3851 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3852 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3856 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3858 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3859 if (cur
>= SSD_INLET_OT_TEMP
) {
3860 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3861 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3863 } else if(cur
< SSD_INLET_OT_HYST
) {
3864 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3865 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3870 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3872 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3873 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3877 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3879 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3880 if (cur
>= SSD_FLASH_OT_TEMP
) {
3881 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3882 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3884 } else if(cur
< SSD_FLASH_OT_HYST
) {
3885 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3886 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3895 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3897 test_and_clear_bit(tag
, dev
->tag_map
);
3898 wake_up(&dev
->tag_wq
);
3901 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3906 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3907 DEFINE_WAIT(__wait
);
3913 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
3916 finish_wait(&dev
->tag_wq
, &__wait
);
3919 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3926 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
3928 test_and_clear_bit(tag
, dev
->tag_map
);
3931 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
3935 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3942 static void ssd_barrier_end(struct ssd_device
*dev
)
3944 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3945 wake_up_all(&dev
->tag_wq
);
3947 mutex_unlock(&dev
->barrier_mutex
);
3950 static int ssd_barrier_start(struct ssd_device
*dev
)
3954 mutex_lock(&dev
->barrier_mutex
);
3956 atomic_set(&dev
->queue_depth
, 0);
3958 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3959 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3963 __set_current_state(TASK_INTERRUPTIBLE
);
3964 schedule_timeout(1);
3967 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3968 wake_up_all(&dev
->tag_wq
);
3970 mutex_unlock(&dev
->barrier_mutex
);
3975 static int ssd_busy(struct ssd_device
*dev
)
3977 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3984 static int ssd_wait_io(struct ssd_device
*dev
)
3988 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3989 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3993 __set_current_state(TASK_INTERRUPTIBLE
);
3994 schedule_timeout(1);
4001 static int ssd_in_barrier(struct ssd_device
*dev
)
4003 return (0 == atomic_read(&dev
->queue_depth
));
4007 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4009 kfree(dev
->tag_map
);
4012 static int ssd_init_tag(struct ssd_device
*dev
)
4014 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4016 mutex_init(&dev
->barrier_mutex
);
4018 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4020 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4021 if (!dev
->tag_map
) {
4025 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4027 init_waitqueue_head(&dev
->tag_wq
);
4033 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4035 struct ssd_device
*dev
= cmd
->dev
;
4036 struct bio
*bio
= cmd
->bio
;
4037 unsigned long dur
= jiffies
- cmd
->start_time
;
4038 int rw
= bio_data_dir(bio
);
4040 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4041 int cpu
= part_stat_lock();
4042 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4043 part_round_stats(cpu
, part
);
4044 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4045 part_dec_in_flight(part
, rw
);
4047 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4048 int cpu
= part_stat_lock();
4049 struct hd_struct
*part
= &dev
->gd
->part0
;
4050 part_round_stats(cpu
, part
);
4051 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4053 part
->in_flight
[rw
] = atomic_dec_return(&dev
->in_flight
[rw
]);
4054 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4056 disk_round_stats(dev
->gd
);
4058 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4059 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4062 disk_round_stats(dev
->gd
);
4065 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4067 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4069 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4073 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4075 struct ssd_device
*dev
= cmd
->dev
;
4076 struct bio
*bio
= cmd
->bio
;
4077 int rw
= bio_data_dir(bio
);
4079 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4080 int cpu
= part_stat_lock();
4081 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4082 part_round_stats(cpu
, part
);
4083 part_stat_inc(cpu
, part
, ios
[rw
]);
4084 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4085 part_inc_in_flight(part
, rw
);
4087 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4088 int cpu
= part_stat_lock();
4089 struct hd_struct
*part
= &dev
->gd
->part0
;
4090 part_round_stats(cpu
, part
);
4091 part_stat_inc(cpu
, part
, ios
[rw
]);
4092 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4094 part
->in_flight
[rw
] = atomic_inc_return(&dev
->in_flight
[rw
]);
4095 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4097 disk_round_stats(dev
->gd
);
4099 disk_stat_inc(dev
->gd
, ios
[rw
]);
4100 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4101 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4104 disk_round_stats(dev
->gd
);
4107 disk_stat_inc(dev
->gd
, writes
);
4108 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4110 disk_stat_inc(dev
->gd
, reads
);
4111 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4113 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4116 cmd
->start_time
= jiffies
;
4120 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4122 spin_lock(&dev
->sendq_lock
);
4123 ssd_blist_add(&dev
->sendq
, bio
);
4124 spin_unlock(&dev
->sendq_lock
);
4126 atomic_inc(&dev
->in_sendq
);
4127 wake_up(&dev
->send_waitq
);
4130 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4132 struct ssd_device
*dev
= cmd
->dev
;
4133 struct bio
*bio
= cmd
->bio
;
4134 int errors
= cmd
->errors
;
4138 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4139 if (!(bio
->bi_rw
& REQ_DISCARD
)) {
4140 ssd_end_io_acct(cmd
);
4142 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4143 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4146 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4147 if (!bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4148 ssd_end_io_acct(cmd
);
4150 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4151 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4155 ssd_end_io_acct(cmd
);
4158 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4159 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4164 ssd_put_tag(dev
, tag
);
4166 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4167 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4168 bio_endio(bio
, errors
);
4170 bio_endio(bio
, bio
->bi_size
, errors
);
4172 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4173 spin_lock(&dev
->doneq_lock
);
4174 ssd_blist_add(&dev
->doneq
, bio
);
4175 spin_unlock(&dev
->doneq_lock
);
4177 atomic_inc(&dev
->in_doneq
);
4178 wake_up(&dev
->done_waitq
);
4182 complete(cmd
->waiting
);
4187 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4189 struct ssd_device
*dev
= cmd
->dev
;
4190 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4193 for (i
=0; i
<dev
->nr_queue
; i
++) {
4194 disable_irq(dev
->entry
[i
].vector
);
4197 atomic_inc(&dev
->tocnt
);
4199 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4200 cmd
->errors
= -ETIMEDOUT
;
4201 ssd_end_request(cmd
);
4204 for (i
=0; i
<dev
->nr_queue
; i
++) {
4205 enable_irq(dev
->entry
[i
].vector
);
4213 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4215 init_timer(&cmd
->cmd_timer
);
4217 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4218 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4219 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4221 add_timer(&cmd
->cmd_timer
);
4224 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4226 return del_timer(&cmd
->cmd_timer
);
4229 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4233 timer
->data
= (unsigned long)data
;
4234 timer
->expires
= jiffies
+ timeout
;
4235 timer
->function
= (void (*)(unsigned long)) complt
;
/* Disarm a generic timer; nonzero iff it was still pending. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4245 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4247 struct ssd_device
*dev
= cmd
->dev
;
4248 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4250 ssd_end_timeout_request(cmd
);
4252 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4256 static void __ssd_done(unsigned long data
)
4258 struct ssd_cmd
*cmd
;
4261 local_irq_disable();
4262 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4263 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4265 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4269 while (!list_empty(&localq
)) {
4270 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4271 list_del_init(&cmd
->list
);
4273 ssd_end_request(cmd
);
4277 static void __ssd_done_db(unsigned long data
)
4279 struct ssd_cmd
*cmd
;
4280 struct ssd_device
*dev
;
4284 local_irq_disable();
4285 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4286 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4288 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4292 while (!list_empty(&localq
)) {
4293 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4294 list_del_init(&cmd
->list
);
4296 dev
= (struct ssd_device
*)cmd
->dev
;
4300 sector_t off
= dev
->db_info
.data
.loc
.off
;
4301 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4303 switch (dev
->db_info
.type
) {
4304 case SSD_DEBUG_READ_ERR
:
4305 if (bio_data_dir(bio
) == READ
&&
4306 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4310 case SSD_DEBUG_WRITE_ERR
:
4311 if (bio_data_dir(bio
) == WRITE
&&
4312 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4313 cmd
->errors
= -EROFS
;
4316 case SSD_DEBUG_RW_ERR
:
4317 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4318 if (bio_data_dir(bio
) == READ
) {
4321 cmd
->errors
= -EROFS
;
4330 ssd_end_request(cmd
);
4334 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4336 unsigned long flags
= 0;
4338 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4339 struct ssd_device
*dev
= cmd
->dev
;
4340 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4341 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4348 local_irq_save(flags
);
4349 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4350 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4351 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4353 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4354 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4356 local_irq_restore(flags
);
4361 static inline void ssd_done(struct ssd_cmd
*cmd
)
4363 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4364 struct ssd_device
*dev
= cmd
->dev
;
4365 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4366 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4373 ssd_end_request(cmd
);
4378 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4380 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4382 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4384 spin_lock(&dev
->cmd_lock
);
4385 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4386 spin_unlock(&dev
->cmd_lock
);
4389 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4391 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4393 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4395 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
/*
 * ssd_send_cmd_db() - debug ("db") variant of ssd_send_cmd().
 *
 * Arms the per-command timeout, inspects dev->db_info.type so that a
 * configured debug mode (read/write/rw timeout injection) can intercept
 * commands whose bio direction matches, then posts the doorbell word
 * (tag in the low 16 bits, nsegs in the high 16 bits) to the request
 * FIFO register.
 *
 * NOTE(review): the bodies of the switch cases and the closing braces
 * are not visible in this extraction; the text below is kept
 * byte-identical to the original.
 */
4398 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
/* resolve the owning device and the bio this command carries */
4400 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4401 struct bio
*bio
= cmd
->bio
;
/* start the timeout clock before any doorbell write */
4403 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
/* which fault type (if any) is being injected for this device? */
4406 switch (dev
->db_info
.type
) {
4407 case SSD_DEBUG_READ_TO
:
4408 if (bio_data_dir(bio
) == READ
) {
4412 case SSD_DEBUG_WRITE_TO
:
4413 if (bio_data_dir(bio
) == WRITE
) {
4417 case SSD_DEBUG_RW_TO
:
/* normal path: ring the doorbell - tag | (nsegs << 16) */
4425 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4429 /* fixed for BIOVEC_PHYS_MERGEABLE */
4430 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4431 #include <linux/bio.h>
4432 #include <linux/io.h>
4433 #include <xen/page.h>
4435 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4436 const struct bio_vec
*vec2
)
4438 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4439 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4441 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4442 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4445 #ifdef BIOVEC_PHYS_MERGEABLE
4446 #undef BIOVEC_PHYS_MERGEABLE
4448 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4449 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4450 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4454 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4456 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4457 struct bio_vec
*bvec
, *bvprv
= NULL
;
4458 struct scatterlist
*sg
= NULL
;
4459 int i
= 0, nsegs
= 0;
4461 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4462 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4466 * for each segment in bio
4468 bio_for_each_segment(bvec
, bio
, i
) {
4469 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4470 sg
->length
+= bvec
->bv_len
;
4472 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4476 sg
= sg
? (sg
+ 1) : sgl
;
4477 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4478 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4480 sg
->page
= bvec
->bv_page
;
4481 sg
->length
= bvec
->bv_len
;
4482 sg
->offset
= bvec
->bv_offset
;
4489 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4499 struct bio_vec bvec
, bvprv
;
4500 struct bvec_iter iter
;
4501 struct scatterlist
*sg
= NULL
;
4505 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4508 * for each segment in bio
4510 bio_for_each_segment(bvec
, bio
, iter
) {
4511 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4512 sg
->length
+= bvec
.bv_len
;
4514 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4518 sg
= sg
? (sg
+ 1) : sgl
;
4520 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4537 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4539 struct ssd_cmd
*cmd
;
4540 struct ssd_rw_msg
*msg
;
4541 struct ssd_sg_entry
*sge
;
4542 sector_t block
= bio_start(bio
);
4546 tag
= ssd_get_tag(dev
, wait
);
4551 cmd
= &dev
->cmd
[tag
];
4555 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4557 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4558 if (bio
->bi_rw
& REQ_DISCARD
) {
4559 unsigned int length
= bio_sectors(bio
);
4561 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4563 msg
->fun
= SSD_FUNC_TRIM
;
4566 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4568 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4571 block
+= sge
->length
;
4572 length
-= sge
->length
;
4579 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4584 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4585 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4586 unsigned int length
= bio_sectors(bio
);
4588 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4590 msg
->fun
= SSD_FUNC_TRIM
;
4593 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4595 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4598 block
+= sge
->length
;
4599 length
-= sge
->length
;
4606 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4613 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4614 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4617 if (bio_data_dir(bio
) == READ
) {
4618 msg
->fun
= SSD_FUNC_READ
;
4621 msg
->fun
= SSD_FUNC_WRITE
;
4622 msg
->flag
= dev
->wmode
;
4626 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4628 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4629 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4631 block
+= sge
->length
;
4637 #ifdef SSD_OT_PROTECT
4638 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4639 msleep_interruptible(dev
->ot_delay
);
4643 ssd_start_io_acct(cmd
);
4649 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4651 struct ssd_cmd
*cmd
;
4652 struct ssd_rw_msg
*msg
;
4653 struct ssd_sg_entry
*sge
;
4654 struct scatterlist
*sgl
;
4655 sector_t block
= bio_start(bio
);
4659 tag
= ssd_get_tag(dev
, wait
);
4664 cmd
= &dev
->cmd
[tag
];
4668 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4672 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4673 if (bio
->bi_rw
& REQ_DISCARD
) {
4674 unsigned int length
= bio_sectors(bio
);
4676 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4678 msg
->fun
= SSD_FUNC_TRIM
;
4681 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4683 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4686 block
+= sge
->length
;
4687 length
-= sge
->length
;
4694 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4699 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4700 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4701 unsigned int length
= bio_sectors(bio
);
4703 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4705 msg
->fun
= SSD_FUNC_TRIM
;
4708 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4710 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4713 block
+= sge
->length
;
4714 length
-= sge
->length
;
4721 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4728 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4731 if (bio_data_dir(bio
) == READ
) {
4732 msg
->fun
= SSD_FUNC_READ
;
4734 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4736 msg
->fun
= SSD_FUNC_WRITE
;
4737 msg
->flag
= dev
->wmode
;
4738 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4742 for (i
=0; i
<cmd
->nsegs
; i
++) {
4744 sge
->length
= sg_dma_len(sgl
) >> 9;
4745 sge
->buf
= sg_dma_address(sgl
);
4747 block
+= sge
->length
;
4754 #ifdef SSD_OT_PROTECT
4755 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4756 msleep_interruptible(dev
->ot_delay
);
4760 ssd_start_io_acct(cmd
);
4767 static int ssd_done_thread(void *data
)
4769 struct ssd_device
*dev
;
4772 #ifdef SSD_ESCAPE_IRQ
4781 //set_user_nice(current, -5);
4783 while (!kthread_should_stop()) {
4784 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4786 while (atomic_read(&dev
->in_doneq
)) {
4788 spin_lock(&dev
->doneq_lock
);
4789 bio
= ssd_blist_get(&dev
->doneq
);
4790 spin_unlock(&dev
->doneq_lock
);
4792 spin_lock_irq(&dev
->doneq_lock
);
4793 bio
= ssd_blist_get(&dev
->doneq
);
4794 spin_unlock_irq(&dev
->doneq_lock
);
4798 next
= bio
->bi_next
;
4799 bio
->bi_next
= NULL
;
4800 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4803 bio_endio(bio
, bio
->bi_size
, 0);
4805 atomic_dec(&dev
->in_doneq
);
4811 #ifdef SSD_ESCAPE_IRQ
4812 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4813 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4814 cpumask_setall(&new_mask
);
4815 cpumask_clear_cpu(dev
->irq_cpu
, &new_mask
);
4816 set_cpus_allowed_ptr(current
, &new_mask
);
4818 cpus_setall(new_mask
);
4819 cpu_clear(dev
->irq_cpu
, new_mask
);
4820 set_cpus_allowed(current
, new_mask
);
4829 static int ssd_send_thread(void *data
)
4831 struct ssd_device
*dev
;
4834 #ifdef SSD_ESCAPE_IRQ
4843 //set_user_nice(current, -5);
4845 while (!kthread_should_stop()) {
4846 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4848 while (atomic_read(&dev
->in_sendq
)) {
4849 spin_lock(&dev
->sendq_lock
);
4850 bio
= ssd_blist_get(&dev
->sendq
);
4851 spin_unlock(&dev
->sendq_lock
);
4854 next
= bio
->bi_next
;
4855 bio
->bi_next
= NULL
;
4856 #ifdef SSD_QUEUE_PBIO
4857 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4858 __ssd_submit_pbio(dev
, bio
, 1);
4860 ssd_submit_bio(dev
, bio
, 1);
4863 ssd_submit_bio(dev
, bio
, 1);
4865 atomic_dec(&dev
->in_sendq
);
4871 #ifdef SSD_ESCAPE_IRQ
4872 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4873 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4874 cpumask_setall(&new_mask
);
4875 cpumask_clear_cpu(dev
->irq_cpu
, &new_mask
);
4876 set_cpus_allowed_ptr(current
, &new_mask
);
4878 cpus_setall(new_mask
);
4879 cpu_clear(dev
->irq_cpu
, new_mask
);
4880 set_cpus_allowed(current
, new_mask
);
4890 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4892 kthread_stop(dev
->send_thread
);
4893 kthread_stop(dev
->done_thread
);
/*
 * ssd_init_thread() - set up the send/done queues and worker kthreads.
 *
 * Zeroes the in-flight counters, initializes the queue locks, bio lists
 * and wait queues, then starts the completion thread ("<name>/d")
 * followed by the submission thread ("<name>/s").  A kthread_run()
 * failure stores the PTR_ERR code in 'ret' and jumps to the matching
 * unwind label; the unwind for a failed send thread stops the
 * already-running done thread.
 *
 * NOTE(review): the success return, the unwind labels themselves and
 * the final return are not visible in this chunk; the text below is
 * kept byte-identical to the original.
 */
4896 static int ssd_init_thread(struct ssd_device
*dev
)
/* no commands in flight yet */
4900 atomic_set(&dev
->in_doneq
, 0);
4901 atomic_set(&dev
->in_sendq
, 0);
4903 spin_lock_init(&dev
->doneq_lock
);
4904 spin_lock_init(&dev
->sendq_lock
);
4906 ssd_blist_init(&dev
->doneq
);
4907 ssd_blist_init(&dev
->sendq
);
4909 init_waitqueue_head(&dev
->done_waitq
);
4910 init_waitqueue_head(&dev
->send_waitq
);
/* completion worker, named "<device name>/d" */
4912 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4913 if (IS_ERR(dev
->done_thread
)) {
4914 ret
= PTR_ERR(dev
->done_thread
);
4915 goto out_done_thread
;
/* submission worker, named "<device name>/s" */
4918 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4919 if (IS_ERR(dev
->send_thread
)) {
4920 ret
= PTR_ERR(dev
->send_thread
);
4921 goto out_send_thread
;
/* unwind: send thread failed to start, stop the done thread */
4927 kthread_stop(dev
->done_thread
);
4933 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4935 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4937 spin_lock(&dev
->dcmd_lock
);
4938 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4939 spin_unlock(&dev
->dcmd_lock
);
/*
 * ssd_get_dcmd() - pop one direct-command descriptor off the free list.
 *
 * Under dcmd_lock, detaches the first entry of dev->dcmd_list (if any);
 * dcmd stays NULL when the list is empty, letting the caller (see
 * ssd_get_dmsg()) decide whether to sleep and retry.
 *
 * NOTE(review): the function's return statement is not visible in this
 * chunk; the text below is kept byte-identical to the original.
 */
4942 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
4944 struct ssd_dcmd
*dcmd
= NULL
;
4946 spin_lock(&dev
->dcmd_lock
);
4947 if (!list_empty(&dev
->dcmd_list
)) {
4948 dcmd
= list_entry(dev
->dcmd_list
.next
,
4949 struct ssd_dcmd
, list
);
/* detach from the free list; list_del_init keeps the node self-linked */
4950 list_del_init(&dcmd
->list
);
4952 spin_unlock(&dev
->dcmd_lock
);
4957 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
4962 static int ssd_init_dcmd(struct ssd_device
*dev
)
4964 struct ssd_dcmd
*dcmd
;
4965 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
4968 spin_lock_init(&dev
->dcmd_lock
);
4969 INIT_LIST_HEAD(&dev
->dcmd_list
);
4970 init_waitqueue_head(&dev
->dcmd_wq
);
4972 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
4974 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
4975 goto out_alloc_dcmd
;
4977 memset(dev
->dcmd
, 0, dcmd_sz
);
4979 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
4981 INIT_LIST_HEAD(&dcmd
->list
);
4982 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
/*
 * ssd_put_dmsg() - release a direct-command message buffer.
 *
 * Recovers the owning ssd_dcmd from the embedded msg pointer via
 * container_of(), clears the message area, and wakes a waiter blocked
 * in ssd_get_dmsg().
 *
 * NOTE(review): the line between the memset and the wake_up is not
 * visible in this chunk (presumably it returns the dcmd to the free
 * list — verify against the full source); the text below is kept
 * byte-identical to the original.
 */
4991 static void ssd_put_dmsg(void *msg
)
4993 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
4994 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
/* scrub the message area for the next user */
4996 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
/* wake a waiter blocked in ssd_get_dmsg() */
4998 wake_up(&dev
->dcmd_wq
);
5001 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5003 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5007 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5010 dcmd
= ssd_get_dcmd(dev
);
5012 finish_wait(&dev
->dcmd_wq
, &wait
);
5018 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5020 DECLARE_COMPLETION(wait
);
5021 struct ssd_cmd
*cmd
;
5025 tag
= ssd_get_tag(dev
, 1);
5030 cmd
= &dev
->cmd
[tag
];
5032 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5033 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5035 cmd
->waiting
= &wait
;
5039 wait_for_completion(cmd
->waiting
);
5040 cmd
->waiting
= NULL
;
5042 if (cmd
->errors
== -ETIMEDOUT
) {
5044 } else if (cmd
->errors
) {
5049 *done
= cmd
->nr_log
;
5051 ssd_put_tag(dev
, cmd
->tag
);
5056 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5058 DECLARE_COMPLETION(wait
);
5059 struct ssd_cmd
*cmd
;
5063 tag
= ssd_barrier_get_tag(dev
);
5068 cmd
= &dev
->cmd
[tag
];
5070 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5071 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5073 cmd
->waiting
= &wait
;
5077 wait_for_completion(cmd
->waiting
);
5078 cmd
->waiting
= NULL
;
5080 if (cmd
->errors
== -ETIMEDOUT
) {
5082 } else if (cmd
->errors
) {
5087 *done
= cmd
->nr_log
;
5089 ssd_barrier_put_tag(dev
, cmd
->tag
);
5094 #ifdef SSD_OT_PROTECT
5095 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5102 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5106 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5109 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5110 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5112 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5113 if (val
== 0xffffffffffffffffull
) {
5117 cur
= (int)CUR_TEMP(val
);
5119 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5120 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5121 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5122 dev
->ot_delay
= SSD_OT_DELAY
;
5129 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5130 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5131 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5138 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5144 if (!dev
|| !status
) {
5148 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5149 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5150 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5151 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5152 if ((val
>> 22) & 0x1) {
5158 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5159 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5160 if ((val
>> 22) & 0x1) {
5166 *status
= !!dev
->ot_delay
;
5173 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5179 mutex_lock(&dev
->fw_mutex
);
5181 dev
->ot_protect
= !!protect
;
5183 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5184 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5185 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5186 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5187 if (dev
->ot_protect
) {
5192 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5195 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5196 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5197 if (dev
->ot_protect
) {
5202 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5206 mutex_unlock(&dev
->fw_mutex
);
5209 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5211 ssd_set_ot_protect(dev
, ot_protect
);
5213 #ifdef SSD_OT_PROTECT
5214 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5221 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5223 struct ssd_log_op_msg
*msg
;
5224 struct ssd_log_msg
*lmsg
;
5226 size_t length
= dev
->hw_info
.log_sz
;
5229 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5233 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5234 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5235 ret
= dma_mapping_error(buf_dma
);
5237 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5240 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5241 goto out_dma_mapping
;
5244 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5246 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5247 lmsg
= (struct ssd_log_msg
*)msg
;
5248 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5249 lmsg
->ctrl_idx
= ctrl_idx
;
5250 lmsg
->buf
= buf_dma
;
5252 msg
->fun
= SSD_FUNC_READ_LOG
;
5253 msg
->ctrl_idx
= ctrl_idx
;
5257 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5260 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5266 #define SSD_LOG_PRINT_BUF_SZ 256
5267 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5269 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5270 struct ssd_log_entry
*le
;
5272 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5278 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5279 if (log_desc
->event
== le
->event
) {
5289 if (log_desc
->level
< log_level
) {
5294 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5297 sn
= dev
->labelv3
.barcode
;
5300 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5302 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5303 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5306 switch (log_desc
->data
) {
5307 case SSD_LOG_DATA_NONE
:
5309 case SSD_LOG_DATA_LOC
:
5310 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5311 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5312 if (log_desc
->sblock
) {
5313 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5315 if (log_desc
->spage
) {
5316 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5319 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5320 if (log_desc
->sblock
) {
5321 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5323 if (log_desc
->spage
) {
5324 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5328 case SSD_LOG_DATA_HEX
:
5329 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5334 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5336 switch (log_desc
->level
) {
5337 case SSD_LOG_LEVEL_INFO
:
5338 hio_info("%s\n", print_buf
);
5340 case SSD_LOG_LEVEL_NOTICE
:
5341 hio_note("%s\n", print_buf
);
5343 case SSD_LOG_LEVEL_WARNING
:
5344 hio_warn("%s\n", print_buf
);
5346 case SSD_LOG_LEVEL_ERR
:
5347 hio_err("%s\n", print_buf
);
5348 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5351 hio_warn("%s\n", print_buf
);
5356 return log_desc
->level
;
5359 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5360 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5363 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5368 case SSD_LOG_OVER_TEMP
: {
5369 #ifdef SSD_OT_PROTECT
5370 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5371 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5372 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5373 dev
->ot_delay
= SSD_OT_DELAY
;
5380 case SSD_LOG_NORMAL_TEMP
: {
5381 #ifdef SSD_OT_PROTECT
5382 /* need to check all controller's temperature */
5383 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5388 case SSD_LOG_BATTERY_FAULT
: {
5391 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5392 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5393 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5397 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5398 ssd_switch_wmode(dev
, dev
->user_wmode
);
5403 case SSD_LOG_BATTERY_OK
: {
5404 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5405 ssd_switch_wmode(dev
, dev
->user_wmode
);
5410 case SSD_LOG_BOARD_VOLT_FAULT
: {
5411 ssd_mon_boardvolt(dev
);
5415 case SSD_LOG_CLEAR_LOG
: {
5417 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5421 case SSD_LOG_CAP_VOLT_FAULT
:
5422 case SSD_LOG_CAP_LEARN_FAULT
:
5423 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5424 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5425 ssd_switch_wmode(dev
, dev
->user_wmode
);
5434 /* ssd event call */
5435 if (dev
->event_call
) {
5436 dev
->event_call(dev
->gd
, event
, level
);
5439 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5440 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5447 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5453 mutex_lock(&dev
->internal_log_mutex
);
5455 size
= sizeof(struct ssd_log
);
5456 off
= dev
->internal_log
.nr_log
* size
;
5458 if (off
== dev
->rom_info
.log_sz
) {
5459 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5460 hio_warn("%s: internal log is full\n", dev
->name
);
5465 internal_log
= dev
->internal_log
.log
+ off
;
5466 memcpy(internal_log
, log
, size
);
5468 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5469 off
+= dev
->rom_info
.log_base
;
5471 ret
= ssd_spi_write(dev
, log
, off
, size
);
5477 dev
->internal_log
.nr_log
++;
5480 mutex_unlock(&dev
->internal_log_mutex
);
5484 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5491 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5494 memset(&log
, 0, sizeof(struct ssd_log
));
5496 do_gettimeofday(&tv
);
5497 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5498 log
.time
= tv
.tv_sec
;
5499 log
.le
.event
= event
;
5500 log
.le
.data
.val
= data
;
5502 level
= ssd_parse_log(dev
, &log
, 0);
5503 if (level
>= SSD_LOG_LEVEL
) {
5504 ret
= ssd_save_log(dev
, &log
);
5508 if (SSD_LOG_LEVEL_ERR
== level
) {
5513 dev
->smart
.log_info
.nr_log
++;
5514 dev
->smart
.log_info
.stat
[level
]++;
5517 ssd_handle_event(dev
, event
, level
);
5522 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5524 struct ssd_log_entry le
;
5527 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5535 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5539 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5544 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5545 queue_work(dev
->workq
, &dev
->log_work
);
5551 static int ssd_do_swlog(struct ssd_device
*dev
)
5553 struct ssd_log_entry le
;
5556 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5557 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5558 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5567 static int __ssd_clear_log(struct ssd_device
*dev
)
5569 uint32_t off
, length
;
5572 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5576 if (dev
->internal_log
.nr_log
== 0) {
5580 mutex_lock(&dev
->internal_log_mutex
);
5582 off
= dev
->rom_info
.log_base
;
5583 length
= dev
->rom_info
.log_sz
;
5585 ret
= ssd_spi_erase(dev
, off
, length
);
5587 hio_warn("%s: log erase: failed\n", dev
->name
);
5591 dev
->internal_log
.nr_log
= 0;
5594 mutex_unlock(&dev
->internal_log_mutex
);
5598 static int ssd_clear_log(struct ssd_device
*dev
)
5602 ret
= __ssd_clear_log(dev
);
5604 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5610 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5612 struct ssd_log_entry
*le
;
5619 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5624 do_gettimeofday(&tv
);
5626 log
.time
= tv
.tv_sec
;
5627 log
.ctrl_idx
= ctrl_idx
;
5629 le
= (ssd_log_entry_t
*)buf
;
5630 while (nr_log
> 0) {
5631 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5633 level
= ssd_parse_log(dev
, &log
, 1);
5634 if (level
>= SSD_LOG_LEVEL
) {
5635 ssd_save_log(dev
, &log
);
5639 if (SSD_LOG_LEVEL_ERR
== level
) {
5643 dev
->smart
.log_info
.nr_log
++;
5644 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5645 dev
->smart
.log_info
.stat
[level
]++;
5649 /* log to the volatile log info */
5650 dev
->log_info
.nr_log
++;
5651 dev
->log_info
.stat
[level
]++;
5655 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5657 /*dev->readonly = 1;
5658 set_disk_ro(dev->gd, 1);
5659 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5663 ssd_handle_event(dev
, le
->event
, level
);
5672 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5673 static void ssd_log_worker(void *data
)
5675 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5677 static void ssd_log_worker(struct work_struct
*work
)
5679 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5684 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5686 if (!dev
->log_buf
) {
5687 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5688 if (!dev
->log_buf
) {
5689 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5695 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5696 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5697 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5699 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5700 hio_warn("%s: do log fail\n", dev
->name
);
5706 ret
= ssd_do_swlog(dev
);
5708 hio_warn("%s: do swlog fail\n", dev
->name
);
5712 static void ssd_cleanup_log(struct ssd_device
*dev
)
5715 kfree(dev
->log_buf
);
5716 dev
->log_buf
= NULL
;
5719 sfifo_free(&dev
->log_fifo
);
5721 if (dev
->internal_log
.log
) {
5722 vfree(dev
->internal_log
.log
);
5723 dev
->internal_log
.log
= NULL
;
5727 static int ssd_init_log(struct ssd_device
*dev
)
5729 struct ssd_log
*log
;
5734 mutex_init(&dev
->internal_log_mutex
);
5736 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5737 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5739 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5742 off
= dev
->rom_info
.log_base
;
5743 size
= dev
->rom_info
.log_sz
;
5745 dev
->internal_log
.log
= vmalloc(size
);
5746 if (!dev
->internal_log
.log
) {
5751 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5753 goto out_alloc_log_fifo
;
5756 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5760 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5761 while (len
< size
) {
5762 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5767 if (log
->ctrl_idx
== 0xff) {
5771 dev
->internal_log
.nr_log
++;
5773 len
+= sizeof(struct ssd_log
);
5774 off
+= sizeof(struct ssd_log
);
5780 sfifo_free(&dev
->log_fifo
);
5782 vfree(dev
->internal_log
.log
);
5783 dev
->internal_log
.log
= NULL
;
5784 dev
->internal_log
.nr_log
= 0;
5786 /* skip error if not in standard mode */
5787 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5794 static void ssd_stop_workq(struct ssd_device
*dev
)
5796 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5797 flush_workqueue(dev
->workq
);
/*
 * ssd_start_workq() - mark the device workqueue usable and kick the log
 * worker.
 *
 * Sets SSD_INIT_WORKQ in dev->state (ssd_gen_swlog() tests this bit
 * before queueing work) and schedules the log work once so entries that
 * accumulated while the workqueue was stopped get processed.
 *
 * NOTE(review): two original lines between the bit-set and queue_work()
 * are not visible in this chunk; the text below is kept byte-identical
 * to the original.
 */
5800 static void ssd_start_workq(struct ssd_device
*dev
)
5802 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
/* drain any log entries that piled up while the workqueue was stopped */
5805 queue_work(dev
->workq
, &dev
->log_work
);
/*
 * ssd_cleanup_workq() - drain and destroy the device's workqueue.
 *
 * Flushes pending work first so no worker is still running when the
 * queue is destroyed.
 */
5808 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5810 flush_workqueue(dev
->workq
);
5811 destroy_workqueue(dev
->workq
);
5815 static int ssd_init_workq(struct ssd_device
*dev
)
5819 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5830 static int ssd_init_rom_info(struct ssd_device
*dev
)
5834 mutex_init(&dev
->spi_mutex
);
5835 mutex_init(&dev
->i2c_mutex
);
5837 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5838 /* fix bug: read data to clear status */
5839 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5841 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5842 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5843 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5845 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5846 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5847 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5849 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5850 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5851 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5853 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5855 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5856 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5857 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5858 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5859 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5860 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5861 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5863 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5864 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5865 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5866 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5868 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5869 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5870 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5871 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5873 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
5874 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
5875 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
5877 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
5878 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5880 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
5881 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
5882 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5884 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5885 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5886 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
5887 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
5888 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
5891 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5892 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5893 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5894 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5896 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5897 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5898 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5899 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5901 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5902 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5903 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5904 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5906 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5907 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5908 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
5910 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5911 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
5912 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
5913 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
5914 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
5917 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
5918 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
5921 return ssd_init_spi(dev
);
5925 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
5929 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5930 struct hd_struct
*part
;
5936 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
5940 do_gettimeofday(&tv
);
5941 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
5944 run_time
= tv
.tv_sec
- dev
->uptime
;
5947 /* avoid frequently update */
5948 if (run_time
>= 60) {
5953 smart
->io_stat
.run_time
+= run_time
;
5955 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5956 cpu
= part_stat_lock();
5957 part
= &dev
->gd
->part0
;
5958 part_round_stats(cpu
, part
);
5961 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
5962 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
5963 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
5964 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
5965 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
5967 disk_round_stats(dev
->gd
);
5970 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
5971 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
5972 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
5973 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
5976 disk_round_stats(dev
->gd
);
5979 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
5980 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
5981 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
5982 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
5985 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
5987 for (i
=0; i
<dev
->nr_queue
; i
++) {
5988 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
5989 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
5992 for (i
=0; i
<dev
->nr_queue
; i
++) {
5993 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
5994 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
5998 //dev->uptime = tv.tv_sec;
6003 static int ssd_clear_smart(struct ssd_device
*dev
)
6007 uint32_t off
, length
;
6011 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6016 off
= dev
->rom_info
.smart_base
;
6017 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6019 ret
= ssd_spi_erase(dev
, off
, length
);
6021 hio_warn("%s: info erase: failed\n", dev
->name
);
6025 sversion
= dev
->smart
.version
;
6027 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6028 dev
->smart
.version
= sversion
+ 1;
6029 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6031 /* clear all tmp acc */
6032 for (i
=0; i
<dev
->nr_queue
; i
++) {
6033 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6034 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6037 atomic_set(&dev
->tocnt
, 0);
6039 /* clear tmp log info */
6040 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6042 do_gettimeofday(&tv
);
6043 dev
->uptime
= tv
.tv_sec
;
6046 //ssd_clear_alarm(dev);
6051 static int ssd_save_smart(struct ssd_device
*dev
)
6057 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6060 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6064 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6068 dev
->smart
.version
++;
6070 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6071 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6072 size
= dev
->rom_info
.smart_sz
;
6074 ret
= ssd_spi_erase(dev
, off
, size
);
6076 hio_warn("%s: info erase failed\n", dev
->name
);
6080 size
= sizeof(struct ssd_smart
);
6082 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6084 hio_warn("%s: info write failed\n", dev
->name
);
6095 static int ssd_init_smart(struct ssd_device
*dev
)
6097 struct ssd_smart
*smart
;
6103 do_gettimeofday(&tv
);
6104 dev
->uptime
= tv
.tv_sec
;
6106 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6110 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6116 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6119 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6120 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6122 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6123 size
= sizeof(struct ssd_smart
);
6125 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6127 hio_warn("%s: info read failed\n", dev
->name
);
6131 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6133 smart
[i
].version
= 0;
6137 if (smart
[i
].version
> dev
->smart
.version
) {
6138 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6142 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6143 /* first time power up */
6144 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6145 dev
->smart
.version
= 1;
6148 /* check log info */
6150 struct ssd_log_info log_info
;
6151 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6153 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6155 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6156 /* skip the volatile log info */
6157 if (SSD_LOG_SEU_FAULT
!= log
->le
.event
&& SSD_LOG_SEU_FAULT1
!= log
->le
.event
) {
6158 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6166 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6167 if (log_info
.stat
[i
] > dev
->smart
.log_info
.stat
[i
]) {
6169 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6170 dev
->smart
.version
++;
6176 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6177 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6181 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6182 size
= dev
->rom_info
.smart_sz
;
6184 ret
= ssd_spi_erase(dev
, off
, size
);
6186 hio_warn("%s: info erase failed\n", dev
->name
);
6190 size
= sizeof(struct ssd_smart
);
6191 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6193 hio_warn("%s: info write failed\n", dev
->name
);
6200 /* sync smart with alarm led */
6201 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6202 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6209 /* skip error if not in standard mode */
6210 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6217 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6219 struct ssd_bm_manufacturer_data bm_md
= {0};
6220 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6228 mutex_lock(&dev
->bm_mutex
);
6230 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6231 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6236 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6237 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6242 if (bm_md
.firmware_ver
& 0xF000) {
6247 *ver
= bm_md
.firmware_ver
;
6250 mutex_unlock(&dev
->bm_mutex
);
/* NOTE(review): line-mangled extraction; braces/retry loop/error-check lines
 * are missing between the visible fragments — recover from original source. */
/* Read the battery-monitor firmware version, retrying up to SSD_BM_RETRY_MAX
 * times via __ssd_bm_get_version (retry loop body not visible here). */
6254 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
/* i counts down the remaining retries. */
6257 int i
= SSD_BM_RETRY_MAX
;
/* Delegate one attempt; tmp receives the version on success. */
6261 ret
= __ssd_bm_get_version(dev
, &tmp
);
6275 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6277 struct ssd_bm_configuration_registers bm_cr
;
6278 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6282 mutex_lock(&dev
->bm_mutex
);
6284 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6285 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6290 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6291 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6296 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6301 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6304 mutex_unlock(&dev
->bm_mutex
);
/* NOTE(review): line-mangled extraction; retry loop and error handling lines
 * are missing between the visible fragments — recover from original source. */
/* Query the number of backup capacitors from the battery monitor, with
 * SSD_BM_RETRY_MAX attempts via __ssd_bm_nr_cap. */
6308 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
/* i counts down the remaining retries. */
6311 int i
= SSD_BM_RETRY_MAX
;
/* Delegate one attempt; tmp receives the capacitor count on success. */
6315 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
/* NOTE(review): line-mangled extraction; declarations and the return path
 * are missing between the visible fragments — recover from original source. */
/* Ask the battery monitor to start a capacitance-learning cycle by writing
 * the ENTER_CAP_LEARNING word to its ManufacturerAccess command register. */
6329 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
/* SMBus payload: the enter-cap-learning command word. */
6331 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
/* ManufacturerAccess is the BM's generic command entry register. */
6332 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
/* Issue the word write to the BM slave address; the check of ret is in
 * lines dropped by the extraction. */
6335 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
/* NOTE(review): line-mangled extraction; error check and the store into
 * *status are missing between the visible fragments. */
/* Read the battery monitor's SafetyStatus word over SMBus into *status. */
6344 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
/* SBS SafetyStatus register address. */
6347 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
/* Word read from the BM slave; val presumably copied to *status in the
 * dropped lines — confirm against the original source. */
6350 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
/* NOTE(review): line-mangled extraction; error check and the store into
 * *status are missing between the visible fragments. */
/* Read the battery monitor's OperationStatus word over SMBus into *status. */
6360 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
/* SBS OperationStatus register address. */
6363 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
/* Word read from the BM slave; val presumably copied to *status in the
 * dropped lines — confirm against the original source. */
6366 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6376 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6378 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6379 struct ssd_bm bm_status
;
6380 uint8_t buf
[2] = {0, };
6385 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6387 while (bm_sbs
->desc
!= NULL
) {
6388 switch (bm_sbs
->size
) {
6390 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6392 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6398 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6400 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6403 //val = *(uint16_t *)buf;
6411 switch (bm_sbs
->unit
) {
6412 case SBS_UNIT_VALUE
:
6413 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6415 case SBS_UNIT_TEMPERATURE
:
6416 cval
= (uint16_t)(val
- 2731) / 10;
6417 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6419 case SBS_UNIT_VOLTAGE
:
6420 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6422 case SBS_UNIT_CURRENT
:
6423 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6426 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6428 case SBS_UNIT_PERCENT
:
6429 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6431 case SBS_UNIT_CAPACITANCE
:
6432 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6443 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6449 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6451 struct ssd_bm bm_status
= {0};
6456 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6461 /* capacitor voltage */
6462 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6467 for (i
=0; i
<nr_cap
; i
++) {
6468 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6469 *status
= SSD_BMSTATUS_WARNING
;
6475 if (bm_status
.sf_status
) {
6476 *status
= SSD_BMSTATUS_WARNING
;
6481 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6482 *status
= SSD_BMSTATUS_CHARGING
;
6484 *status
= SSD_BMSTATUS_OK
;
6491 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6493 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6494 static void ssd_bm_worker(void *data
)
6496 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6498 static void ssd_bm_worker(struct work_struct
*work
)
6500 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6506 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6510 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6514 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6518 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6520 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6524 /* need cap learning ? */
6525 if (!(opstatus
& 0xF0)) {
6526 ret
= ssd_bm_enter_cap_learning(dev
);
6528 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6534 static void ssd_bm_routine_start(void *data
)
6536 struct ssd_device
*dev
;
6543 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6544 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6545 queue_work(dev
->workq
, &dev
->bm_work
);
6547 queue_work(dev
->workq
, &dev
->capmon_work
);
6553 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6560 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6565 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6570 /* make sure the lm80 voltage value is updated */
6571 msleep(SSD_LM80_CONV_INTERVAL
);
6573 /* check if full charged */
6576 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6578 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6579 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6583 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6584 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6589 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6593 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6596 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6598 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6599 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6603 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6610 /* enter cap learn */
6611 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6615 msleep(SSD_PL_CAP_LEARN_WAIT
);
6617 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6618 if (!((t
>> 1) & 0x1)) {
6623 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6629 if ((t
>> 4) & 0x1) {
6640 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
/* NOTE(review): line-mangled extraction; the error-branch structure around
 * the two ssd_gen_swlog calls is missing — recover from original source. */
/* Serialized wrapper around ssd_do_cap_learn: holds bm_mutex and pauses the
 * workqueue so the learning cycle runs without concurrent BM traffic. */
6646 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6654 mutex_lock(&dev
->bm_mutex
);
/* Stop background work while learning is in progress. */
6656 ssd_stop_workq(dev
);
6658 ret
= ssd_do_cap_learn(dev
, cap
);
/* On failure: log a cap-learn fault event (branch lines dropped). */
6660 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
/* On success: log the measured capacitance value. */
6664 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
/* Resume background work and release the BM lock. */
6667 ssd_start_workq(dev
);
6668 mutex_unlock(&dev
->bm_mutex
);
6673 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6681 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6685 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6692 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6694 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6695 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6699 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6700 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6705 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6707 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6710 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6713 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6714 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6719 /* enable cap INx */
6720 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6722 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6723 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6729 /* skip error if not in standard mode */
6730 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/* NOTE(review): line-mangled extraction; early-return bodies and the final
 * branch body are missing between the visible fragments. */
/* Quick (single-sample, no-wait) check that the power-loss capacitor has
 * reached ready voltage; skipped on pre-V3.2 protocol or early FHHL PCBs. */
6736 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
/* Older protocol: PL-cap check does not apply. */
6742 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
/* FHHL boards before rev 'B' lack the monitored cap circuit — skip. */
6746 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
/* Sample the cap voltage (U1) from the LM80 sensor over SMBus. */
6751 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
/* LM80 returns a byte-swapped raw reading; convert to a voltage. */
6755 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
/* Below ready threshold: branch body (dropped) presumably flags failure. */
6756 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
/* NOTE(review): line-mangled extraction; branch bodies and the return path
 * are missing between the visible fragments. */
/* Initialize power-loss capacitor state: record the user write mode, set up
 * the BM mutex, then check cap health (BM fault register on pre-V3.2,
 * ssd_check_pl_cap otherwise); faults set the SSD_HWMON_PL_CAP bit. */
6764 static int ssd_init_pl_cap(struct ssd_device
*dev
)
6768 /* set here: user write mode */
6769 dev
->user_wmode
= wmode
;
6771 mutex_init(&dev
->bm_mutex
);
/* Pre-V3.2 devices expose cap fault status via a register bit. */
6773 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6775 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
/* Bit 1 of the fault register indicates a PL-cap fault. */
6776 if ((val
>> 1) & 0x1) {
6777 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/* V3.2+: actively probe the capacitor; failure also sets the hwmon bit. */
6780 ret
= ssd_check_pl_cap(dev
);
6782 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/* NOTE(review): line-mangled extraction; the loop body's terminating action
 * (presumably forcing a trailing NUL) is in dropped lines. */
/* Scan at most len bytes of str for a NUL terminator; used to sanitize
 * fixed-width label fields read from flash. */
6790 static void __end_str(char *str
, int len
)
6794 for(i
=0; i
<len
; i
++) {
6795 if (*(str
+i
) == '\0')
6801 static int ssd_init_label(struct ssd_device
*dev
)
6807 /* label location */
6808 off
= dev
->rom_info
.label_base
;
6810 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6811 size
= sizeof(struct ssd_label
);
6814 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
6816 memset(&dev
->label
, 0, size
);
6820 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
6821 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
6822 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
6823 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
6824 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
6825 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
6827 size
= sizeof(struct ssd_labelv3
);
6830 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
6832 memset(&dev
->labelv3
, 0, size
);
6836 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6837 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6838 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
6839 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
6840 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6841 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6842 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
6843 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
6844 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
6848 /* skip error if not in standard mode */
6849 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/* NOTE(review): line-mangled extraction; return statements and the else
 * keyword joining the two branches are in dropped lines. */
/* Public API: copy the device's manufacturing label into *label. On V3.2+
 * devices the v3 label (labelv3) is repacked field-by-field into the legacy
 * ssd_label layout; older devices get a straight struct copy. */
6855 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
6857 struct ssd_device
*dev
;
/* Reject NULL bdev/label or a bdev with no gendisk attached. */
6859 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
6863 dev
= bdev
->bd_disk
->private_data
;
6865 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
/* Zero first so unmapped legacy fields read as empty strings. */
6866 memset(label
, 0, sizeof(struct ssd_label
));
/* v3 'manufactured' maps onto legacy 'date'. */
6867 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
/* v3 'barcode' maps onto legacy 'sn'. */
6868 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
/* v3 'boardtype' maps onto legacy 'desc'. */
6869 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
/* v3 'vendorname' maps onto legacy 'maf' (manufacturer). */
6870 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
/* Pre-V3.2: label already in legacy layout; copy wholesale. */
6872 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
6878 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
6880 uint16_t bm_ver
= 0;
6883 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6884 ret
= ssd_bm_get_version(dev
, &bm_ver
);
6890 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
6891 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
6892 ver
->bm_ver
= bm_ver
;
6893 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
6894 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
/* NOTE(review): line-mangled extraction; the early-return body and final
 * return of ret are in dropped lines. */
/* Public API: fetch bridge/controller/BM/PCB version info for the device
 * behind bdev. fw_mutex serializes against firmware operations. */
6901 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
6903 struct ssd_device
*dev
;
/* Reject NULL bdev/ver or a bdev with no gendisk attached. */
6906 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
6910 dev
= bdev
->bd_disk
->private_data
;
/* Version query may touch the BM over SMBus — hold fw_mutex. */
6912 mutex_lock(&dev
->fw_mutex
);
6913 ret
= __ssd_get_version(dev
, ver
);
6914 mutex_unlock(&dev
->fw_mutex
);
6919 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
6927 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6933 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
6934 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
6935 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
6936 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
6937 *temp
= (int)dev
->db_info
.data
.log
.extra
;
6942 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
6943 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
6945 val
= ssd_reg_read(dev
->ctrlp
+ off
);
6946 if (val
== 0xffffffffffffffffull
) {
6950 cur
= (int)CUR_TEMP(val
);
/* NOTE(review): line-mangled extraction; the early-return body and final
 * return of ret are in dropped lines. */
/* Public API: read the device temperature (max across controllers, per
 * __ssd_get_temperature) under fw_mutex. */
6961 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
6963 struct ssd_device
*dev
;
/* Reject NULL bdev/temp or a bdev with no gendisk attached. */
6966 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
6970 dev
= bdev
->bd_disk
->private_data
;
/* Serialize register access against firmware operations. */
6973 mutex_lock(&dev
->fw_mutex
);
6974 ret
= __ssd_get_temperature(dev
, temp
);
6975 mutex_unlock(&dev
->fw_mutex
);
/* NOTE(review): line-mangled extraction; the early-return body and final
 * return are in dropped lines. */
/* Public API: enable/disable over-temperature protection. otprotect is
 * normalized to 0/1 via !! before delegating to ssd_set_ot_protect. */
6980 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
6982 struct ssd_device
*dev
;
/* Reject NULL bdev or a bdev with no gendisk attached. */
6984 if (!bdev
|| !(bdev
->bd_disk
)) {
6988 dev
= bdev
->bd_disk
->private_data
;
6989 ssd_set_ot_protect(dev
, !!otprotect
);
/* NOTE(review): line-mangled extraction; else keywords, early-return body
 * and final return are in dropped lines. */
/* Public API: report battery/capacitor module health. V3.2+ devices derive
 * it from the cached SSD_HWMON_PL_CAP bit; >V3 devices query the BM live
 * via __ssd_bm_status; anything older is unconditionally OK. */
6994 int ssd_bm_status(struct block_device
*bdev
, int *status
)
6996 struct ssd_device
*dev
;
/* Reject NULL bdev/status or a bdev with no gendisk attached. */
6999 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7003 dev
= bdev
->bd_disk
->private_data
;
7005 mutex_lock(&dev
->fw_mutex
);
7006 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
/* V3.2+: PL-cap fault bit recorded at init/monitor time decides status. */
7007 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7008 *status
= SSD_BMSTATUS_WARNING
;
7010 *status
= SSD_BMSTATUS_OK
;
/* >V3 but <V3.2: ask the battery monitor directly. */
7012 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7013 ret
= __ssd_bm_status(dev
, status
);
/* <=V3: no BM hardware to query. */
7015 *status
= SSD_BMSTATUS_OK
;
7017 mutex_unlock(&dev
->fw_mutex
);
/* NOTE(review): line-mangled extraction; the early-return body and final
 * return are in dropped lines. */
/* Public API: fill *paddr with the device's PCI location
 * (domain/bus/slot/function) taken from the underlying pci_dev. */
7022 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7024 struct ssd_device
*dev
;
/* Reject NULL bdev/paddr or a bdev with no gendisk attached. */
7026 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7030 dev
= bdev
->bd_disk
->private_data
;
/* PCI domain (segment) number of the device's bus. */
7032 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7033 paddr
->bus
= dev
->pdev
->bus
->number
;
/* Slot and function decoded from the combined devfn field. */
7034 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7035 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7041 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7046 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7050 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7051 if (0xffffffffull
== acc
->threshold_l1
) {
7054 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7055 if (0xffffffffull
== acc
->threshold_l2
) {
7060 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7061 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7062 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7063 if (0xffffffffull
== acc
->val
) {
7066 if (val
> acc
->val
) {
7075 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7080 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7084 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7085 if (0xffffffffull
== acc
->threshold_l1
) {
7088 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7089 if (0xffffffffull
== acc
->threshold_l2
) {
7094 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7095 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7096 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7097 if (0xffffffffull
== acc
->val
) {
7101 if (val
> acc
->val
) {
7112 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7114 struct ssd_ram_op_msg
*msg
;
7116 size_t len
= length
;
7120 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7121 || !length
|| length
> dev
->hw_info
.ram_max_len
7122 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7126 len
/= dev
->hw_info
.ram_align
;
7127 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7129 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7130 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7131 ret
= dma_mapping_error(buf_dma
);
7133 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7136 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7137 goto out_dma_mapping
;
7140 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7142 msg
->fun
= SSD_FUNC_RAM_READ
;
7143 msg
->ctrl_idx
= ctrl_idx
;
7144 msg
->start
= (uint32_t)ofs_w
;
7148 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7151 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7157 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7159 struct ssd_ram_op_msg
*msg
;
7161 size_t len
= length
;
7165 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7166 || !length
|| length
> dev
->hw_info
.ram_max_len
7167 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7171 len
/= dev
->hw_info
.ram_align
;
7172 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7174 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7175 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7176 ret
= dma_mapping_error(buf_dma
);
7178 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7181 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7182 goto out_dma_mapping
;
7185 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7187 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7188 msg
->ctrl_idx
= ctrl_idx
;
7189 msg
->start
= (uint32_t)ofs_w
;
7193 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7196 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7203 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7210 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7211 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7216 len
= dev
->hw_info
.ram_max_len
;
7217 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7221 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7234 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7241 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7242 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7247 len
= dev
->hw_info
.ram_max_len
;
7248 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7252 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
/* NOTE(review): line-mangled extraction; the error-return bodies of the
 * three range checks and the success return are in dropped lines. */
/* Validate a (flash, page, ctrl_idx) address against the device geometry
 * before a NAND operation. flash encodes channel + chip: channel is
 * flash % max_ch, chip index is flash / max_ch. */
7267 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7269 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7270 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
/* Controller index must be within the populated controller count. */
7272 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
/* Channel and chip must be within the populated geometry. */
7276 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
/* Page index must fall inside the device (blocks * pages-per-block). */
7280 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7286 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7288 struct ssd_nand_op_msg
*msg
;
7295 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7296 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7297 ret
= dma_mapping_error(buf_dma
);
7299 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7302 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7303 goto out_dma_mapping
;
7306 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7307 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7311 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7313 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7314 msg
->chip_no
= flash
;
7315 msg
->chip_ce
= chip
;
7316 msg
->ctrl_idx
= ctrl_idx
;
7319 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7322 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7329 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7330 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7332 struct ssd_nand_op_msg
*msg
;
7341 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7345 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7350 length
= page_count
* dev
->hw_info
.page_size
;
7352 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7353 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7354 ret
= dma_mapping_error(buf_dma
);
7356 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7359 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7360 goto out_dma_mapping
;
7363 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7364 flash
= (flash
<< 1) | chip
;
7368 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7370 msg
->fun
= SSD_FUNC_NAND_READ
;
7371 msg
->ctrl_idx
= ctrl_idx
;
7372 msg
->chip_no
= flash
;
7373 msg
->chip_ce
= chip
;
7374 msg
->page_no
= page
;
7375 msg
->page_count
= page_count
;
7378 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7381 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7388 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7389 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7391 struct ssd_nand_op_msg
*msg
;
7400 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7404 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7409 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7411 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7412 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7413 ret
= dma_mapping_error(buf_dma
);
7415 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7418 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7419 goto out_dma_mapping
;
7422 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7423 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7427 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7429 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7430 msg
->ctrl_idx
= ctrl_idx
;
7431 msg
->chip_no
= flash
;
7432 msg
->chip_ce
= chip
;
7433 msg
->page_no
= page
;
7434 msg
->page_count
= count
;
7437 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7440 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7447 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7448 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7450 struct ssd_nand_op_msg
*msg
;
7455 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7467 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7472 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7474 /* write data to ram */
7475 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7480 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7481 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7482 ret
= dma_mapping_error(buf_dma
);
7484 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7487 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7488 goto out_dma_mapping
;
7491 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7492 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7496 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7498 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7499 msg
->ctrl_idx
= ctrl_idx
;
7500 msg
->chip_no
= flash
;
7501 msg
->chip_ce
= chip
;
7503 msg
->page_no
= page
;
7504 msg
->page_count
= count
;
7507 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7510 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7516 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7518 struct ssd_nand_op_msg
*msg
;
7521 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7526 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7527 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7531 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7533 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7534 msg
->ctrl_idx
= ctrl_idx
;
7535 msg
->chip_no
= flash
;
7536 msg
->chip_ce
= chip
;
7537 msg
->page_no
= page
;
7539 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_update_bbt() - ask the firmware to refresh the bad-block table for
 * one flash on one controller. Pre-V3 protocol uses the smaller
 * ssd_flush_msg layout (overlaid on the same dmsg slot); V3+ fills the
 * nand-op message directly. Both paths use fun = SSD_FUNC_FLUSH.
 * NOTE(review): garbled fragment — some original lines are missing;
 * code kept verbatim.
 */
7545 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7547 struct ssd_nand_op_msg
*msg
;
7548 struct ssd_flush_msg
*fmsg
;
7551 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7556 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7558 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
/* legacy message layout: reuse the slot as a flush message */
7559 fmsg
= (struct ssd_flush_msg
*)msg
;
7561 fmsg
->fun
= SSD_FUNC_FLUSH
;
7563 fmsg
->flash
= flash
;
7564 fmsg
->ctrl_idx
= ctrl_idx
;
7566 msg
->fun
= SSD_FUNC_FLUSH
;
7568 msg
->chip_no
= flash
;
7569 msg
->ctrl_idx
= ctrl_idx
;
7572 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7578 /* flash controller init state */
/*
 * __ssd_check_init_state() - probe which controllers are present (test
 * register write/read-back), verify SSD_READY_REG readiness bits and
 * cross-check per-controller flash/chip info registers, then poll the
 * per-chip init-state bitmap until every chip reports initialized or
 * max_wait expires (longer wait for protocol >= V3.2).
 * NOTE(review): garbled fragment — loop bodies, error paths and the final
 * return are partially missing; code kept verbatim.
 */
7579 static int __ssd_check_init_state(struct ssd_device
*dev
)
7581 uint32_t *init_state
= NULL
;
7582 int reg_base
, reg_sz
;
7583 int max_wait
= SSD_INIT_MAX_WAIT
;
/* detect live controllers: write a test pattern, expect inverted echo */
7589 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7590 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7591 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7592 if (read_data == ~test_data) {
7593 //dev->hw_info.nr_ctrl++;
7594 dev->hw_info.nr_ctrl_map |= 1<<i;
/* check per-controller ready bits */
7600 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7602 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7603 if (((read_data>>i) & 0x1) == 0) {
7608 if (dev->hw_info.nr_ctrl != j) {
7609 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
/* all controllers must report identical flash info */
7615 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7616 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7617 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7618 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7624 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7625 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7626 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7627 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7632 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7633 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7634 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7635 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
7641 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7642 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7645 reg_base
= dev
->protocol_info
.init_state_reg
;
7646 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
/* snapshot the init-state bitmap registers into a temporary buffer */
7648 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
7653 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7655 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7656 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7659 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7660 /* just check the last bit, no need to check all channel */
7661 ch_start
= dev
->hw_info
.max_ch
- 1;
7666 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7667 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
7668 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
/* chip not ready yet: sleep and retry while within max_wait */
7673 if (init_wait
<= max_wait
) {
7674 msleep(SSD_INIT_WAIT
);
7677 if (k
< dev
->hw_info
.nr_ch
) {
7678 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7679 dev
->name
, i
, j
, k
);
7681 hio_warn("%s: controller %d chip %d init failed\n",
7692 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
/*
 * ssd_check_init_state() - public wrapper: skip the hardware init-state
 * check unless the driver runs in standard mode.
 */
7698 static int ssd_check_init_state(struct ssd_device
*dev
)
7700 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7704 return __ssd_check_init_state(dev
);
7707 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7709 /* reset flash controller etc */
/*
 * __ssd_reset() - perform a controller reset of the requested type
 * (SSD_RST_NOINIT, SSD_RST_NORMAL, or SSD_RST_FULL) under fw_mutex,
 * restore the flush-timeout setting, log the event, then re-run the
 * init-state check. Full reset requires protocol >= V3.2.
 * NOTE(review): garbled fragment — error returns and closing braces are
 * missing; code kept verbatim.
 */
7710 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7712 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7716 mutex_lock(&dev
->fw_mutex
);
7718 if (type
== SSD_RST_NOINIT
) { //no init
7719 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7720 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7721 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7722 } else { // full reset
/* full reset unsupported on pre-V3.2 protocol */
7723 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7724 mutex_unlock(&dev
->fw_mutex
);
7728 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
7731 ssd_reset_resp_ptr(dev
);
7734 #ifdef SSD_OT_PROTECT
/* reprogram flush timeout for the current write mode after the reset */
7741 ssd_set_flush_timeout(dev
, dev
->wmode
);
7743 mutex_unlock(&dev
->fw_mutex
);
7744 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7746 return __ssd_check_init_state(dev
);
/*
 * ssd_save_md() - request a firmware metadata save (SSD_FUNC_FLUSH) via
 * ssd_do_request(). Only issued in standard driver mode, on protocol
 * newer than V3, and when dev->save_md is set.
 * NOTE(review): garbled fragment — early-return bodies missing; code
 * kept verbatim.
 */
7749 static int ssd_save_md(struct ssd_device
*dev
)
7751 struct ssd_nand_op_msg
*msg
;
7754 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7757 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7761 if (!dev
->save_md
) {
7765 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7767 msg
->fun
= SSD_FUNC_FLUSH
;
7772 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_save_md() - same metadata-save request as ssd_save_md(),
 * but issued through ssd_do_barrier_request() (ordered with respect to
 * in-flight I/O, e.g. during reset).
 * NOTE(review): garbled fragment — early-return bodies missing; code
 * kept verbatim.
 */
7778 static int ssd_barrier_save_md(struct ssd_device
*dev
)
7780 struct ssd_nand_op_msg
*msg
;
7783 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7786 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7790 if (!dev
->save_md
) {
7794 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7796 msg
->fun
= SSD_FUNC_FLUSH
;
7801 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_flush() - send a cache-flush command (SSD_FUNC_FLUSH) to the
 * device. Pre-V3 protocol uses the ssd_flush_msg layout overlaid on the
 * dmsg slot; V3+ fills the nand-op message directly.
 * NOTE(review): garbled fragment — some original lines missing; code
 * kept verbatim.
 */
7807 static int ssd_flush(struct ssd_device
*dev
)
7809 struct ssd_nand_op_msg
*msg
;
7810 struct ssd_flush_msg
*fmsg
;
7813 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7816 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7818 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7819 fmsg
= (struct ssd_flush_msg
*)msg
;
7821 fmsg
->fun
= SSD_FUNC_FLUSH
;
7826 msg
->fun
= SSD_FUNC_FLUSH
;
7832 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_flush() - same flush command as ssd_flush(), but issued
 * through ssd_do_barrier_request() so it is ordered against in-flight
 * I/O (used by reset and write-mode switching).
 * NOTE(review): garbled fragment — some original lines missing; code
 * kept verbatim.
 */
7838 static int ssd_barrier_flush(struct ssd_device
*dev
)
7840 struct ssd_nand_op_msg
*msg
;
7841 struct ssd_flush_msg
*fmsg
;
7844 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7847 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7849 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7850 fmsg
= (struct ssd_flush_msg
*)msg
;
7852 fmsg
->fun
= SSD_FUNC_FLUSH
;
7857 msg
->fun
= SSD_FUNC_FLUSH
;
7863 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/* per-write-mode flush timeout values programmed into SSD_FLUSH_TIMEOUT_REG */
7869 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
7870 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
7871 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
/*
 * ssd_set_flush_timeout() - encode the write mode `m` (top 2 bits after
 * shift 28) together with the mode-specific timeout and write the result
 * to SSD_FLUSH_TIMEOUT_REG. No-op on protocol < V3.1.1.
 * NOTE(review): garbled fragment — the switch statement head and default
 * case are missing; code kept verbatim.
 */
7872 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
7877 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7882 case SSD_WMODE_BUFFER
:
7883 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7885 case SSD_WMODE_BUFFER_EX
:
/* pre-V3.2.1 firmware uses the shorter buffer-ex timeout */
7886 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
7887 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
7889 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7893 to
= SSD_WMODE_FUA_TIMEOUT
;
7899 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
7901 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
/*
 * ssd_do_switch_wmode() - apply a new write mode under an I/O barrier:
 * start barrier, flush the device cache, reprogram the flush timeout,
 * then end the barrier.
 * NOTE(review): garbled fragment — error checks between steps are
 * missing; code kept verbatim.
 */
7904 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
7908 ret
= ssd_barrier_start(dev
);
7913 ret
= ssd_barrier_flush(dev
);
7915 goto out_barrier_end
;
7918 /* set controller flush timeout */
7919 ssd_set_flush_timeout(dev
, m
);
7925 ssd_barrier_end(dev
);
/*
 * ssd_switch_wmode() - resolve the requested mode `m` (AUTO falls back to
 * FUA when the power-loss capacitor reports a fault, otherwise to the
 * protocol-dependent default; DEFAULT maps to the default) and switch the
 * device if the resolved mode differs from the current one.
 * NOTE(review): garbled fragment — declarations and returns missing;
 * code kept verbatim.
 */
7930 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
7936 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
7940 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7941 default_wmode
= SSD_WMODE_BUFFER
;
7943 default_wmode
= SSD_WMODE_BUFFER_EX
;
7946 if (SSD_WMODE_AUTO
== m
) {
7947 /* battery fault ? */
7948 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7949 next_wmode
= SSD_WMODE_FUA
;
7951 next_wmode
= default_wmode
;
7953 } else if (SSD_WMODE_DEFAULT
== m
) {
7954 next_wmode
= default_wmode
;
7959 if (next_wmode
!= dev
->wmode
) {
7960 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
7961 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
7963 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
/*
 * ssd_init_wmode() - set dev->wmode at init time from dev->user_wmode
 * using the same AUTO/DEFAULT resolution rules as ssd_switch_wmode(),
 * then program the matching flush timeout.
 * NOTE(review): garbled fragment — declarations and return missing;
 * code kept verbatim.
 */
7970 static int ssd_init_wmode(struct ssd_device
*dev
)
7975 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7976 default_wmode
= SSD_WMODE_BUFFER
;
7978 default_wmode
= SSD_WMODE_BUFFER_EX
;
7982 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
7983 /* battery fault ? */
7984 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7985 dev
->wmode
= SSD_WMODE_FUA
;
7987 dev
->wmode
= default_wmode
;
7989 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
7990 dev
->wmode
= default_wmode
;
7992 dev
->wmode
= dev
->user_wmode
;
7994 ssd_set_flush_timeout(dev
, dev
->wmode
);
/*
 * __ssd_set_wmode() - validate and record a user-requested write mode,
 * log it (SSD_LOG_SET_WMODE), then switch the device. Rejected on
 * protocol < V3.1.1 (old firmware) and for out-of-range modes.
 * NOTE(review): garbled fragment — error-return bodies missing; code
 * kept verbatim.
 */
7999 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8003 /* not support old fw*/
8004 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8009 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8014 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8016 dev
->user_wmode
= m
;
8018 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
/*
 * ssd_set_wmode() - exported entry point: resolve the ssd_device from a
 * block_device (guarding against NULL bdev/bd_disk) and delegate to
 * __ssd_set_wmode().
 */
8027 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8029 struct ssd_device
*dev
;
8031 if (!bdev
|| !(bdev
->bd_disk
)) {
8035 dev
= bdev
->bd_disk
->private_data
;
8037 return __ssd_set_wmode(dev
, m
);
/*
 * ssd_do_reset() - normal reset sequence: take the SSD_RESETING state
 * bit (bail out if already resetting), stop the work queue, start an I/O
 * barrier, issue __ssd_reset(SSD_RST_NORMAL), then end the barrier,
 * restart the work queue and clear the state bit.
 * NOTE(review): garbled fragment — returns and closing braces missing;
 * code kept verbatim.
 */
8040 static int ssd_do_reset(struct ssd_device
*dev
)
8044 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8048 ssd_stop_workq(dev
);
8050 ret
= ssd_barrier_start(dev
);
8055 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8057 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8060 //ret = __ssd_reset(dev, SSD_RST_FULL);
8061 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8064 goto out_barrier_end
;
8068 ssd_barrier_end(dev
);
8070 ssd_start_workq(dev
);
8071 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_full_reset() - like ssd_do_reset() but additionally performs a
 * barrier flush and barrier metadata save before the controller reset.
 * Note the SSD_RST_FULL path is commented out; both protocol branches
 * currently issue SSD_RST_NORMAL.
 * NOTE(review): garbled fragment — returns and closing braces missing;
 * code kept verbatim.
 */
8075 static int ssd_full_reset(struct ssd_device
*dev
)
8079 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8083 ssd_stop_workq(dev
);
8085 ret
= ssd_barrier_start(dev
);
8090 ret
= ssd_barrier_flush(dev
);
8092 goto out_barrier_end
;
8095 ret
= ssd_barrier_save_md(dev
);
8097 goto out_barrier_end
;
8100 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8102 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8105 //ret = __ssd_reset(dev, SSD_RST_FULL);
8106 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8109 goto out_barrier_end
;
8113 ssd_barrier_end(dev
);
8115 ssd_start_workq(dev
);
8116 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_reset() - exported entry point: resolve the ssd_device from a
 * block_device (guarding against NULL bdev/bd_disk) and run a full
 * reset.
 */
8120 int ssd_reset(struct block_device
*bdev
)
8122 struct ssd_device
*dev
;
8124 if (!bdev
|| !(bdev
->bd_disk
)) {
8128 dev
= bdev
->bd_disk
->private_data
;
8130 return ssd_full_reset(dev
);
8133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * ssd_issue_flush_fn() - legacy (< 2.6.20) block-layer flush hook:
 * forward to ssd_flush() for the device stored in q->queuedata.
 */
8134 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8135 sector_t
*error_sector
)
8137 struct ssd_device
*dev
= q
->queuedata
;
8139 return ssd_flush(dev
);
/*
 * ssd_submit_pbio() - submit a "physical" bio to the device. Rejects the
 * bio with the appropriate error (-ENODEV offline, -EIO after timeouts
 * when SSD_DEBUG_ERR, -EOPNOTSUPP for barrier/FUA requests, -EROFS for
 * writes to a read-only device), then either submits directly or, if a
 * send queue is active (SSD_QUEUE_PBIO), marks the bio BIO_SSD_PBIO and
 * queues it. bio_endio() call shapes differ per kernel version.
 * NOTE(review): garbled fragment — returns and closing braces missing;
 * code kept verbatim.
 */
8143 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8145 struct ssd_device
*dev
= q
->queuedata
;
8146 #ifdef SSD_QUEUE_PBIO
8150 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8151 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8152 bio_endio(bio
, -ENODEV
);
8154 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8159 #ifdef SSD_DEBUG_ERR
8160 if (atomic_read(&dev
->tocnt
)) {
8161 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8162 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8163 bio_endio(bio
, -EIO
);
8165 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier requests are not supported; detection API varies by kernel */
8171 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8172 if (unlikely(bio_barrier(bio
))) {
8173 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8174 bio_endio(bio
, -EOPNOTSUPP
);
8176 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8180 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8181 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8182 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8183 bio_endio(bio
, -EOPNOTSUPP
);
8185 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8189 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8190 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8191 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8192 bio_endio(bio
, -EOPNOTSUPP
);
8194 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
/* FUA not supported on this path */
8200 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8201 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8202 bio_endio(bio
, -EOPNOTSUPP
);
8204 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8210 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8211 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8212 bio_endio(bio
, -EROFS
);
8214 bio_endio(bio
, bio
->bi_size
, -EROFS
);
8219 #ifdef SSD_QUEUE_PBIO
/* fast path when nothing is queued; otherwise mark and queue the bio */
8220 if (0 == atomic_read(&dev
->in_sendq
)) {
8221 ret
= __ssd_submit_pbio(dev
, bio
, 0);
8225 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8226 ssd_queue_bio(dev
, bio
);
8229 __ssd_submit_pbio(dev
, bio
, 1);
/*
 * ssd_make_request() - block-layer make_request entry point (returns int
 * before 3.2, void after). Applies the same rejection rules as
 * ssd_submit_pbio() (-ENODEV, -EIO, -EOPNOTSUPP for barrier/FUA), treats
 * a data-less REQ_FLUSH as an immediate success per
 * writeback_cache_control.txt, then submits or queues the bio.
 * NOTE(review): garbled fragment — returns and closing braces missing;
 * code kept verbatim.
 */
8236 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
8237 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8239 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8242 struct ssd_device
*dev
= q
->queuedata
;
8245 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8246 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8247 bio_endio(bio
, -ENODEV
);
8249 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8254 #ifdef SSD_DEBUG_ERR
8255 if (atomic_read(&dev
->tocnt
)) {
8256 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8258 bio_endio(bio
, -EIO
);
8260 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier requests are not supported; detection API varies by kernel */
8266 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8267 if (unlikely(bio_barrier(bio
))) {
8268 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8269 bio_endio(bio
, -EOPNOTSUPP
);
8271 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8275 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8276 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8277 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8278 bio_endio(bio
, -EOPNOTSUPP
);
8280 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8284 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8285 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8286 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8287 bio_endio(bio
, -EOPNOTSUPP
);
8289 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8295 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8296 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8297 bio_endio(bio
, -EOPNOTSUPP
);
8299 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8304 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8305 if (unlikely((bio
->bi_rw
& REQ_FLUSH
) && !bio_sectors(bio
))) {
8312 if (0 == atomic_read(&dev
->in_sendq
)) {
8313 ret
= ssd_submit_bio(dev
, bio
, 0);
8317 ssd_queue_bio(dev
, bio
);
8321 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
8328 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
/*
 * ssd_block_getgeo() - HDIO_GETGEO support: report a synthetic geometry
 * derived from the device size (cylinders = size/64, low 6 bits masked;
 * heads/sectors presumably fixed in the missing lines — TODO confirm).
 * NOTE(review): garbled fragment — code kept verbatim.
 */
8329 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8331 struct ssd_device
*dev
;
8337 dev
= bdev
->bd_disk
->private_data
;
8344 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8349 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8350 static int ssd_init_blkdev(struct ssd_device
*dev
);
8351 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8353 void __user
*argp
= (void __user
*)arg
;
8354 void __user
*buf
= NULL
;
8359 case SSD_CMD_GET_PROTOCOL_INFO
:
8360 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8361 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8367 case SSD_CMD_GET_HW_INFO
:
8368 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8369 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8375 case SSD_CMD_GET_ROM_INFO
:
8376 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8377 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8383 case SSD_CMD_GET_SMART
: {
8384 struct ssd_smart smart
;
8387 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8389 mutex_lock(&dev
->gd_mutex
);
8390 ssd_update_smart(dev
, &smart
);
8391 mutex_unlock(&dev
->gd_mutex
);
8393 /* combine the volatile log info */
8394 if (dev
->log_info
.nr_log
) {
8395 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8396 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8400 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8401 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8409 case SSD_CMD_GET_IDX
:
8410 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8411 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8417 case SSD_CMD_GET_AMOUNT
: {
8418 int nr_ssd
= atomic_read(&ssd_nr
);
8419 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8420 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8427 case SSD_CMD_GET_TO_INFO
: {
8428 int tocnt
= atomic_read(&dev
->tocnt
);
8430 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8431 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8438 case SSD_CMD_GET_DRV_VER
: {
8439 char ver
[] = DRIVER_VERSION
;
8440 int len
= sizeof(ver
);
8442 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8443 len
= (DRIVER_VERSION_LEN
- 1);
8445 if (copy_to_user(argp
, ver
, len
)) {
8446 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8453 case SSD_CMD_GET_BBACC_INFO
: {
8454 struct ssd_acc_info acc
;
8456 mutex_lock(&dev
->fw_mutex
);
8457 ret
= ssd_bb_acc(dev
, &acc
);
8458 mutex_unlock(&dev
->fw_mutex
);
8463 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8464 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8471 case SSD_CMD_GET_ECACC_INFO
: {
8472 struct ssd_acc_info acc
;
8474 mutex_lock(&dev
->fw_mutex
);
8475 ret
= ssd_ec_acc(dev
, &acc
);
8476 mutex_unlock(&dev
->fw_mutex
);
8481 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8482 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8489 case SSD_CMD_GET_HW_INFO_EXT
:
8490 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8491 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8497 case SSD_CMD_REG_READ
: {
8498 struct ssd_reg_op_info reg_info
;
8500 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8501 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8506 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8511 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8512 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8513 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8521 case SSD_CMD_REG_WRITE
: {
8522 struct ssd_reg_op_info reg_info
;
8524 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8525 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8530 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8535 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8540 case SSD_CMD_SPI_READ
: {
8541 struct ssd_spi_op_info spi_info
;
8544 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8545 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8551 size
= spi_info
.len
;
8554 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8559 kbuf
= kmalloc(size
, GFP_KERNEL
);
8565 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8571 if (copy_to_user(buf
, kbuf
, size
)) {
8572 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8583 case SSD_CMD_SPI_WRITE
: {
8584 struct ssd_spi_op_info spi_info
;
8587 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8588 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8594 size
= spi_info
.len
;
8597 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8602 kbuf
= kmalloc(size
, GFP_KERNEL
);
8608 if (copy_from_user(kbuf
, buf
, size
)) {
8609 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8615 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8626 case SSD_CMD_SPI_ERASE
: {
8627 struct ssd_spi_op_info spi_info
;
8630 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8631 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8638 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8643 ret
= ssd_spi_block_erase(dev
, off
);
8651 case SSD_CMD_I2C_READ
: {
8652 struct ssd_i2c_op_info i2c_info
;
8656 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8657 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8662 saddr
= i2c_info
.saddr
;
8663 rsize
= i2c_info
.rsize
;
8664 buf
= i2c_info
.rbuf
;
8666 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8671 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8677 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8683 if (copy_to_user(buf
, kbuf
, rsize
)) {
8684 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8695 case SSD_CMD_I2C_WRITE
: {
8696 struct ssd_i2c_op_info i2c_info
;
8700 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8701 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8706 saddr
= i2c_info
.saddr
;
8707 wsize
= i2c_info
.wsize
;
8708 buf
= i2c_info
.wbuf
;
8710 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8715 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8721 if (copy_from_user(kbuf
, buf
, wsize
)) {
8722 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8728 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8739 case SSD_CMD_I2C_WRITE_READ
: {
8740 struct ssd_i2c_op_info i2c_info
;
8746 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8747 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8752 saddr
= i2c_info
.saddr
;
8753 wsize
= i2c_info
.wsize
;
8754 rsize
= i2c_info
.rsize
;
8755 buf
= i2c_info
.wbuf
;
8757 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8762 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8767 size
= wsize
+ rsize
;
8769 kbuf
= kmalloc(size
, GFP_KERNEL
);
8775 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8776 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8782 buf
= i2c_info
.rbuf
;
8784 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8790 if (copy_to_user(buf
, kbuf
, rsize
)) {
8791 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8802 case SSD_CMD_SMBUS_SEND_BYTE
: {
8803 struct ssd_smbus_op_info smbus_info
;
8804 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8808 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8809 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8814 saddr
= smbus_info
.saddr
;
8815 buf
= smbus_info
.buf
;
8818 if (copy_from_user(smb_data
, buf
, size
)) {
8819 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8824 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
8832 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
8833 struct ssd_smbus_op_info smbus_info
;
8834 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8838 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8839 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8844 saddr
= smbus_info
.saddr
;
8845 buf
= smbus_info
.buf
;
8848 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
8853 if (copy_to_user(buf
, smb_data
, size
)) {
8854 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8862 case SSD_CMD_SMBUS_WRITE_BYTE
: {
8863 struct ssd_smbus_op_info smbus_info
;
8864 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8869 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8870 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8875 saddr
= smbus_info
.saddr
;
8876 command
= smbus_info
.cmd
;
8877 buf
= smbus_info
.buf
;
8880 if (copy_from_user(smb_data
, buf
, size
)) {
8881 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8886 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
8894 case SSD_CMD_SMBUS_READ_BYTE
: {
8895 struct ssd_smbus_op_info smbus_info
;
8896 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8901 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8902 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8907 saddr
= smbus_info
.saddr
;
8908 command
= smbus_info
.cmd
;
8909 buf
= smbus_info
.buf
;
8912 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
8917 if (copy_to_user(buf
, smb_data
, size
)) {
8918 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8926 case SSD_CMD_SMBUS_WRITE_WORD
: {
8927 struct ssd_smbus_op_info smbus_info
;
8928 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8933 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8934 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8939 saddr
= smbus_info
.saddr
;
8940 command
= smbus_info
.cmd
;
8941 buf
= smbus_info
.buf
;
8944 if (copy_from_user(smb_data
, buf
, size
)) {
8945 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8950 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
8958 case SSD_CMD_SMBUS_READ_WORD
: {
8959 struct ssd_smbus_op_info smbus_info
;
8960 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8965 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8966 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8971 saddr
= smbus_info
.saddr
;
8972 command
= smbus_info
.cmd
;
8973 buf
= smbus_info
.buf
;
8976 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
8981 if (copy_to_user(buf
, smb_data
, size
)) {
8982 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8990 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
8991 struct ssd_smbus_op_info smbus_info
;
8992 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8997 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8998 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9003 saddr
= smbus_info
.saddr
;
9004 command
= smbus_info
.cmd
;
9005 buf
= smbus_info
.buf
;
9006 size
= smbus_info
.size
;
9008 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9013 if (copy_from_user(smb_data
, buf
, size
)) {
9014 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9019 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9027 case SSD_CMD_SMBUS_READ_BLOCK
: {
9028 struct ssd_smbus_op_info smbus_info
;
9029 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9034 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9035 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9040 saddr
= smbus_info
.saddr
;
9041 command
= smbus_info
.cmd
;
9042 buf
= smbus_info
.buf
;
9043 size
= smbus_info
.size
;
9045 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9050 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9055 if (copy_to_user(buf
, smb_data
, size
)) {
9056 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9064 case SSD_CMD_BM_GET_VER
: {
9067 ret
= ssd_bm_get_version(dev
, &ver
);
9072 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9073 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9081 case SSD_CMD_BM_GET_NR_CAP
: {
9084 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9089 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9090 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9098 case SSD_CMD_BM_CAP_LEARNING
: {
9099 ret
= ssd_bm_enter_cap_learning(dev
);
9108 case SSD_CMD_CAP_LEARN
: {
9111 ret
= ssd_cap_learn(dev
, &cap
);
9116 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9117 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9125 case SSD_CMD_GET_CAP_STATUS
: {
9128 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9132 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9133 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9141 case SSD_CMD_RAM_READ
: {
9142 struct ssd_ram_op_info ram_info
;
9145 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9148 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9149 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9154 ofs
= ram_info
.start
;
9155 length
= ram_info
.length
;
9157 ctrl_idx
= ram_info
.ctrl_idx
;
9159 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9164 kbuf
= kmalloc(len
, GFP_KERNEL
);
9170 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9171 if ((length
- rlen
) < len
) {
9172 len
= length
- rlen
;
9175 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9180 if (copy_to_user(buf
, kbuf
, len
)) {
9191 case SSD_CMD_RAM_WRITE
: {
9192 struct ssd_ram_op_info ram_info
;
9195 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9198 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9199 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9203 ofs
= ram_info
.start
;
9204 length
= ram_info
.length
;
9206 ctrl_idx
= ram_info
.ctrl_idx
;
9208 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9213 kbuf
= kmalloc(len
, GFP_KERNEL
);
9219 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9220 if ((length
- wlen
) < len
) {
9221 len
= length
- wlen
;
9224 if (copy_from_user(kbuf
, buf
, len
)) {
9229 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9240 case SSD_CMD_NAND_READ_ID
: {
9241 struct ssd_flash_op_info flash_info
;
9242 int chip_no
, chip_ce
, length
, ctrl_idx
;
9244 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9245 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9250 chip_no
= flash_info
.flash
;
9251 chip_ce
= flash_info
.chip
;
9252 ctrl_idx
= flash_info
.ctrl_idx
;
9253 buf
= flash_info
.buf
;
9254 length
= dev
->hw_info
.id_size
;
9256 //kbuf = kmalloc(length, GFP_KERNEL);
9257 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9262 memset(kbuf
, 0, length
);
9264 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9270 if (copy_to_user(buf
, kbuf
, length
)) {
9281 case SSD_CMD_NAND_READ
: { //with oob
9282 struct ssd_flash_op_info flash_info
;
9284 int flash
, chip
, page
, ctrl_idx
;
9287 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9288 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9293 flash
= flash_info
.flash
;
9294 chip
= flash_info
.chip
;
9295 page
= flash_info
.page
;
9296 buf
= flash_info
.buf
;
9297 ctrl_idx
= flash_info
.ctrl_idx
;
9299 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9301 kbuf
= kmalloc(length
, GFP_KERNEL
);
9307 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9308 if (ret
&& (-EIO
!= ret
)) {
9313 if (copy_to_user(buf
, kbuf
, length
)) {
9325 case SSD_CMD_NAND_WRITE
: {
9326 struct ssd_flash_op_info flash_info
;
9327 int flash
, chip
, page
, ctrl_idx
;
9330 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9331 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9336 flash
= flash_info
.flash
;
9337 chip
= flash_info
.chip
;
9338 page
= flash_info
.page
;
9339 buf
= flash_info
.buf
;
9340 ctrl_idx
= flash_info
.ctrl_idx
;
9342 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9344 kbuf
= kmalloc(length
, GFP_KERNEL
);
9350 if (copy_from_user(kbuf
, buf
, length
)) {
9356 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9366 case SSD_CMD_NAND_ERASE
: {
9367 struct ssd_flash_op_info flash_info
;
9368 int flash
, chip
, page
, ctrl_idx
;
9370 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9371 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9376 flash
= flash_info
.flash
;
9377 chip
= flash_info
.chip
;
9378 page
= flash_info
.page
;
9379 ctrl_idx
= flash_info
.ctrl_idx
;
9381 if ((page
% dev
->hw_info
.page_count
) != 0) {
9386 //hio_warn("erase fs = %llx\n", ofs);
9387 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9395 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9396 struct ssd_flash_op_info flash_info
;
9398 int flash
, chip
, page
, ctrl_idx
;
9400 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9401 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9406 flash
= flash_info
.flash
;
9407 chip
= flash_info
.chip
;
9408 page
= flash_info
.page
;
9409 buf
= flash_info
.buf
;
9410 ctrl_idx
= flash_info
.ctrl_idx
;
9412 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9414 kbuf
= kmalloc(length
, GFP_KERNEL
);
9420 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9421 if (-EIO
== ret
) { //ingore EIO
9429 if (copy_to_user(buf
, kbuf
, length
)) {
9439 case SSD_CMD_UPDATE_BBT
: {
9440 struct ssd_flash_op_info flash_info
;
9441 int ctrl_idx
, flash
;
9443 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9444 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9449 ctrl_idx
= flash_info
.ctrl_idx
;
9450 flash
= flash_info
.flash
;
9451 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9459 case SSD_CMD_CLEAR_ALARM
:
9460 ssd_clear_alarm(dev
);
9463 case SSD_CMD_SET_ALARM
:
9468 ret
= ssd_do_reset(dev
);
9471 case SSD_CMD_RELOAD_FW
:
9473 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9474 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9475 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9476 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9481 case SSD_CMD_UNLOAD_DEV
: {
9482 if (atomic_read(&dev
->refcnt
)) {
9488 ssd_save_smart(dev
);
9490 ret
= ssd_flush(dev
);
9495 /* cleanup the block device */
9496 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9497 mutex_lock(&dev
->gd_mutex
);
9498 ssd_cleanup_blkdev(dev
);
9499 mutex_unlock(&dev
->gd_mutex
);
9505 case SSD_CMD_LOAD_DEV
: {
9507 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9512 ret
= ssd_init_smart(dev
);
9514 hio_warn("%s: init info: failed\n", dev
->name
);
9518 ret
= ssd_init_blkdev(dev
);
9520 hio_warn("%s: register block device: failed\n", dev
->name
);
9523 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9528 case SSD_CMD_UPDATE_VP
: {
9530 uint32_t new_vp
, new_vp1
= 0;
9532 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9537 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9538 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9543 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9548 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9549 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9551 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9552 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9553 new_vp1
= val
& 0x3FF;
9555 new_vp1
= val
& 0x7FFF;
9558 if (new_vp1
== new_vp
) {
9563 /*if (new_vp == dev->hw_info.valid_pages) {
9568 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9570 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9575 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9576 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9577 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9583 dev
->hw_info
.valid_pages
= new_vp
;
9584 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9585 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9586 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9591 case SSD_CMD_FULL_RESET
: {
9592 ret
= ssd_full_reset(dev
);
9596 case SSD_CMD_GET_NR_LOG
: {
9597 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9604 case SSD_CMD_GET_LOG
: {
9605 uint32_t length
= dev
->rom_info
.log_sz
;
9609 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9617 case SSD_CMD_LOG_LEVEL
: {
9619 if (copy_from_user(&level
, argp
, sizeof(int))) {
9620 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9625 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9626 level
= SSD_LOG_LEVEL_ERR
;
9629 //just for showing log, no need to protect
9634 case SSD_CMD_OT_PROTECT
: {
9637 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9638 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9643 ssd_set_ot_protect(dev
, !!protect
);
9647 case SSD_CMD_GET_OT_STATUS
: {
9648 int status
= ssd_get_ot_status(dev
, &status
);
9650 if (copy_to_user(argp
, &status
, sizeof(int))) {
9651 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9658 case SSD_CMD_CLEAR_LOG
: {
9659 ret
= ssd_clear_log(dev
);
9663 case SSD_CMD_CLEAR_SMART
: {
9664 ret
= ssd_clear_smart(dev
);
9668 case SSD_CMD_SW_LOG
: {
9669 struct ssd_sw_log_info sw_log
;
9671 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9672 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9677 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9681 case SSD_CMD_GET_LABEL
: {
9683 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9688 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9689 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9696 case SSD_CMD_GET_VERSION
: {
9697 struct ssd_version_info ver
;
9699 mutex_lock(&dev
->fw_mutex
);
9700 ret
= __ssd_get_version(dev
, &ver
);
9701 mutex_unlock(&dev
->fw_mutex
);
9706 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9707 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9714 case SSD_CMD_GET_TEMPERATURE
: {
9717 mutex_lock(&dev
->fw_mutex
);
9718 ret
= __ssd_get_temperature(dev
, &temp
);
9719 mutex_unlock(&dev
->fw_mutex
);
9724 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9725 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9732 case SSD_CMD_GET_BMSTATUS
: {
9735 mutex_lock(&dev
->fw_mutex
);
9736 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9737 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9738 status
= SSD_BMSTATUS_WARNING
;
9740 status
= SSD_BMSTATUS_OK
;
9742 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9743 ret
= __ssd_bm_status(dev
, &status
);
9745 status
= SSD_BMSTATUS_OK
;
9747 mutex_unlock(&dev
->fw_mutex
);
9752 if (copy_to_user(argp
, &status
, sizeof(int))) {
9753 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9760 case SSD_CMD_GET_LABEL2
: {
9764 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9765 label
= &dev
->label
;
9766 length
= sizeof(struct ssd_label
);
9768 label
= &dev
->labelv3
;
9769 length
= sizeof(struct ssd_labelv3
);
9772 if (copy_to_user(argp
, label
, length
)) {
9780 ret
= ssd_flush(dev
);
9782 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9788 case SSD_CMD_SAVE_MD
: {
9791 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9792 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9797 dev
->save_md
= !!save_md
;
9801 case SSD_CMD_SET_WMODE
: {
9804 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9805 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9810 ret
= __ssd_set_wmode(dev
, new_wmode
);
9818 case SSD_CMD_GET_WMODE
: {
9819 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
9820 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9828 case SSD_CMD_GET_USER_WMODE
: {
9829 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
9830 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9838 case SSD_CMD_DEBUG
: {
9839 struct ssd_debug_info db_info
;
9846 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
9847 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9852 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
9858 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
9859 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
9864 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
9866 #ifdef SSD_OT_PROTECT
9868 if (db_info
.type
== SSD_DEBUG_NONE
) {
9869 ssd_check_temperature(dev
, SSD_OT_TEMP
);
9870 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
9871 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
9872 dev
->ot_delay
= SSD_OT_DELAY
;
9873 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
9880 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
9881 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
9882 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
9883 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
9887 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
9888 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
9894 case SSD_CMD_DRV_PARAM_INFO
: {
9895 struct ssd_drv_param_info drv_param
;
9897 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
9899 drv_param
.mode
= mode
;
9900 drv_param
.status_mask
= status_mask
;
9901 drv_param
.int_mode
= int_mode
;
9902 drv_param
.threaded_irq
= threaded_irq
;
9903 drv_param
.log_level
= log_level
;
9904 drv_param
.wmode
= wmode
;
9905 drv_param
.ot_protect
= ot_protect
;
9906 drv_param
.finject
= finject
;
9908 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
9909 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
/*
 * ssd_block_ioctl(): block-device ioctl entry point. Two signatures are
 * compiled depending on kernel version: the pre-2.6.28 (inode, file) form
 * and the (block_device, fmode_t) form. Visible work: fetch the ssd_device
 * from gendisk private_data, service HDIO_GETGEO by filling a hd_geometry,
 * flush via ssd_flush(), and forward everything else to ssd_ioctl_common().
 * NOTE(review): the extraction elided source lines here (original numbering
 * jumps, e.g. 9930->9936, 9969->9978) — case labels, braces and returns are
 * missing; verify against the complete file.
 */
9925 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9926 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
9927 unsigned int cmd
, unsigned long arg
)
9929 struct ssd_device
*dev
;
9930 void __user
*argp
= (void __user
*)arg
;
9936 dev
= inode
->i_bdev
->bd_disk
->private_data
;
9941 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
9942 unsigned int cmd
, unsigned long arg
)
9944 struct ssd_device
*dev
;
9945 void __user
*argp
= (void __user
*)arg
;
9952 dev
= bdev
->bd_disk
->private_data
;
9960 struct hd_geometry geo
;
/* cylinders derived from device size; heads/sectors presumably fixed — elided */
9961 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
9964 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9965 geo
.start
= get_start_sect(inode
->i_bdev
);
9967 geo
.start
= get_start_sect(bdev
);
9969 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
9978 ret
= ssd_flush(dev
);
9980 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
/* default: forward driver-private commands to the common ioctl handler */
9988 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * ssd_free_dev(): kref release callback — runs when the last reference to
 * the ssd_device is dropped (see ssd_put()). Recovers the device from the
 * embedded kref and returns its index via ssd_put_index().
 * NOTE(review): lines elided here (10001->10007, 10011->end); the final
 * teardown/free steps are not visible — confirm against the full file.
 */
9999 static void ssd_free_dev(struct kref
*kref
)
10001 struct ssd_device
*dev
;
10007 dev
= container_of(kref
, struct ssd_device
, kref
);
10011 ssd_put_index(dev
->slave
, dev
->idx
);
10016 static void ssd_put(struct ssd_device
*dev
)
10018 kref_put(&dev
->kref
, ssd_free_dev
);
/*
 * ssd_get(): take an additional reference on the device.
 * NOTE(review): the function returns int but the return statement was
 * elided by the extraction (presumably `return 0;`) — confirm.
 */
10021 static int ssd_get(struct ssd_device
*dev
)
10023 kref_get(&dev
->kref
);
/*
 * ssd_block_open(): block-device open. Two signatures by kernel version.
 * Visible work: resolve the ssd_device from gendisk private_data and bump
 * the open count (refcnt). Module refcounting is intentionally disabled
 * (commented-out try_module_get).
 * NOTE(review): validity checks/returns elided (numbering jumps
 * 10031->10037, 10056->10062) — verify against the complete file.
 */
10028 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10029 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10031 struct ssd_device
*dev
;
10037 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10042 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10044 struct ssd_device
*dev
;
10050 dev
= bdev
->bd_disk
->private_data
;
10056 /*if (!try_module_get(dev->owner))
10062 atomic_inc(&dev
->refcnt
);
/*
 * ssd_block_release(): block-device release, counterpart of
 * ssd_block_open(). Three kernel-version variants: (inode, file) for
 * <=2.6.27, int(gendisk, fmode_t) for <=3.9, void(gendisk, fmode_t) after.
 * Visible work: resolve the ssd_device and decrement refcnt; module_put is
 * intentionally commented out. The trailing #if <=3.9 presumably guards the
 * int-returning variant's return value (elided).
 * NOTE(review): lines elided throughout (e.g. 10070->10076) — verify.
 */
10067 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10068 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10070 struct ssd_device
*dev
;
10076 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10080 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10081 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10083 struct ssd_device
*dev
;
10089 dev
= disk
->private_data
;
10094 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10096 struct ssd_device
*dev
;
10102 dev
= disk
->private_data
;
10108 atomic_dec(&dev
->refcnt
);
10112 //module_put(dev->owner);
10113 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
/*
 * ssd_fops: block_device_operations for the hio disk. getgeo is only wired
 * up on kernels >= 2.6.16 (older kernels service HDIO_GETGEO via ioctl).
 * NOTE(review): the closing of the initializer (#endif, "};") was elided.
 */
10118 static struct block_device_operations ssd_fops
= {
10119 .owner
= THIS_MODULE
,
10120 .open
= ssd_block_open
,
10121 .release
= ssd_block_release
,
10122 .ioctl
= ssd_block_ioctl
,
10123 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10124 .getgeo
= ssd_block_getgeo
,
/*
 * ssd_init_trim(): enable and size the discard (TRIM) support on the
 * request queue. Only compiled in when SSD_TRIM is defined and the kernel
 * is >= 2.6.32; devices at protocol <= V3 are skipped (early return,
 * elided). Discard granularity/alignment are fixed at 4 KiB; the max
 * discard size depends on the protocol revision: before V3.2.4 a single
 * sg-segment worth of sectors, afterwards sg_max_sec * cmd_max_sg.
 * NOTE(review): braces/#endif lines elided — verify against the full file.
 */
10128 static void ssd_init_trim(ssd_device_t
*dev
)
10130 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10131 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10136 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10137 dev
->rq
->limits
.discard_zeroes_data
= 1;
10138 dev
->rq
->limits
.discard_alignment
= 4096;
10139 dev
->rq
->limits
.discard_granularity
= 4096;
10141 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10142 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10144 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
/*
 * ssd_cleanup_queue(): tear down the request queue created by
 * ssd_init_queue().
 * NOTE(review): lines 10150-10152 and the tail were elided — there may be
 * additional quiesce/NULL-out steps around blk_cleanup_queue(); verify.
 */
10149 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10153 blk_cleanup_queue(dev
->rq
);
/*
 * ssd_init_queue(): allocate and configure the bio-based request queue.
 * Sets the make_request handler first (required ordering), then the
 * segment/sector limits (API names differ by kernel version), 512-byte
 * logical blocks, highmem bounce limit, queuedata back-pointer, the
 * pre-2.6.20 flush hook, the NONROT (SSD) flag, and finally discard
 * support via ssd_init_trim().
 * NOTE(review): error-path labels (out_init_queue), returns, #else/#endif
 * lines were elided by the extraction — verify against the full file.
 */
10157 static int ssd_init_queue(struct ssd_device
*dev
)
10159 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10160 if (dev
->rq
== NULL
) {
10161 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10162 goto out_init_queue
;
10165 /* must be first */
10166 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10168 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10169 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10170 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10171 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10173 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10174 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10177 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10178 blk_queue_hardsect_size(dev
->rq
, 512);
10180 blk_queue_logical_block_size(dev
->rq
, 512);
10182 /* not work for make_request based drivers(bio) */
10183 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10185 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10187 dev
->rq
->queuedata
= dev
;
10189 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10190 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10193 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10194 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10197 ssd_init_trim(dev
);
/*
 * ssd_cleanup_blkdev(): unregister the gendisk created by
 * ssd_init_blkdev(). Callers hold dev->gd_mutex (see ssd_ioctl_common's
 * SSD_CMD_UNLOAD_DEV path).
 * NOTE(review): tail elided — a put_disk() likely follows; verify.
 */
10205 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10207 del_gendisk(dev
->gd
);
/*
 * ssd_init_blkdev(): allocate and populate the gendisk: major/first_minor
 * from the per-device index, ssd_fops, the queue built by ssd_init_queue(),
 * private_data back-pointer, sysfs parent device, disk name, and capacity
 * in 512-byte sectors (hw_info.size >> 9). The registration step
 * (add_disk) and error handling were elided by the extraction.
 * NOTE(review): verify elided error paths against the complete file.
 */
10210 static int ssd_init_blkdev(struct ssd_device
*dev
)
10216 dev
->gd
= alloc_disk(ssd_minors
);
10218 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10221 dev
->gd
->major
= dev
->major
;
10222 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10223 dev
->gd
->fops
= &ssd_fops
;
10224 dev
->gd
->queue
= dev
->rq
;
10225 dev
->gd
->private_data
= dev
;
10226 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
10227 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10229 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
/*
 * ssd_ioctl(): character-device ioctl. Old (inode, file) signature on
 * kernels <= 2.6.10, unlocked_ioctl-style long(file, ...) otherwise.
 * Resolves the ssd_device stashed in file->private_data by ssd_open() and
 * forwards to ssd_ioctl_common().
 * NOTE(review): braces/validity checks elided (10247->10253) — verify.
 */
10239 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10240 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10241 unsigned int cmd
, unsigned long arg
)
10243 static long ssd_ioctl(struct file
*file
,
10244 unsigned int cmd
, unsigned long arg
)
10247 struct ssd_device
*dev
;
10253 dev
= file
->private_data
;
10258 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
/*
 * ssd_open(): character-device open. Looks up the ssd_device whose idx
 * matches the minor number by walking the global ssd_list, then stashes it
 * in file->private_data for later ioctl/release calls.
 * NOTE(review): the not-found error path, reference taking and returns were
 * elided (numbering jumps 10275->10285) — verify. Also unclear from here
 * whether ssd_list is traversed under a lock; confirm in the full file.
 */
10261 static int ssd_open(struct inode
*inode
, struct file
*file
)
10263 struct ssd_device
*dev
= NULL
;
10264 struct ssd_device
*n
= NULL
;
10268 if (!inode
|| !file
) {
10272 idx
= iminor(inode
);
10274 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10275 if (dev
->idx
== idx
) {
10285 file
->private_data
= dev
;
/*
 * ssd_release(): character-device release — counterpart of ssd_open().
 * Clears file->private_data so the device pointer cannot be reused after
 * close.
 * NOTE(review): validity checks and the reference drop were elided
 * (10294->10300, 10300->10307) — verify against the complete file.
 */
10292 static int ssd_release(struct inode
*inode
, struct file
*file
)
10294 struct ssd_device
*dev
;
10300 dev
= file
->private_data
;
10307 file
->private_data
= NULL
;
/*
 * ssd_cfops: file_operations for the control character device ("c<name>").
 * Uses .ioctl on kernels <= 2.6.10 and .unlocked_ioctl afterwards, both
 * pointing at ssd_ioctl. The .open entry (presumably ssd_open) and the
 * initializer's closing were elided by the extraction.
 */
10312 static struct file_operations ssd_cfops
= {
10313 .owner
= THIS_MODULE
,
10315 .release
= ssd_release
,
10316 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10317 .ioctl
= ssd_ioctl
,
10319 .unlocked_ioctl
= ssd_ioctl
,
/*
 * ssd_cleanup_chardev(): remove the control character device node,
 * matching whichever class/devfs API the running kernel version provides
 * (class_simple / class_device / device_destroy). The device number is
 * MKDEV(cmajor, idx) and the devfs name is "c<name>".
 * NOTE(review): the surrounding braces/#endif were elided — verify.
 */
10323 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10329 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10330 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10331 devfs_remove("c%s", dev
->name
);
10332 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10333 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10334 devfs_remove("c%s", dev
->name
);
10335 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10336 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10337 devfs_remove("c%s", dev
->name
);
10338 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10339 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10341 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
/*
 * ssd_init_chardev(): create the control character device node "c<name>"
 * with device number MKDEV(cmajor, idx), using whichever creation API the
 * kernel provides: devfs_mk_cdev + class_simple_device_add (<=2.6.12),
 * devfs + class_device_create (<=2.6.17), class_device_create (<=2.6.24),
 * device_create (<=2.6.26), device_create_drvdata (<=2.6.27), or the
 * modern device_create otherwise.
 * NOTE(review): error handling for the devfs/class calls and the function's
 * return were elided by the extraction — verify against the full file.
 */
10345 static int ssd_init_chardev(struct ssd_device
*dev
)
10353 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10354 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10358 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10360 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10361 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10365 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10367 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10368 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10372 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10374 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10375 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10376 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10377 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10378 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10379 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10381 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
/*
 * ssd_check_hw(): bridge sanity check. Writes a test pattern to the bridge
 * test register and reads it back; the hardware is expected to return the
 * bitwise complement of what was written.
 * NOTE(review): the error/success returns were elided (gap after 10395) —
 * verify against the complete file.
 */
10387 static int ssd_check_hw(struct ssd_device
*dev
)
10389 uint32_t test_data
= 0x55AA5AA5;
10390 uint32_t read_data
;
10392 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10393 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
/* hardware echoes the complement; mismatch means the bridge is not responding */
10394 if (read_data
!= ~(test_data
)) {
10395 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
/*
 * ssd_check_fw(): wait for the controller firmware to come up. Only runs on
 * protocol >= V3.1.3 (older protocols take the early-out at the top). Polls
 * SSD_HW_STATUS_REG up to SSD_CONTROLLER_WAIT times (sleeping
 * SSD_INIT_WAIT ms between polls) until both bit0 (firmware loaded) and
 * bit8 (controller state OK) are set, then reads SSD_RELOAD_FW_REG and
 * records a pending firmware reload in dev->reload_fw.
 * NOTE(review): returns and the condition guarding reload_fw were elided
 * (gap 10430->10432) — verify against the complete file.
 */
10402 static int ssd_check_fw(struct ssd_device
*dev
)
10407 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10411 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10412 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10413 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10417 msleep(SSD_INIT_WAIT
);
10420 if (!(val
& 0x1)) {
10421 /* controller fw status */
10422 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10424 } else if (!((val
>> 8) & 0x1)) {
10425 /* controller state */
10426 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10430 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10432 dev
->reload_fw
= 1;
/*
 * ssd_init_fw_info(): read and validate the bridge firmware version
 * (low 12 bits of SSD_BRIDGE_VER_REG, must be >= SSD_FW_MIN), then run
 * ssd_check_fw(). A check failure is only fatal in standard driver mode;
 * other modes skip the error (see the mode test at the bottom).
 * NOTE(review): returns and braces elided — verify against the full file.
 */
10438 static int ssd_init_fw_info(struct ssd_device
*dev
)
10443 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10444 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10445 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10446 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10449 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10451 ret
= ssd_check_fw(dev
);
10457 /* skip error if not in standard mode */
10458 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_clock(): check the board clocks via SSD_HW_STATUS_REG. Only
 * meaningful on protocol >= V3.1.3. Bit4 = 166 MHz present; on protocol
 * >= V3.2 additionally bit5 = 166 MHz skew, bit6 = 156.25 MHz present,
 * bit7 = 156.25 MHz skew. Each fault is latched once in dev->hwmon via
 * test_and_set_bit so the warning + SSD_LOG_CLK_FAULT swlog entry are
 * emitted only on the first detection.
 * NOTE(review): braces/returns elided — verify against the complete file.
 */
10464 static int ssd_check_clock(struct ssd_device
*dev
)
10469 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10473 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10476 if (!((val
>> 4 ) & 0x1)) {
10477 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10478 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10479 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10484 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10485 if (!((val
>> 5 ) & 0x1)) {
10486 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10487 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10488 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10492 if (!((val
>> 6 ) & 0x1)) {
10493 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10494 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10495 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10499 if (!((val
>> 7 ) & 0x1)) {
10500 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10501 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10502 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
/*
 * ssd_check_volt(): per-controller FPGA supply-voltage check (protocol
 * >= V3.2 only). For each controller, reads the 1.0V and 1.8V ADC
 * registers, extracts both the max and min sampled values
 * (SSD_FPGA_VOLT_MAX/MIN) and compares them against the allowed ADC
 * window. Out-of-range readings are latched once per controller/rail in
 * dev->hwmon, logged via hio_warn, and recorded as SSD_LOG_VOLT_FAULT
 * swlog entries carrying rail, controller index and raw ADC value.
 * NOTE(review): braces/returns elided — verify against the complete file.
 */
10511 static int ssd_check_volt(struct ssd_device
*dev
)
10518 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10522 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10524 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10525 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10526 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10527 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10528 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10529 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10530 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10534 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10535 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10536 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10537 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10538 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10544 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10545 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10546 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10547 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10548 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10549 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10550 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10554 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10555 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10556 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10557 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10558 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
/*
 * ssd_check_reset_sync(): verify the controllers reset synchronously.
 * Protocol >= V3.1.3 only. Reads SSD_HW_STATUS_REG: bit8 clear means the
 * controller state is bad; on protocols below V3.2, bit9 set means a
 * controller reset happened asynchronously, which is logged via
 * SSD_LOG_CTRL_RST_SYNC.
 * NOTE(review): returns/braces elided — verify against the complete file.
 */
10567 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10571 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10575 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10576 if (!((val
>> 8) & 0x1)) {
10577 /* controller state */
10578 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10582 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10586 if (((val
>> 9 ) & 0x1)) {
10587 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10588 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
/*
 * ssd_check_hw_bh(): second-half hardware check — on protocol >= V3.1.3,
 * run the clock check; a failure is fatal only in standard driver mode.
 * NOTE(review): early-return bodies elided — verify against the full file.
 */
10595 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10599 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10604 ret
= ssd_check_clock(dev
);
10610 /* skip error if not in standard mode */
10611 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_controller(): on protocol >= V3.1.3, verify reset
 * synchronization via ssd_check_reset_sync(); a failure is fatal only in
 * standard driver mode (mirrors ssd_check_hw_bh()'s structure).
 * NOTE(review): early-return bodies elided — verify against the full file.
 */
10617 static int ssd_check_controller(struct ssd_device
*dev
)
10621 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10626 ret
= ssd_check_reset_sync(dev
);
10632 /* skip error if not in standard mode */
10633 if (mode
!= SSD_DRV_MODE_STANDARD
) {
/*
 * ssd_check_controller_bh(): second-half controller checks, run only in
 * standard mode. Sequence: confirm controller 0 readiness via
 * SSD_READY_REG; write/readback the complement test pattern through each
 * controller's test register; check supply voltages (ssd_check_volt); on
 * protocol > V3, poll each controller's RAM-init status (bit1), retrying up
 * to SSD_RAM_INIT_MAX_WAIT with SSD_INIT_WAIT ms sleeps (check_ram_status
 * label) and logging SSD_LOG_DDR_INIT_ERR on timeout; finally wait up to
 * SSD_CH_INFO_MAX_WAIT polls for channel-info init (bit31 of
 * SSD_CH_INFO_REG) to clear.
 * NOTE(review): labels, braces and returns elided — verify against the
 * complete file.
 */
10639 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10641 uint32_t test_data
= 0x55AA5AA5;
10643 int reg_base
, reg_sz
;
10648 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10653 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10655 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10659 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10660 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10661 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10662 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10663 if (val
!= ~(test_data
)) {
10664 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10670 ret
= ssd_check_volt(dev
);
10676 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10677 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10678 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10680 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10682 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10684 if (!((val
>> 1) & 0x1)) {
10686 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10687 msleep(SSD_INIT_WAIT
);
10688 goto check_ram_status
;
10690 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10691 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10696 reg_base
+= reg_sz
;
10701 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10702 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10703 if (!((val
>> 31) & 0x1)) {
10707 msleep(SSD_INIT_WAIT
);
10709 if ((val
>> 31) & 0x1) {
10710 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
/*
 * ssd_init_protocol_info(): read the protocol version register (all-ones
 * means the device is not answering — error) and select the register map:
 * protocols below V3 use the legacy init-state/chip-info register layout,
 * V3+ use the PV3 layout. Stores everything in dev->protocol_info.
 * NOTE(review): returns/braces elided — verify against the complete file.
 */
10717 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10721 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
/* 0xFFFFFFFF readback indicates an unresponsive device/bus */
10722 if (val
== (uint32_t)-1) {
10723 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10726 dev
->protocol_info
.ver
= val
;
10728 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10729 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10730 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10732 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10733 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10735 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10736 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10738 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10739 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10745 static int ssd_init_hw_info(struct ssd_device
*dev
)
10753 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10754 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10755 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10757 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10758 hio_warn("%s: response info error\n", dev
->name
);
10763 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10764 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10765 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10766 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10767 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10769 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10770 hio_warn("%s: cmd info error\n", dev
->name
);
10776 if (ssd_check_hw_bh(dev
)) {
10777 hio_warn("%s: check hardware status failed\n", dev
->name
);
10782 if (ssd_check_controller(dev
)) {
10783 hio_warn("%s: check controller state failed\n", dev
->name
);
10788 /* nr controller : read again*/
10789 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10790 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
10792 /* nr ctrl configured */
10793 nr_ctrl
= (val
>> 20) & 0xF;
10794 if (0 == dev
->hw_info
.nr_ctrl
) {
10795 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
10798 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
10799 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
10800 if (mode
<= SSD_DRV_MODE_STANDARD
) {
10806 if (ssd_check_controller_bh(dev
)) {
10807 hio_warn("%s: check controller failed\n", dev
->name
);
10812 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10813 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
10814 if ((val
& 0xF) != 0xF) {
10815 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
10818 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
10819 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
10825 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10826 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10827 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
10828 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
10829 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
10831 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10832 dev
->hw_info
.max_ch
= 1;
10833 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
10835 /* set max channel 32 */
10836 dev
->hw_info
.max_ch
= 32;
10839 if (0 == dev
->hw_info
.nr_chip
) {
10841 dev
->hw_info
.nr_chip
= 1;
10845 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
10846 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
10848 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
10849 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
10856 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10857 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
10858 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
10859 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
10860 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
10861 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10862 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
10864 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
10869 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
10871 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
10872 hio_warn("%s: ram info error\n", dev
->name
);
10877 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10878 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
10880 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
10881 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
10883 if (0 == dev
->hw_info
.log_sz
) {
10884 hio_warn("%s: log size error\n", dev
->name
);
10889 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
10890 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
10891 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10892 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10893 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
10894 hio_warn("%s: bbt info error\n", dev
->name
);
10900 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
10901 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
10902 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10903 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10905 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
10907 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
10908 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10909 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
10910 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
10911 hio_warn("%s: md info error\n", dev
->name
);
10917 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10918 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
10920 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
10921 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
10926 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10927 if (dev
->hw_info
.nr_ctrl
> 1) {
10928 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
10929 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
10930 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
10933 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
10934 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
10935 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
10937 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
10938 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
10939 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
10941 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
10942 dev
->hw_info
.bbf_pages
= val
& 0xFF;
10943 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
10945 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
10946 hio_warn("%s: flash info error\n", dev
->name
);
10952 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
10954 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
10955 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10956 dev
->hw_info
.valid_pages
= val
& 0x3FF;
10957 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
10959 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
10960 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
10962 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
10963 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
10964 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
10969 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
10970 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
10971 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
10972 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10973 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
10975 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
10976 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
10983 if (mode
< SSD_DRV_MODE_DEBUG
) {
10984 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
10985 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
10986 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
10989 /* extend hardware info */
10990 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10991 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
10993 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
10994 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
10995 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
10998 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
10999 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11000 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11003 /* power loss protect */
11004 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11005 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11006 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11008 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11012 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11013 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11016 /* skip error if not in standard mode */
11017 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11023 static void ssd_cleanup_response(struct ssd_device
*dev
)
11025 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11026 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11028 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11029 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11032 static int ssd_init_response(struct ssd_device
*dev
)
11034 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11035 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11037 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11038 if (!dev
->resp_msg_base
) {
11039 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11040 goto out_alloc_resp_msg
;
11042 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11044 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11045 if (!dev
->resp_ptr_base
){
11046 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11047 goto out_alloc_resp_ptr
;
11049 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11050 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11052 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11053 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11057 out_alloc_resp_ptr
:
11058 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11059 out_alloc_resp_msg
:
11063 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11065 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11068 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11069 kfree(dev
->cmd
[i
].sgl
);
11072 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11076 static int ssd_init_cmd(struct ssd_device
*dev
)
11078 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11079 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11080 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11083 spin_lock_init(&dev
->cmd_lock
);
11085 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11086 if (!dev
->msg_base
) {
11087 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11088 goto out_alloc_msg
;
11091 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11093 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11094 goto out_alloc_cmd
;
11096 memset(dev
->cmd
, 0, cmd_sz
);
11098 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11099 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11100 if (!dev
->cmd
[i
].sgl
) {
11101 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11102 goto out_alloc_sgl
;
11105 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11106 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11108 dev
->cmd
[i
].dev
= dev
;
11109 dev
->cmd
[i
].tag
= i
;
11110 dev
->cmd
[i
].flag
= 0;
11112 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11115 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11116 dev
->scmd
= ssd_dispatch_cmd
;
11118 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11120 dev
->scmd
= ssd_send_cmd_db
;
11122 dev
->scmd
= ssd_send_cmd
;
11129 for (i
--; i
>=0; i
--) {
11130 kfree(dev
->cmd
[i
].sgl
);
11134 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11139 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11140 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11142 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11144 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11148 return IRQ_WAKE_THREAD
;
11151 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11153 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11154 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11155 struct ssd_cmd
*cmd
;
11156 union ssd_response_msq __msg
;
11157 union ssd_response_msq
*msg
= &__msg
;
11159 uint32_t resp_idx
= queue
->resp_idx
;
11160 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11161 uint32_t end_resp_idx
;
11163 if (unlikely(resp_idx
== new_resp_idx
)) {
11167 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11170 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11173 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11174 msg
->u64_msg
= *u64_msg
;
11176 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11177 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11180 /* clear the resp msg */
11181 *u64_msg
= (uint64_t)(-1);
11183 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11184 /*if (unlikely(!cmd->bio)) {
11185 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11186 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11190 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11191 cmd
->errors
= -EIO
;
11195 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11199 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11200 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11201 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11202 queue_work(dev
->workq
, &dev
->log_work
);
11206 if (unlikely(msg
->resp_msg
.status
)) {
11207 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11208 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11209 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11212 ssd_set_alarm(dev
);
11213 queue
->io_stat
.nr_rwerr
++;
11214 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11216 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11217 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11219 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11221 queue
->io_stat
.nr_ioerr
++;
11224 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11225 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11226 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11228 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11230 }while (resp_idx
!= end_resp_idx
);
11232 queue
->resp_idx
= new_resp_idx
;
11234 return IRQ_HANDLED
;
11238 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11239 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11241 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11244 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11245 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11246 struct ssd_cmd
*cmd
;
11247 union ssd_response_msq __msg
;
11248 union ssd_response_msq
*msg
= &__msg
;
11250 uint32_t resp_idx
= queue
->resp_idx
;
11251 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11252 uint32_t end_resp_idx
;
11254 if (unlikely(resp_idx
== new_resp_idx
)) {
11258 #if (defined SSD_ESCAPE_IRQ)
11259 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11260 dev
->irq_cpu
= smp_processor_id();
11264 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11267 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11270 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11271 msg
->u64_msg
= *u64_msg
;
11273 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11274 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11277 /* clear the resp msg */
11278 *u64_msg
= (uint64_t)(-1);
11280 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11281 /*if (unlikely(!cmd->bio)) {
11282 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11283 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11287 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11288 cmd
->errors
= -EIO
;
11292 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11296 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11297 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11298 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11299 queue_work(dev
->workq
, &dev
->log_work
);
11303 if (unlikely(msg
->resp_msg
.status
)) {
11304 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11305 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11306 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11309 ssd_set_alarm(dev
);
11310 queue
->io_stat
.nr_rwerr
++;
11311 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11313 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11314 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11316 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11318 queue
->io_stat
.nr_ioerr
++;
11321 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11322 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11323 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11325 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11327 }while (resp_idx
!= end_resp_idx
);
11329 queue
->resp_idx
= new_resp_idx
;
11331 return IRQ_HANDLED
;
11334 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11335 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11337 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11341 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11342 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11344 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11345 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11347 ret
= ssd_interrupt(irq
, dev_id
);
11351 if (IRQ_HANDLED
== ret
) {
11352 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11358 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11362 for (i
=0; i
<dev
->nr_queue
; i
++) {
11363 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11367 static void ssd_free_irq(struct ssd_device
*dev
)
11371 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11372 if (SSD_INT_MSIX
== dev
->int_mode
) {
11373 for (i
=0; i
<dev
->nr_queue
; i
++) {
11374 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11379 for (i
=0; i
<dev
->nr_queue
; i
++) {
11380 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11383 if (SSD_INT_MSIX
== dev
->int_mode
) {
11384 pci_disable_msix(dev
->pdev
);
11385 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11386 pci_disable_msi(dev
->pdev
);
11391 static int ssd_init_irq(struct ssd_device
*dev
)
11393 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11394 const struct cpumask
*cpu_mask
;
11395 static int cpu_affinity
= 0;
11397 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11398 const struct cpumask
*mask
;
11399 static int cpu
= 0;
11403 unsigned long flags
= 0;
11406 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11408 #ifdef SSD_ESCAPE_IRQ
11412 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11413 dev
->nr_queue
= SSD_MSIX_VEC
;
11414 for (i
=0; i
<dev
->nr_queue
; i
++) {
11415 dev
->entry
[i
].entry
= i
;
11418 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11421 } else if (ret
> 0) {
11422 dev
->nr_queue
= ret
;
11424 hio_warn("%s: can not enable msix\n", dev
->name
);
11426 ssd_set_alarm(dev
);
11431 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11432 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11433 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11434 cpu
= cpumask_first(mask
);
11436 for (i
=0; i
<dev
->nr_queue
; i
++) {
11437 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11438 cpu
= cpumask_next(cpu
, mask
);
11439 if (cpu
>= nr_cpu_ids
) {
11440 cpu
= cpumask_first(mask
);
11445 dev
->int_mode
= SSD_INT_MSIX
;
11446 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11447 ret
= pci_enable_msi(dev
->pdev
);
11449 hio_warn("%s: can not enable msi\n", dev
->name
);
11451 ssd_set_alarm(dev
);
11456 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11458 dev
->int_mode
= SSD_INT_MSI
;
11461 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11463 dev
->int_mode
= SSD_INT_LEGACY
;
11466 for (i
=0; i
<dev
->nr_queue
; i
++) {
11467 if (dev
->nr_queue
> 1) {
11468 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11470 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11473 dev
->queue
[i
].dev
= dev
;
11474 dev
->queue
[i
].idx
= i
;
11476 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11477 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11479 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11480 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11481 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11482 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11484 dev
->queue
[i
].cmd
= dev
->cmd
;
11487 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11488 flags
= IRQF_SHARED
;
11493 for (i
=0; i
<dev
->nr_queue
; i
++) {
11494 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11495 if (threaded_irq
) {
11496 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11497 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11498 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11500 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11503 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11504 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11506 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11510 hio_warn("%s: request irq failed\n", dev
->name
);
11512 ssd_set_alarm(dev
);
11513 goto out_request_irq
;
11516 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11517 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11518 if (SSD_INT_MSIX
== dev
->int_mode
) {
11519 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11520 cpu_affinity
= cpumask_first(cpu_mask
);
11523 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11524 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11525 if (cpu_affinity
>= nr_cpu_ids
) {
11526 cpu_affinity
= cpumask_first(cpu_mask
);
11535 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11536 if (SSD_INT_MSIX
== dev
->int_mode
) {
11537 for (j
=0; j
<dev
->nr_queue
; j
++) {
11538 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11543 for (i
--; i
>=0; i
--) {
11544 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11547 if (SSD_INT_MSIX
== dev
->int_mode
) {
11548 pci_disable_msix(dev
->pdev
);
11549 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11550 pci_disable_msi(dev
->pdev
);
11557 static void ssd_initial_log(struct ssd_device
*dev
)
11560 uint32_t speed
, width
;
11562 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11566 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11568 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
11571 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11573 width
= (val
>> 4)& 0x3F;
11574 if (0x1 == speed
) {
11575 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11576 } else if (0x2 == speed
) {
11577 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11579 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11581 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11586 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11587 static void ssd_hwmon_worker(void *data
)
11589 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11591 static void ssd_hwmon_worker(struct work_struct
*work
)
11593 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11596 if (ssd_check_hw(dev
)) {
11597 //hio_err("%s: check hardware failed\n", dev->name);
11601 ssd_check_clock(dev
);
11602 ssd_check_volt(dev
);
11604 ssd_mon_boardvolt(dev
);
11607 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11608 static void ssd_tempmon_worker(void *data
)
11610 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11612 static void ssd_tempmon_worker(struct work_struct
*work
)
11614 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11617 if (ssd_check_hw(dev
)) {
11618 //hio_err("%s: check hardware failed\n", dev->name);
11626 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11627 static void ssd_capmon_worker(void *data
)
11629 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11631 static void ssd_capmon_worker(struct work_struct
*work
)
11633 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11636 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11639 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11643 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11647 /* fault before? */
11648 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11649 ret
= ssd_check_pl_cap_fast(dev
);
11656 ret
= ssd_do_cap_learn(dev
, &cap
);
11658 hio_err("%s: cap learn failed\n", dev
->name
);
11659 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11663 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11665 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11666 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11669 //use the fw event id?
11670 if (cap
< cap_threshold
) {
11671 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11672 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11674 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11675 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11676 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11681 static void ssd_routine_start(void *data
)
11683 struct ssd_device
*dev
;
11690 dev
->routine_tick
++;
11692 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11693 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11694 queue_work(dev
->workq
, &dev
->log_work
);
11697 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11698 queue_work(dev
->workq
, &dev
->hwmon_work
);
11701 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11702 queue_work(dev
->workq
, &dev
->capmon_work
);
11705 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11706 /* CAP fault? check again */
11707 queue_work(dev
->workq
, &dev
->capmon_work
);
11710 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11711 queue_work(dev
->workq
, &dev
->tempmon_work
);
11714 /* schedule routine */
11715 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11718 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11720 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11723 (void)ssd_del_timer(&dev
->routine_timer
);
11725 (void)ssd_del_timer(&dev
->bm_timer
);
11728 static int ssd_init_routine(struct ssd_device
*dev
)
11730 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11733 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11734 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
11735 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
11736 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
11737 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
11739 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
11740 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
11741 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
11742 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
11746 ssd_initial_log(dev
);
11748 /* schedule bm routine */
11749 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
11751 /* schedule routine */
11752 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
11758 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11761 ssd_remove_one (struct pci_dev
*pdev
)
11763 struct ssd_device
*dev
;
11769 dev
= pci_get_drvdata(pdev
);
11774 list_del_init(&dev
->list
);
11776 ssd_unregister_sysfs(dev
);
11778 /* offline firstly */
11779 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
11781 /* clean work queue first */
11783 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
11784 ssd_cleanup_workq(dev
);
11788 (void)ssd_flush(dev
);
11789 (void)ssd_save_md(dev
);
11793 ssd_save_smart(dev
);
11796 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
11797 ssd_cleanup_blkdev(dev
);
11801 ssd_cleanup_chardev(dev
);
11804 /* clean routine */
11806 ssd_cleanup_routine(dev
);
11809 ssd_cleanup_queue(dev
);
11811 ssd_cleanup_tag(dev
);
11812 ssd_cleanup_thread(dev
);
11816 ssd_cleanup_dcmd(dev
);
11817 ssd_cleanup_cmd(dev
);
11818 ssd_cleanup_response(dev
);
11821 ssd_cleanup_log(dev
);
11824 if (dev
->reload_fw
) { //reload fw
11825 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
11828 /* unmap physical adress */
11829 #ifdef LINUX_SUSE_OS
11830 iounmap(dev
->ctrlp
);
11832 pci_iounmap(pdev
, dev
->ctrlp
);
11835 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
11837 pci_disable_device(pdev
);
11839 pci_set_drvdata(pdev
, NULL
);
11845 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11848 ssd_init_one(struct pci_dev
*pdev
,
11849 const struct pci_device_id
*ent
)
11851 struct ssd_device
*dev
;
11854 if (!pdev
|| !ent
) {
11859 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
11862 goto out_alloc_dev
;
11864 memset(dev
, 0, sizeof(struct ssd_device
));
11866 dev
->owner
= THIS_MODULE
;
11868 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
11872 dev
->idx
= ssd_get_index(dev
->slave
);
11873 if (dev
->idx
< 0) {
11875 goto out_get_index
;
11879 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
11880 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
11882 dev
->major
= ssd_major
;
11883 dev
->cmajor
= ssd_cmajor
;
11885 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
11886 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
11887 dev
->major
= ssd_major_sl
;
11891 atomic_set(&(dev
->refcnt
), 0);
11892 atomic_set(&(dev
->tocnt
), 0);
11894 mutex_init(&dev
->fw_mutex
);
11897 mutex_init(&dev
->gd_mutex
);
11900 pci_set_drvdata(pdev
, dev
);
11902 kref_init(&dev
->kref
);
11904 ret
= pci_enable_device(pdev
);
11906 hio_warn("%s: can not enable device\n", dev
->name
);
11907 goto out_enable_device
;
11910 pci_set_master(pdev
);
11912 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11913 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
11915 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
11918 hio_warn("%s: set dma mask: failed\n", dev
->name
);
11919 goto out_set_dma_mask
;
11922 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11923 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
11925 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
11928 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
11929 goto out_set_dma_mask
;
11932 dev
->mmio_base
= pci_resource_start(pdev
, 0);
11933 dev
->mmio_len
= pci_resource_len(pdev
, 0);
11935 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
11936 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
11938 goto out_request_mem_region
;
11941 /* 2.6.9 kernel bug */
11942 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
11944 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
11946 goto out_pci_iomap
;
11949 ret
= ssd_check_hw(dev
);
11951 hio_err("%s: check hardware failed\n", dev
->name
);
11955 ret
= ssd_init_protocol_info(dev
);
11957 hio_err("%s: init protocol info failed\n", dev
->name
);
11958 goto out_init_protocol_info
;
11962 ssd_clear_alarm(dev
);
11964 ret
= ssd_init_fw_info(dev
);
11966 hio_err("%s: init firmware info failed\n", dev
->name
);
11968 ssd_set_alarm(dev
);
11969 goto out_init_fw_info
;
11977 ret
= ssd_init_rom_info(dev
);
11979 hio_err("%s: init rom info failed\n", dev
->name
);
11981 ssd_set_alarm(dev
);
11982 goto out_init_rom_info
;
11985 ret
= ssd_init_label(dev
);
11987 hio_err("%s: init label failed\n", dev
->name
);
11989 ssd_set_alarm(dev
);
11990 goto out_init_label
;
11993 ret
= ssd_init_workq(dev
);
11995 hio_warn("%s: init workq failed\n", dev
->name
);
11996 goto out_init_workq
;
11998 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12000 ret
= ssd_init_log(dev
);
12002 hio_err("%s: init log failed\n", dev
->name
);
12004 ssd_set_alarm(dev
);
12008 ret
= ssd_init_smart(dev
);
12010 hio_err("%s: init info failed\n", dev
->name
);
12012 ssd_set_alarm(dev
);
12013 goto out_init_smart
;
12017 ret
= ssd_init_hw_info(dev
);
12019 hio_err("%s: init hardware info failed\n", dev
->name
);
12021 ssd_set_alarm(dev
);
12022 goto out_init_hw_info
;
12030 ret
= ssd_init_sensor(dev
);
12032 hio_err("%s: init sensor failed\n", dev
->name
);
12034 ssd_set_alarm(dev
);
12035 goto out_init_sensor
;
12038 ret
= ssd_init_pl_cap(dev
);
12040 hio_err("%s: int pl_cap failed\n", dev
->name
);
12042 ssd_set_alarm(dev
);
12043 goto out_init_pl_cap
;
12047 ret
= ssd_check_init_state(dev
);
12049 hio_err("%s: check init state failed\n", dev
->name
);
12051 ssd_set_alarm(dev
);
12052 goto out_check_init_state
;
12055 ret
= ssd_init_response(dev
);
12057 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12058 goto out_init_response
;
12061 ret
= ssd_init_cmd(dev
);
12063 hio_warn("%s: init msg failed\n", dev
->name
);
12067 ret
= ssd_init_dcmd(dev
);
12069 hio_warn("%s: init cmd failed\n", dev
->name
);
12070 goto out_init_dcmd
;
12073 ret
= ssd_init_irq(dev
);
12075 hio_warn("%s: init irq failed\n", dev
->name
);
12079 ret
= ssd_init_thread(dev
);
12081 hio_warn("%s: init thread failed\n", dev
->name
);
12082 goto out_init_thread
;
12085 ret
= ssd_init_tag(dev
);
12087 hio_warn("%s: init tags failed\n", dev
->name
);
12088 goto out_init_tags
;
12092 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12094 ret
= ssd_init_queue(dev
);
12096 hio_warn("%s: init queue failed\n", dev
->name
);
12097 goto out_init_queue
;
12105 ret
= ssd_init_ot_protect(dev
);
12107 hio_err("%s: int ot_protect failed\n", dev
->name
);
12109 ssd_set_alarm(dev
);
12110 goto out_int_ot_protect
;
12113 ret
= ssd_init_wmode(dev
);
12115 hio_warn("%s: init write mode\n", dev
->name
);
12116 goto out_init_wmode
;
12119 /* init routine after hw is ready */
12120 ret
= ssd_init_routine(dev
);
12122 hio_warn("%s: init routine\n", dev
->name
);
12123 goto out_init_routine
;
12126 ret
= ssd_init_chardev(dev
);
12128 hio_warn("%s: register char device failed\n", dev
->name
);
12129 goto out_init_chardev
;
12133 ret
= ssd_init_blkdev(dev
);
12135 hio_warn("%s: register block device failed\n", dev
->name
);
12136 goto out_init_blkdev
;
12138 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12140 ret
= ssd_register_sysfs(dev
);
12142 hio_warn("%s: register sysfs failed\n", dev
->name
);
12143 goto out_register_sysfs
;
12148 list_add_tail(&dev
->list
, &ssd_list
);
12152 out_register_sysfs
:
12153 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12154 ssd_cleanup_blkdev(dev
);
12158 ssd_cleanup_chardev(dev
);
12163 ssd_cleanup_routine(dev
);
12167 out_int_ot_protect
:
12168 ssd_cleanup_queue(dev
);
12170 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12171 ssd_cleanup_tag(dev
);
12173 ssd_cleanup_thread(dev
);
12177 ssd_cleanup_dcmd(dev
);
12179 ssd_cleanup_cmd(dev
);
12181 ssd_cleanup_response(dev
);
12183 out_check_init_state
:
12190 ssd_cleanup_log(dev
);
12195 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12196 ssd_cleanup_workq(dev
);
12202 out_init_protocol_info
:
12204 #ifdef LINUX_SUSE_OS
12205 iounmap(dev
->ctrlp
);
12207 pci_iounmap(pdev
, dev
->ctrlp
);
12210 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12211 out_request_mem_region
:
12213 pci_disable_device(pdev
);
12215 pci_set_drvdata(pdev
, NULL
);
12223 static void ssd_cleanup_tasklet(void)
12226 for_each_online_cpu(i
) {
12227 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
/*
 * ssd_init_tasklet(): set up the per-CPU completion machinery — a done-queue
 * list head plus one completion tasklet per online CPU.
 * NOTE(review): this extract is garbled (the embedded original line numbers
 * are discontinuous), so the braces, the return, and the condition that
 * selects between the __ssd_done_db and __ssd_done handlers are not visible
 * here; recover them from the full source before editing.
 */
12231 static int ssd_init_tasklet(void)
12235 for_each_online_cpu(i
) {
/* Per-CPU list of completed commands drained by the tasklet. */
12236 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
/* Doorbell-mode completion handler (chosen by an elided condition). */
12239 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
/* Default completion handler. */
12241 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
12248 static struct pci_device_id ssd_pci_tbl
[] = {
12249 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12250 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12251 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12252 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12253 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12256 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
12258 static struct pci_driver ssd_driver
= {
12259 .name
= MODULE_NAME
,
12260 .id_table
= ssd_pci_tbl
,
12261 .probe
= ssd_init_one
,
12262 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12263 .remove
= __devexit_p(ssd_remove_one
),
12265 .remove
= ssd_remove_one
,
12269 /* notifier block to get a notify on system shutdown/halt/reboot */
/*
 * ssd_notify_reboot(): reboot-notifier callback.  Walks every registered
 * device on ssd_list and quiesces it before the system goes down: log the
 * power-off event, flush cached data, save metadata and SMART, stop the
 * background workqueue, and optionally ask the card to reload its firmware.
 * NOTE(review): this extract is garbled — several original lines
 * (conditions, closing braces, the final return value) are elided from
 * this view; consult the full source before editing.
 */
12270 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12272 struct ssd_device
*dev
= NULL
;
12273 struct ssd_device
*n
= NULL
;
/* _safe variant: list entries may change while devices are shut down. */
12275 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12276 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
/* Best-effort: results deliberately ignored on the shutdown path. */
12278 (void)ssd_flush(dev
);
12279 (void)ssd_save_md(dev
);
12283 ssd_save_smart(dev
);
12285 ssd_stop_workq(dev
);
/* Kick a firmware reload so a staged image becomes active after power cycle. */
12287 if (dev
->reload_fw
) {
12288 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
12296 static struct notifier_block ssd_notifier
= {
12297 ssd_notify_reboot
, NULL
, 0
/*
 * ssd_init_module(): module entry point.  Initialization order:
 * device-index pool -> /proc -> sysfs -> per-CPU tasklets -> device class
 * -> char/block major numbers -> module-parameter sanitization -> PCI
 * driver registration -> reboot notifier.  On failure it unwinds in
 * reverse through the goto-label chain at the bottom.
 * NOTE(review): this extract is garbled — the `ret` declaration, the
 * `if (ret)` checks, #else/#endif pairs, closing braces, several error
 * labels and the final return are elided; consult the full source.
 */
12300 static int __init
ssd_init_module(void)
12304 hio_info("driver version: %s\n", DRIVER_VERSION
);
12306 ret
= ssd_init_index();
12308 hio_warn("init index failed\n");
12309 goto out_init_index
;
12312 ret
= ssd_init_proc();
12314 hio_warn("init proc failed\n");
12315 goto out_init_proc
;
12318 ret
= ssd_init_sysfs();
12320 hio_warn("init sysfs failed\n");
12321 goto out_init_sysfs
;
12324 ret
= ssd_init_tasklet();
12326 hio_warn("init tasklet failed\n");
12327 goto out_init_tasklet
;
/* Very old kernels expose the class_simple_* API for the device class. */
12330 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12331 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12333 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12335 if (IS_ERR(ssd_class
)) {
12336 ret
= PTR_ERR(ssd_class
);
12337 goto out_class_create
;
/* A positive module-param major is honored; otherwise the kernel picks
 * one and the chosen value is stored back into the parameter. */
12340 if (ssd_cmajor
> 0) {
12341 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12343 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12346 hio_warn("unable to register chardev major number\n");
12347 goto out_register_chardev
;
12350 if (ssd_major
> 0) {
12351 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12353 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12356 hio_warn("unable to register major number\n");
12357 goto out_register_blkdev
;
/* Separate major for the "slave" block devices of dual-port cards. */
12360 if (ssd_major_sl
> 0) {
12361 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12363 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12366 hio_warn("unable to register slave major number\n");
12367 goto out_register_blkdev_sl
;
/* Clamp out-of-range module parameters back to their defaults. */
12370 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
12371 mode
= SSD_DRV_MODE_STANDARD
;
/* Non-standard mode adjustments — body elided in this extract. */
12375 if (mode
!= SSD_DRV_MODE_STANDARD
) {
12379 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
12380 int_mode
= SSD_INT_MODE_DEFAULT
;
/* Threaded-IRQ operation forces MSI interrupt mode. */
12383 if (threaded_irq
) {
12384 int_mode
= SSD_INT_MSI
;
12387 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
12388 log_level
= SSD_LOG_LEVEL_ERR
;
12391 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
12392 wmode
= SSD_WMODE_DEFAULT
;
/* pci_module_init() was the pre-2.6.20 spelling of driver registration. */
12395 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12396 ret
= pci_module_init(&ssd_driver
);
12398 ret
= pci_register_driver(&ssd_driver
);
12401 hio_warn("pci init failed\n");
12405 ret
= register_reboot_notifier(&ssd_notifier
);
12407 hio_warn("register reboot notifier failed\n");
12408 goto out_register_reboot_notifier
;
/* Error unwind: each label undoes everything registered before it. */
12413 out_register_reboot_notifier
:
12415 pci_unregister_driver(&ssd_driver
);
12416 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12417 out_register_blkdev_sl
:
12418 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12419 out_register_blkdev
:
12420 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12421 out_register_chardev
:
12422 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12423 class_simple_destroy(ssd_class
);
12425 class_destroy(ssd_class
);
12428 ssd_cleanup_tasklet();
12430 ssd_cleanup_sysfs();
12432 ssd_cleanup_proc();
12434 ssd_cleanup_index();
/*
 * ssd_cleanup_module(): module exit — unregister everything that
 * ssd_init_module() set up, in reverse order.
 * NOTE(review): this extract is garbled; original lines 12444-12446
 * (between the banner message and unregister_reboot_notifier) are elided
 * and may contain a shutdown flag — confirm against the full source.
 */
12440 static void __exit
ssd_cleanup_module(void)
12443 hio_info("unload driver: %s\n", DRIVER_VERSION
);
12447 unregister_reboot_notifier(&ssd_notifier
);
12449 pci_unregister_driver(&ssd_driver
);
12451 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12452 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12453 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
/* Mirror the versioned class-creation API used at init time. */
12454 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12455 class_simple_destroy(ssd_class
);
12457 class_destroy(ssd_class
);
12460 ssd_cleanup_tasklet();
12461 ssd_cleanup_sysfs();
12462 ssd_cleanup_proc();
12463 ssd_cleanup_index();
/*
 * ssd_register_event_notifier(): install @event_call as the per-device
 * event callback for the SSD behind @bdev, then replay internal-log
 * entries whose timestamps fall within the device's current uptime
 * window to the freshly installed callback.
 * NOTE(review): this extract is garbled — the declarations of tv/cur/
 * log_nr, the validation-failure return value, the log-iteration loop
 * bounds and the final return are elided from this view.
 */
12466 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
12468 struct ssd_device
*dev
;
12470 struct ssd_log
*le
;
/* Reject a null bdev, a null callback, or a bdev with no gendisk. */
12474 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
12478 dev
= bdev
->bd_disk
->private_data
;
12479 dev
->event_call
= event_call
;
12481 do_gettimeofday(&tv
);
/* Walk the device's internal log buffer. */
12484 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
12485 log_nr
= dev
->internal_log
.nr_log
;
/* Replay only entries stamped between device uptime start and now;
 * the callback's result is deliberately ignored. */
12488 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
12489 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
/*
 * ssd_unregister_event_notifier(): detach the event callback previously
 * installed by ssd_register_event_notifier() for this block device.
 * NOTE(review): this extract is garbled — the validation-failure return
 * value, closing braces and the final return are elided from this view.
 */
12497 int ssd_unregister_event_notifier(struct block_device
*bdev
)
12499 struct ssd_device
*dev
;
/* Reject a missing bdev or one with no attached gendisk. */
12501 if (!bdev
|| !(bdev
->bd_disk
)) {
12505 dev
= bdev
->bd_disk
->private_data
;
/* Clearing the pointer stops future event delivery for this device. */
12506 dev
->event_call
= NULL
;
/* Kernel-visible API exported for in-tree consumers of the hio driver. */
EXPORT_SYMBOL(ssd_get_label);
EXPORT_SYMBOL(ssd_get_version);
EXPORT_SYMBOL(ssd_set_otprotect);
EXPORT_SYMBOL(ssd_bm_status);
EXPORT_SYMBOL(ssd_submit_pbio);
EXPORT_SYMBOL(ssd_get_pciaddr);
EXPORT_SYMBOL(ssd_get_temperature);
EXPORT_SYMBOL(ssd_register_event_notifier);
EXPORT_SYMBOL(ssd_unregister_event_notifier);
EXPORT_SYMBOL(ssd_reset);
EXPORT_SYMBOL(ssd_set_wmode);
12525 module_init(ssd_init_module
);
12526 module_exit(ssd_cleanup_module
);
12527 MODULE_VERSION(DRIVER_VERSION
);
12528 MODULE_LICENSE("GPL");
12529 MODULE_AUTHOR("Huawei SSD DEV Team");
12530 MODULE_DESCRIPTION("Huawei SSD driver");